# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test ops """
import functools
import pytest

import numpy as np

import mindspore.nn as nn
import mindspore.ops.composite as C
from mindspore import Tensor
from mindspore import ops, Parameter, context
from mindspore import ms_function
from mindspore.common import dtype as mstype
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops.operations import _inner_ops as inner
from mindspore.ops.operations import _quant_ops as Q
from mindspore.ops.operations import nn_ops as nps
from mindspore.nn.layer import normalization
from mindspore._c_expression import security
from tests.security_utils import security_off_wrap
from ..ut_filter import non_graph_engine
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
    import (pipeline_for_compile_forward_ge_graph_for_case_by_case_config,
            pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception)
from ....mindspore_test_framework.pipeline.gradient.compile_gradient \
    import pipeline_for_compile_grad_ge_graph_for_case_by_case_config
from ....ops_common import convert

grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


class TargetNet(nn.Cell):
    def __init__(self):
        super(TargetNet, self).__init__()
        self.mul = P.Mul()

    def construct(self, x, y):
        return self.mul(x, y)

# Recursive GradOperation in Cell.
class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
        self.grad = C.GradOperation()
        self.network = network

    def construct(self, x, y):
        return self.grad(self.network)(x, y)

# Recursive GradOperation with a GradOperation object.
grad1 = C.GradOperation()
@ms_function
def f1(x, y):
    return grad1(grad1(TargetNet()))(x, y)

# Recursive GradOperation with F.grad.
@ms_function
def f2(x, y):
    return F.grad(F.grad(TargetNet()))(x, y)

def test_recursive_grad():
    x = Tensor(3, mstype.float32)
    y = Tensor(1, mstype.float32)

    Grad(Grad(TargetNet()))(x, y)
    f1(x, y)
    f2(x, y)

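# A minimal sketch (assumption, not one of the registered cases): for the product
# net above, d/dx (x * y) = y and d/dx (d/dx (x * y)) = 0, so the recursive grads
# are expected to evaluate to y and then to a zero tensor (GradOperation with
# default arguments differentiates w.r.t. the first input only).
@ms_function
def recursive_grad_sketch(x, y):
    first_order = grad1(TargetNet())(x, y)           # expected value: y
    second_order = grad1(grad1(TargetNet()))(x, y)   # expected value: 0
    return first_order, second_order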

class IndexAdd(nn.Cell):
    """IndexAdd net definition"""

    def __init__(self, axis):
        super(IndexAdd, self).__init__()
        self.index_add = P.IndexAdd(axis)
        self.input_x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)))

    def construct(self, indices, updates):
        return self.index_add(self.input_x, indices, updates)

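# A minimal usage sketch (assumption): with axis=1, `indices` selects columns of
# the (3, 3) parameter `input_x`, and each selected column is incremented by the
# matching column of `updates`, whose shape must be (3, len(indices)).
def index_add_sketch():
    net = IndexAdd(axis=1)
    indices = Tensor(np.array([0, 2]).astype(np.int32))
    updates = Tensor(np.ones((3, 2)).astype(np.float32))
    return net(indices, updates)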

class InputBackward(nn.Cell):
    def __init__(self, network):
        super(InputBackward, self).__init__()
        self.network = network
        self.network.set_train()
        self.grad = grad_all_with_sens

    def construct(self, x1, x2, x3, sens):
        return self.grad(self.network)(x1, x2, x3, sens)


class NetForTupleInput(nn.Cell):
    def __init__(self, op):
        super(NetForTupleInput, self).__init__()
        self.op = op

    def construct(self, x1, x2):
        return self.op((x1, x2))


class StridedSlicessdNet(nn.Cell):
    def __init__(self):
        super(StridedSlicessdNet, self).__init__()
        self.rank = P.Rank()

    def construct(self, x1):
        return P.StridedSlice(1, 1, 0, self.rank(x1), 0)(x1, (0, 0), (0, 0), (1, 1))


class NetForConcat(nn.Cell):
    def __init__(self):
        super(NetForConcat, self).__init__()
        self.concat = P.Concat()

    def construct(self, x1):
        return self.concat((x1, x1))


class NetForConcat1(nn.Cell):
    def __init__(self):
        super(NetForConcat1, self).__init__()
        self.concat = P.Concat()

    def construct(self, x1, x2):
        return self.concat((x1, x2))


class NetForConcat2(nn.Cell):
    def __init__(self):
        super(NetForConcat2, self).__init__()
        self.concat = P.Concat(axis=2)

    def construct(self, x1, x2):
        return self.concat((x1, x2))


class NetForConcat3(nn.Cell):
    def __init__(self):
        super(NetForConcat3, self).__init__()
        self.concat = P.Concat(axis=0)

    def construct(self, x1, x2, x3):
        return self.concat((x1, x2, x3))


class NetForConcat4(nn.Cell):
    def __init__(self):
        super(NetForConcat4, self).__init__()
        self.concat = P.Concat(axis=-1)

    def construct(self, x1, x2, x3):
        return self.concat((x1, x2, x3))


class NetForStackInput(nn.Cell):
    def __init__(self, op):
        super(NetForStackInput, self).__init__()
        self.op = op
        self.mul = P.Mul()

    def construct(self, *args):
        t = ()
        for element in args:
            t = t + (self.mul(element, element),)
        return self.op(t)


class NetForUnpackInput(nn.Cell):
    def __init__(self, op):
        super(NetForUnpackInput, self).__init__()
        self.op = op
        self.mul = P.Mul()

    def construct(self, x1):
        return self.op((self.mul(x1, x1)))


class NetForFlatten(nn.Cell):
    def __init__(self):
        super(NetForFlatten, self).__init__()
        self.flatten = P.Flatten()

    def construct(self, x, y):
        return self.flatten(x) + y


class NetForFlatten0D(nn.Cell):
    def __init__(self):
        super(NetForFlatten0D, self).__init__()
        self.flatten = P.Flatten()

    def construct(self, x):
        return self.flatten(x)


class NetForFlattenComposed(nn.Cell):
    # Combine the flatten op with other ops so the flatten gradient can be tested.
    def __init__(self):
        super(NetForFlattenComposed, self).__init__()
        self.flatten = P.Flatten()

    def construct(self, x, y):
        return self.flatten(x + x) + y


class ArgmaxNet(nn.Cell):
    def __init__(self):
        super(ArgmaxNet, self).__init__()
        self.argmax = P.Argmax(axis=1)

    def construct(self, input_):
        return self.argmax(input_)


class ArgminNet(nn.Cell):
    def __init__(self):
        super(ArgminNet, self).__init__()
        self.argmin = P.Argmin(axis=1)

    def construct(self, input_):
        return self.argmin(input_)


class CumSumNet(nn.Cell):
    def __init__(self):
        super(CumSumNet, self).__init__()
        self.cumsum = P.CumSum()
        self.axis = 1

    def construct(self, input_):
        return self.cumsum(input_, self.axis)

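# Worked example (assumption) for CumSumNet with axis=1: the running sum is taken
# along each row, e.g. [[1, 2, 3], [4, 5, 6]] -> [[1, 3, 6], [4, 9, 15]].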

class SummaryNet(nn.Cell):
    def __init__(self):
        super(SummaryNet, self).__init__()
        self.s = P.ScalarSummary()
        self.add = P.Add()

    def construct(self, x, y):
        self.s("x1", x)
        return self.add(x, y)


class HistogramSummaryNet(nn.Cell):
    def __init__(self):
        super(HistogramSummaryNet, self).__init__()
        self.summary = P.HistogramSummary()
        self.add = P.Add()

    def construct(self, x, y):
        out = self.add(x, y)
        string_in = "out"
        self.summary(string_in, out)
        return out


class Moments(nn.Cell):
    """Moments net definition"""

    def __init__(self, axis=None, keep_dims=None):
        super(Moments, self).__init__()
        self.moments = nn.Moments(axis=axis, keep_dims=keep_dims)

    def construct(self, input_x):
        mean, variance = self.moments(input_x)
        return mean, variance

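# For reference (assumption): nn.Moments returns the mean E[x] and the (biased)
# variance E[(x - E[x])^2] reduced over `axis`, keeping the reduced dimensions
# when keep_dims=True.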

class BatchNorm3d(nn.Cell):
    """BatchNorm3d net definition"""

    def __init__(self, num_features):
        super(BatchNorm3d, self).__init__()
        self.bn3d = normalization.BatchNorm3d(num_features=num_features)

    def construct(self, input_x):
        bn3d_out = self.bn3d(input_x)
        return bn3d_out


class NLLLoss(nn.Cell):
    """NLLLoss net definition"""

    def __init__(self, reduction):
        super(NLLLoss, self).__init__()
        self.nll_loss = P.NLLLoss(reduction=reduction)

    def construct(self, input_x, target, weight):
        loss = self.nll_loss(input_x, target, weight)
        return loss


class ClipByNorm(nn.Cell):
    """ClipByNorm net definition"""

    def __init__(self, axis=None):
        super(ClipByNorm, self).__init__()
        self.clip_by_norm = nn.ClipByNorm(axis=axis)

    def construct(self, input_x, max_norm):
        norm = self.clip_by_norm(input_x, max_norm)
        return norm


class ClipByGlobalNorm(nn.Cell):
    """ClipByGlobalNorm net definition"""

    def __init__(self, x, clip_norm=1.0, use_norm=None):
        super(ClipByGlobalNorm, self).__init__()
        self.x = x
        self.clip_norm = clip_norm
        self.use_norm = use_norm

    def construct(self):
        norm = C.clip_by_global_norm(self.x, self.clip_norm, self.use_norm)
        return norm


class Embedding(nn.Cell):
    """Embedding net definition"""

    def __init__(self, vocab_size, embedding_size, padding_idx=None):
        super(Embedding, self).__init__()
        self.embedding = nn.Embedding(vocab_size=vocab_size, embedding_size=embedding_size,
                                      padding_idx=padding_idx)

    def construct(self, index):
        res = self.embedding(index)
        return res


class EmbeddingLookup(nn.Cell):
    """EmbeddingLookup net definition"""

    def __init__(self, vocab_size, embedding_size, max_norm=None):
        super(EmbeddingLookup, self).__init__()
        self.embedding_lookup = nn.EmbeddingLookup(vocab_size=vocab_size, embedding_size=embedding_size,
                                                   max_norm=max_norm)

    def construct(self, index):
        res = self.embedding_lookup(index)
        return res


class CountNonZero(nn.Cell):
    """CountNonZero net definition"""

    def __init__(self, axis, keep_dims, dtype):
        super(CountNonZero, self).__init__()
        self.axis = axis
        self.keep_dims = keep_dims
        self.dtype = dtype

    def construct(self, input_x):
        nonzero_num = C.count_nonzero(input_x, self.axis, self.keep_dims, self.dtype)
        return nonzero_num


class Mish(nn.Cell):
    """Mish net definition"""

    def __init__(self):
        super(Mish, self).__init__()
        self.mish = P.Mish()

    def construct(self, input_x):
        out = self.mish(input_x)
        return out


class SeLU(nn.Cell):
    """Selu net definition"""

    def __init__(self):
        super(SeLU, self).__init__()
        self.selu = P.SeLU()

    def construct(self, input_x):
        out = self.selu(input_x)
        return out


class MulNoNan(nn.Cell):
    """MulNoNan net definition"""

    def __init__(self):
        super(MulNoNan, self).__init__()
        self.mul_no_nan = P.MulNoNan()

    def construct(self, input_x, input_y):
        out = self.mul_no_nan(input_x, input_y)
        return out


class ScatterUpdate(nn.Cell):
    """ScatterUpdate net definition"""

    def __init__(self, ref_shape, dtype=np.float32, use_locking=False):
        super(ScatterUpdate, self).__init__()
        self.scatter_update = P.ScatterUpdate(use_locking)
        self.ref = Parameter(Tensor(np.ones(ref_shape, dtype)), name="ref")

    def construct(self, indices, updates):
        out = self.scatter_update(self.ref, indices, updates)
        return out


class ScatterMax(nn.Cell):
    """ScatterMax net definition"""

    def __init__(self, dtype=np.float32, use_locking=False):
        super(ScatterMax, self).__init__()
        self.scatter_max = P.ScatterMax(use_locking)
        self.ref = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype)), name="ref")

    def construct(self, indices, updates):
        out = self.scatter_max(self.ref, indices, updates)
        return out


class ScatterMin(nn.Cell):
    """ScatterMin net definition"""

    def __init__(self, dtype=np.float32, use_locking=False):
        super(ScatterMin, self).__init__()
        self.scatter_min = P.ScatterMin(use_locking)
        self.ref = Parameter(Tensor(np.array([[-1.0, 2.0, 3.0], [-4.0, 1.0, 6.0]], dtype)), name="ref")

    def construct(self, indices, updates):
        out = self.scatter_min(self.ref, indices, updates)
        return out


class ScatterAdd(nn.Cell):
    """ScatterAdd net definition"""

    def __init__(self, ref_shape, dtype=np.float32, use_locking=False):
        super(ScatterAdd, self).__init__()
        self.scatter_add = P.ScatterAdd(use_locking)
        self.ref = Parameter(Tensor(np.ones(ref_shape, dtype)), name="ref")

    def construct(self, indices, updates):
        out = self.scatter_add(self.ref, indices, updates)
        return out


class ScatterNonAliasingAdd(nn.Cell):
    """ScatterNonAliasingAdd net definition"""

    def __init__(self, ref_shape, dtype=np.float32):
        super(ScatterNonAliasingAdd, self).__init__()
        self.scatter_no_aliasing_add = P.ScatterNonAliasingAdd()
        self.ref = Parameter(Tensor(np.ones(ref_shape, dtype)), name="ref")

    def construct(self, indices, updates):
        out = self.scatter_no_aliasing_add(self.ref, indices, updates)
        return out


class ScatterNdSub(nn.Cell):
    """ScatterNdSub net definition"""

    def __init__(self, ref_shape, dtype=np.float32):
        super(ScatterNdSub, self).__init__()
        self.scatter_nd_sub = P.ScatterNdSub()
        self.ref = Parameter(Tensor(np.ones(ref_shape, dtype)), name="ref")

    def construct(self, indices, updates):
        out = self.scatter_nd_sub(self.ref, indices, updates)
        return out


class ScatterNdAdd(nn.Cell):
    """ScatterNdAdd net definition"""

    def __init__(self, ref_shape, dtype=np.float32):
        super(ScatterNdAdd, self).__init__()
        self.scatter_nd_add = P.ScatterNdAdd()
        self.ref = Parameter(Tensor(np.ones(ref_shape, dtype)), name="ref")

    def construct(self, indices, updates):
        out = self.scatter_nd_add(self.ref, indices, updates)
        return out


class ScatterSub(nn.Cell):
    """ScatterSub net definition"""

    def __init__(self, ref_shape, dtype=np.float32, use_locking=False):
        super(ScatterSub, self).__init__()
        self.scatter_sub = P.ScatterSub(use_locking)
        self.ref = Parameter(Tensor(np.ones(ref_shape, dtype)), name="ref")

    def construct(self, indices, updates):
        out = self.scatter_sub(self.ref, indices, updates)
        return out


class ScatterMul(nn.Cell):
    """ScatterMul net definition"""

    def __init__(self, ref_shape, dtype=np.float32, use_locking=False):
        super(ScatterMul, self).__init__()
        self.scatter_mul = P.ScatterMul(use_locking)
        self.ref = Parameter(Tensor(np.ones(ref_shape, dtype)), name="ref")

    def construct(self, indices, updates):
        out = self.scatter_mul(self.ref, indices, updates)
        return out


class ScatterDiv(nn.Cell):
    """ScatterDiv net definition"""

    def __init__(self, ref_shape, dtype=np.float32, use_locking=False):
        super(ScatterDiv, self).__init__()
        self.scatter_div = P.ScatterDiv(use_locking)
        self.ref = Parameter(Tensor(np.ones(ref_shape, dtype) * 10), name="ref")

    def construct(self, indices, updates):
        out = self.scatter_div(self.ref, indices, updates)
        return out

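# A minimal usage sketch (assumption) for the Scatter* nets above: each one
# updates slices of its `ref` parameter in place along axis 0 and returns the
# updated parameter, e.g. ScatterAdd adds the update rows to the selected rows.
def scatter_add_sketch():
    net = ScatterAdd(ref_shape=(4, 3))
    indices = Tensor(np.array([0, 2]).astype(np.int32))
    updates = Tensor(np.ones((2, 3)).astype(np.float32))
    return net(indices, updates)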

class Conv3D(nn.Cell):
    """Conv3D net definition"""

    def __init__(self, out_channel, kernel_size, mode, pad_mode, pad, stride, dilation, group, data_format):
        super(Conv3D, self).__init__()
        self.conv = nps.Conv3D(out_channel=out_channel, kernel_size=kernel_size, mode=mode, pad_mode=pad_mode,
                               pad=pad, stride=stride, dilation=dilation, group=group, data_format=data_format)

    def construct(self, x, w):
        out = self.conv(x, w)
        return out


class Conv3DBackpropInput(nn.Cell):
    """Conv3DBackpropInput net definition"""

    def __init__(self, input_shape, out_channel, kernel_size, mode, pad_mode, pad, stride, dilation, group,
                 data_format):
        super(Conv3DBackpropInput, self).__init__()
        self.conv = nps.Conv3DBackpropInput(out_channel=out_channel, kernel_size=kernel_size, mode=mode,
                                            pad_mode=pad_mode, pad=pad, stride=stride, dilation=dilation,
                                            group=group, data_format=data_format)
        self.x_size = input_shape

    def construct(self, w, doutput):
        ms_out = self.conv(w, doutput, self.x_size)
        return ms_out


class Conv3DBackpropFilter(nn.Cell):
    """Conv3DBackpropFilter net definition"""

    def __init__(self, w_shape, out_channel, kernel_size, mode, pad_mode, pad, stride, dilation, group, data_format):
        super(Conv3DBackpropFilter, self).__init__()
        self.conv = G.Conv3DBackpropFilter(out_channel=out_channel, kernel_size=kernel_size, mode=mode,
                                           pad_mode=pad_mode, pad=pad, stride=stride, dilation=dilation,
                                           group=group, data_format=data_format)
        self.w_size = w_shape

    def construct(self, x, doutput):
        ms_out = self.conv(x, doutput, self.w_size)
        return ms_out


class Conv3DTranspose(nn.Cell):
    """Conv3DTranspose net definition"""

    def __init__(self, in_channel, out_channel, kernel_size, mode, pad, stride, dilation, group, data_format):
        super(Conv3DTranspose, self).__init__()
        self.conv = nps.Conv3DTranspose(in_channel=in_channel, out_channel=out_channel, kernel_size=kernel_size,
                                        mode=mode, pad=pad, stride=stride, dilation=dilation, group=group,
                                        data_format=data_format)

    def construct(self, x, w):
        ms_out = self.conv(x, w)
        return ms_out


class ApplyFtrlNet(nn.Cell):
    def __init__(self):
        super(ApplyFtrlNet, self).__init__()
        self.apply_ftrl = P.ApplyFtrl()
        self.lr = 0.001
        self.l1 = 0.0
        self.l2 = 0.0
        self.lr_power = -0.5
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")

    def construct(self, grad):
        out = self.apply_ftrl(self.var, self.accum, self.linear, grad, self.lr, self.l1, self.l2, self.lr_power)
        return out


class SparseApplyFtrlNet(nn.Cell):
    def __init__(self):
        super(SparseApplyFtrlNet, self).__init__()
        self.sparse_apply_ftrl = P.SparseApplyFtrl(lr=0.001, l1=0.0, l2=0.0, lr_power=-0.5)
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")

    def construct(self, grad, indices):
        out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices)
        return out


class SparseApplyFtrlV2Net(nn.Cell):
    def __init__(self):
        super(SparseApplyFtrlV2Net, self).__init__()
        self.sparse_apply_ftrl_v2 = P.SparseApplyFtrlV2(lr=0.001, l1=0.0, l2=0.0, l2_shrinkage=0.0, lr_power=-0.5)
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")

    def construct(self, grad, indices):
        out = self.sparse_apply_ftrl_v2(self.var, self.accum, self.linear, grad, indices)
        return out


class SparseApplyProximalAdagradNet(nn.Cell):
    def __init__(self):
        super(SparseApplyProximalAdagradNet, self).__init__()
        self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        self.lr = 0.01
        self.l1 = 0.0
        self.l2 = 0.0

    def construct(self, grad, indices):
        out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, self.l2, grad, indices)
        return out


class ApplyProximalAdagradNet(nn.Cell):
    def __init__(self):
        super(ApplyProximalAdagradNet, self).__init__()
        self.apply_proximal_adagrad = P.ApplyProximalAdagrad()
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        self.lr = 0.01
        self.l1 = 0.0
        self.l2 = 0.0

    def construct(self, grad):
        out = self.apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, self.l2, grad)
        return out


class ApplyAdaMaxNet(nn.Cell):
    def __init__(self):
        super(ApplyAdaMaxNet, self).__init__()
        self.apply_ada_max = P.ApplyAdaMax()
        self.beta1_power = 0.9
        self.lr = 0.001
        self.beta1 = 0.9
        self.beta2 = 0.99
        self.epsilon = 1e-10
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
        self.v = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="v")

    def construct(self, grad):
        out = self.apply_ada_max(self.var, self.m, self.v, self.beta1_power, self.lr,
                                 self.beta1, self.beta2, self.epsilon, grad)
        return out


class ApplyAdadeltaNet(nn.Cell):
    def __init__(self):
        super(ApplyAdadeltaNet, self).__init__()
        self.apply_adadelta = P.ApplyAdadelta()
        self.lr = 0.001
        self.rho = 0.0
        self.epsilon = 1e-6
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        self.accum_update = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum_update")

    def construct(self, grad):
        out = self.apply_adadelta(self.var, self.accum, self.accum_update, self.lr, self.rho, self.epsilon, grad)
        return out


class ApplyAdagradNet(nn.Cell):
    def __init__(self):
        super(ApplyAdagradNet, self).__init__()
        self.apply_adagrad = P.ApplyAdagrad()
        self.lr = 0.001
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")

    def construct(self, grad):
        out = self.apply_adagrad(self.var, self.accum, self.lr, grad)
        return out


class ApplyAdagradV2Net(nn.Cell):
    def __init__(self):
        super(ApplyAdagradV2Net, self).__init__()
        self.apply_adagrad_v2 = P.ApplyAdagradV2(epsilon=1e-6)
        self.lr = 0.001
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")

    def construct(self, grad):
        out = self.apply_adagrad_v2(self.var, self.accum, self.lr, grad)
        return out


class ApplyAddSignNet(nn.Cell):
    def __init__(self):
        super(ApplyAddSignNet, self).__init__()
        self.apply_add_sign = P.ApplyAddSign()
        self.lr = 0.001
        self.alpha = 1.0
        self.sign_decay = 0.99
        self.beta = 0.99
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")

    def construct(self, grad):
        out = self.apply_add_sign(self.var, self.m, self.lr, self.alpha, self.sign_decay, self.beta, grad)
        return out


class ApplyPowerSignNet(nn.Cell):
    def __init__(self):
        super(ApplyPowerSignNet, self).__init__()
        self.apply_power_sign = P.ApplyPowerSign()
        self.lr = 0.001
        self.logbase = np.e
        self.sign_decay = 0.99
        self.beta = 0.99
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")

    def construct(self, grad):
        out = self.apply_power_sign(self.var, self.m, self.lr, self.logbase, self.sign_decay, self.beta, grad)
        return out


class ApplyGradientDescentNet(nn.Cell):
    def __init__(self):
        super(ApplyGradientDescentNet, self).__init__()
        self.apply_gradient_descent = P.ApplyGradientDescent()
        self.alpha = 0.001
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")

    def construct(self, delta):
        out = self.apply_gradient_descent(self.var, self.alpha, delta)
        return out


class ApplyProximalGradientDescentNet(nn.Cell):
    def __init__(self):
        super(ApplyProximalGradientDescentNet, self).__init__()
        self.apply_proximal_gradient_descent = P.ApplyProximalGradientDescent()
        self.alpha = 0.001
        self.l1 = 0.0
        self.l2 = 0.0
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")

    def construct(self, delta):
        out = self.apply_proximal_gradient_descent(self.var, self.alpha, self.l1, self.l2, delta)
        return out


class SparseApplyAdagradNet(nn.Cell):
    def __init__(self):
        super(SparseApplyAdagradNet, self).__init__()
        self.sparse_apply_adagrad = P.SparseApplyAdagrad(lr=0.01)
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")

    def construct(self, grad, indices):
        out = self.sparse_apply_adagrad(self.var, self.accum, grad, indices)
        return out


class SparseApplyAdagradV2Net(nn.Cell):
    def __init__(self):
        super(SparseApplyAdagradV2Net, self).__init__()
        self.sparse_apply_adagrad_v2 = P.SparseApplyAdagradV2(lr=0.01, epsilon=0.001)
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")

    def construct(self, grad, indices):
        out = self.sparse_apply_adagrad_v2(self.var, self.accum, grad, indices)
        return out


class ApplyRMSNet(nn.Cell):
    def __init__(self):
        super(ApplyRMSNet, self).__init__()
        self.apply_rms = P.ApplyRMSProp()
        self.lr = 0.001
        self.rho = 0.0
        self.momentum = 0.0
        self.epsilon = 1e-10
        self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        self.ms = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="ms")
        self.moment = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="moment")

    def construct(self, grad):
        out = self.apply_rms(self.var, self.ms, self.moment, self.lr, grad, self.rho, self.momentum, self.epsilon)
        return out


class InplaceAddNet(nn.Cell):
    def __init__(self):
        super(InplaceAddNet, self).__init__()
        self.inplace_add = P.InplaceAdd(indices=(0, 1))

    def construct(self, x, v):
        out = self.inplace_add(x, v)
        return out


class InplaceSubNet(nn.Cell):
    def __init__(self):
        super(InplaceSubNet, self).__init__()
        self.inplace_sub = P.InplaceSub(indices=(0, 1))

    def construct(self, x, v):
        out = self.inplace_sub(x, v)
        return out


class NormalNet(nn.Cell):
    def __init__(self, shape=None, seed=0):
        super(NormalNet, self).__init__()
        self.shape = shape
        self.seed = seed

    def construct(self, mean, stddev):
        out = C.normal(self.shape, mean, stddev, self.seed)
        return out


class LaplaceNet(nn.Cell):
    def __init__(self, shape=None, seed=0):
        super(LaplaceNet, self).__init__()
        self.shape = shape
        self.seed = seed

    def construct(self, mean, lambda_param):
        out = C.laplace(self.shape, mean, lambda_param, self.seed)
        return out


class GammaNet(nn.Cell):
    def __init__(self, shape=None, seed=0):
        super(GammaNet, self).__init__()
        self.shape = shape
        self.seed = seed

    def construct(self, alpha, beta):
        out = C.gamma(self.shape, alpha, beta, self.seed)
        return out


class PoissonNet(nn.Cell):
    def __init__(self, shape=None, seed=0):
        super(PoissonNet, self).__init__()
        self.shape = shape
        self.seed = seed

    def construct(self, mean):
        out = C.poisson(self.shape, mean, self.seed)
        return out


class UniformNet(nn.Cell):
    def __init__(self, shape=None, seed=0):
        super(UniformNet, self).__init__()
        self.shape = shape
        self.seed = seed

    def construct(self, a, b):
        out = C.uniform(self.shape, a, b, self.seed)
        return out


class CTCGreedyDecoderNet(nn.Cell):
    def __init__(self):
        super(CTCGreedyDecoderNet, self).__init__()
        self.ctc_greedy_decoder = P.CTCGreedyDecoder()
        self.assert_op = P.Assert(300)

    def construct(self, inputs, sequence_length):
        out = self.ctc_greedy_decoder(inputs, sequence_length)
        self.assert_op(True, (out[0], out[1], out[2], out[3]))
        return out[2]


class StridedSliceNet(nn.Cell):
    def __init__(self):
        super(StridedSliceNet, self).__init__()
        self.begins = (1, 2, 3, 2, 1)
        self.ends = (5, 6, 7, 8, 9)
        self.strides = (1, 2, 3, 2, 1)
        self.strided_slice_0 = P.StridedSlice(begin_mask=3, end_mask=5, ellipsis_mask=4,
                                              shrink_axis_mask=2, new_axis_mask=8)
        self.strided_slice_1 = P.StridedSlice(begin_mask=5, end_mask=2, ellipsis_mask=2,
                                              shrink_axis_mask=6, new_axis_mask=10)
        self.strided_slice_2 = P.StridedSlice(begin_mask=3, end_mask=3, ellipsis_mask=4,
                                              shrink_axis_mask=5, new_axis_mask=13)
        self.strided_slice_3 = P.StridedSlice(begin_mask=0, end_mask=0, ellipsis_mask=4,
                                              shrink_axis_mask=12, new_axis_mask=15)
        self.const_0 = Tensor(np.ones([6, 8, 9, 1, 8], np.float32))
        self.const_1 = Tensor(np.ones([5, 7, 8, 1, 8], np.float32))
        self.const_2 = Tensor(np.ones([1, 3, 7, 8, 9, 1, 8], np.float32))
        self.const_3 = Tensor(np.ones([1, 1, 6, 7, 8, 9, 1, 8], np.float32))

    def construct(self, x):
        out_0 = self.strided_slice_0(x, self.begins, self.ends, self.strides) + self.const_0
        out_1 = self.strided_slice_1(x, self.begins, self.ends, self.strides) + self.const_1
        out_2 = self.strided_slice_2(x, self.begins, self.ends, self.strides) + self.const_2
        out_3 = self.strided_slice_3(x, self.begins, self.ends, self.strides) + self.const_3
        return out_0, out_1, out_2, out_3

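# How to read the StridedSlice masks used in this file (assumption, mirrors the
# TF-style semantics): each mask is a bit field over slice positions. If bit i of
# begin_mask/end_mask is set, begin[i]/end[i] is ignored and the full range is
# used; a set bit in ellipsis_mask marks an ellipsis that expands to the remaining
# dimensions; new_axis_mask inserts a size-1 dimension at that position; and
# shrink_axis_mask drops the dimension, indexing it at begin[i]. For example,
# begin_mask=3 has bits 0 and 1 set, so begin[0] and begin[1] are ignored.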
@pytest.mark.skip(reason='0 in shape is not supported')
def test_strided_slice_const():
    class StridedSLiceConstNet(nn.Cell):
        """StridedSLiceConstNet net definition"""

        def __init__(self):
            super(StridedSLiceConstNet, self).__init__()
            self.begins = (0, 2, -5, 2, 1)
            self.ends = (0, 6, 9, 8, 9)
            self.strides = (1, 2, 1, 2, 1)
            self.strided_slice = P.StridedSlice(begin_mask=2,
                                                end_mask=6,
                                                ellipsis_mask=4,
                                                shrink_axis_mask=6,
                                                new_axis_mask=18)

        def construct(self, x):
            out = self.strided_slice(x, self.begins, self.ends, self.strides)
            return out

    net = StridedSLiceConstNet()
    context.set_context(mode=context.GRAPH_MODE)
    x = Tensor(np.ones([6, 7, 8, 9, 10]), mstype.float32)
    ret = net(x)
    assert ret.shape == (0, 1, 7, 8, 9, 3, 1)
    assert (ret.asnumpy() == np.array([], np.float32).reshape([0, 1, 7, 8, 9, 3, 1])).all()


class ParallelConcatNet(nn.Cell):
    def __init__(self):
        super(ParallelConcatNet, self).__init__()
        self.parallel_concat = P.ParallelConcat()

    def construct(self, x1, x2):
        return self.parallel_concat((x1, x2))


class BasicLSTMCellNet(nn.Cell):
    """ BasicLSTMCellNet definition """

    def __init__(self):
        super(BasicLSTMCellNet, self).__init__()
        self.lstm = P.BasicLSTMCell()

    def construct(self, x, h, c, w, b):
        return self.lstm(x, h, c, w, b)


class DynamicGRUV2Net(nn.Cell):
    """ DynamicGRUV2Net definition """

    def __init__(self):
        super(DynamicGRUV2Net, self).__init__()
        self.dynamic_gru = P.DynamicGRUV2()

    def construct(self, x, w_i, w_h, b_i, b_h, init_h):
        return self.dynamic_gru(x, w_i, w_h, b_i, b_h, None, init_h)


class EditDistance(nn.Cell):
    def __init__(self, hypothesis_shape, truth_shape, normalize=True):
        super(EditDistance, self).__init__()
        self.edit_distance = P.EditDistance(normalize)
        self.hypothesis_shape = hypothesis_shape
        self.truth_shape = truth_shape

    def construct(self, hypothesis_indices, hypothesis_values, truth_indices, truth_values):
        return self.edit_distance(hypothesis_indices, hypothesis_values, self.hypothesis_shape,
                                  truth_indices, truth_values, self.truth_shape)


class ApplyAdagradDANet(nn.Cell):
    def __init__(self, use_locking=False):
        super(ApplyAdagradDANet, self).__init__()
        self.apply_adagrad_d_a = P.ApplyAdagradDA(use_locking)
        self.var = Parameter(Tensor(np.array([[0.6, 0.4], [0.1, 0.5]]).astype(np.float32)), name="var")
        self.gradient_accumulator = Parameter(Tensor(np.array([[0.1, 0.3], [0.1, 0.5]]).astype(np.float32)),
                                              name="gradient_accumulator")
        self.gradient_squared_accumulator = Parameter(Tensor(np.array([[0.2, 0.1], [0.1, 0.2]]).astype(np.float32)),
                                                      name="gradient_squared_accumulator")

    def construct(self, grad, lr, l1, l2, global_step):
        out = self.apply_adagrad_d_a(self.var, self.gradient_accumulator, self.gradient_squared_accumulator, grad,
                                     lr, l1, l2, global_step)
        return out


class SparseApplyRMSPropNet(nn.Cell):
    def __init__(self, rho, momentum, epsilon, use_locking=False):
        super(SparseApplyRMSPropNet, self).__init__()
        self.sparse_apply_r_m_s_prop = P.SparseApplyRMSProp(rho, momentum, epsilon, use_locking)
        self.var = Parameter(Tensor(np.array([[0.6, 0.3], [0.1, 0.5]]).astype(np.float32)), name="var")
        self.ms = Parameter(Tensor(np.array([[0.2, 0.4], [0.1, 0.3]]).astype(np.float32)), name="ms")
        self.mom = Parameter(Tensor(np.array([[0.3, 0.1], [0.3, 0.6]]).astype(np.float32)), name="mom")

    def construct(self, lr, grad, indices):
        out = self.sparse_apply_r_m_s_prop(self.var, self.ms, self.mom, lr, grad, indices)
        return out

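# Schema of the case tables below (assumption, based on how the imported pipelines
# consume them): each entry maps a case name to a config dict where 'block' is the
# op or net under test, 'desc_inputs' lists the forward inputs (a bare shape list
# is converted to a tensor by ops_common.convert), 'desc_const' lists constant
# inputs, 'desc_bprop' gives the sens values for the backward pass, and 'skip'
# names the stages (e.g. 'backward') to be skipped.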
test_case_math_ops = [
    ('BitwiseAnd', {
        'block': P.BitwiseAnd(),
        'desc_inputs': [Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16),
                        Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)],
        'skip': ['backward']}),
    ('BitwiseAnd_1', {
        'block': P.BitwiseAnd(),
        'desc_inputs': [Tensor(np.array([[1, 2, 3], [-1, -2, -3]]), mstype.int16),
                        Tensor(np.array([1, 1, 1]), mstype.int16)],
        'skip': ['backward']}),
    ('BitwiseOr', {
        'block': P.BitwiseOr(),
        'desc_inputs': [Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16),
                        Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)],
        'skip': ['backward']}),
    ('BitwiseOr_1', {
        'block': P.BitwiseOr(),
        'desc_inputs': [Tensor(np.array([[1, 2, 3], [-1, -2, -3]]), mstype.int16),
                        Tensor(np.array([1, 1, 1]), mstype.int16)],
        'skip': ['backward']}),
    ('BitwiseXor', {
        'block': P.BitwiseXor(),
        'desc_inputs': [Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16),
                        Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)],
        'skip': ['backward']}),
    ('BitwiseXor_1', {
        'block': P.BitwiseXor(),
        'desc_inputs': [Tensor(np.array([[1, 2, 3], [-1, -2, -3]]), mstype.int16),
                        Tensor(np.array([1, 1, 1]), mstype.int16)],
        'skip': ['backward']}),
    ('Neg', {
        'block': P.Neg(),
        'desc_inputs': [[1, 3, 4, 4]],
        'desc_bprop': [[1, 3, 4, 4]]}),
    ('Sub', {
        'block': P.Sub(),
        'desc_inputs': [[3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Add', {
        'block': P.Add(),
        'desc_inputs': [[3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Mul0', {
        'block': P.Mul(),
        'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Mul1', {
        'block': P.Mul(),
        'desc_inputs': [[2, 3, 1, 1], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Mul2', {
        'block': P.Mul(),
        'desc_inputs': [[2, 3, 3, 5], [2, 3, 1, 1]],
        'desc_bprop': [[2, 3, 3, 5]],
        'skip': ['backward']}),
    ('Mul3', {
        'block': P.Mul(),
        'desc_inputs': [[3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]],
        'skip': ['backward']}),
    ('Mul4', {
        'block': P.Mul(),
        'desc_inputs': [[2, 3, 3, 5], [3, 5]],
        'desc_bprop': [[2, 3, 3, 5]],
        'skip': ['backward']}),
    ('Add0', {
        'block': P.Add(),
        'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Add1', {
        'block': P.Add(),
        'desc_inputs': [[3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]],
        'skip': ['backward']}),
    ('Add2', {
        'block': P.Add(),
        'desc_inputs': [[2, 3, 3, 5], [3, 5]],
        'desc_bprop': [[2, 3, 3, 5]],
        'skip': ['backward']}),
    ('Add3', {
        'block': P.Add(),
        'desc_inputs': [[2, 3, 1, 1], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]],
        'skip': ['backward']}),
    ('Add4', {
        'block': P.Add(),
        'desc_inputs': [[2, 3, 3, 5], [2, 3, 1, 1]],
        'desc_bprop': [[2, 3, 3, 5]],
        'skip': ['backward']}),
    ('Minimum', {
        'block': P.Minimum(),
        'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Pow_0', {
        'block': P.Pow(),
        'desc_const': [2.0],
        'desc_inputs': [[2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Pow_1', {
        'block': P.Pow(),
        'desc_inputs': [[3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Exp', {
        'block': P.Exp(),
        'desc_inputs': [[2, 3]],
        'desc_bprop': [[2, 3]]}),
    ('Expm1', {
        'block': P.Expm1(),
        'desc_inputs': [[2, 3]],
        'desc_bprop': [[2, 3]]}),
    ('Erf', {
        'block': P.Erf(),
        'desc_inputs': [Tensor(np.array([-2, -1, 0, 1, 2]).astype(np.float16))],
        'desc_bprop': [Tensor(np.array([-2, -1, 0, 1, 2]).astype(np.float16))]}),
    ('Floor', {
        'block': P.Floor(),
        'desc_inputs': [[2, 512, 56, 56]],
        'desc_bprop': [[2, 512, 56, 56]],
        'skip': ['backward']}),
    ('Ceil', {
        'block': P.Ceil(),
        'desc_inputs': [[2, 512, 56, 56]],
        'desc_bprop': [[2, 512, 56, 56]],
        'skip': ['backward']}),
    ('InplaceAdd', {
        'block': InplaceAddNet(),
        'desc_inputs': [Tensor(np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)),
                        Tensor(np.array([[0.5, 1], [1, 1.5]]).astype(np.float32))],
        'skip': ['backward']}),
    ('InplaceSub', {
        'block': InplaceSubNet(),
        'desc_inputs': [Tensor(np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)),
                        Tensor(np.array([[0.5, 1], [1, 1.5]]).astype(np.float32))],
        'skip': ['backward']}),
    ('ACos', {
        'block': P.ACos(),
        'desc_inputs': [Tensor(np.array([2., 3.]).astype(np.float32))],
        'desc_bprop': [Tensor(np.array([2., 3.]).astype(np.float32))]}),
    ('ACosGrad', {
        'block': G.ACosGrad(),
        'desc_inputs': [[2, 3], [2, 3]],
        'skip': ['backward']}),
    ('Acosh', {
        'block': P.Acosh(),
        'desc_inputs': [Tensor(np.array([2., 3.]).astype(np.float32))],
        'desc_bprop': [Tensor(np.array([2., 3.]).astype(np.float32))]}),
    ('AcoshGrad', {
        'block': G.AcoshGrad(),
        'desc_inputs': [[2, 3], [2, 3]],
        'skip': ['backward']}),
    ('Sin', {
        'block': P.Sin(),
        'desc_inputs': [[2, 3]],
        'desc_bprop': [[2, 3]]}),
    ('Asin', {
        'block': P.Asin(),
        'desc_inputs': [[2, 3]],
        'desc_bprop': [[2, 3]]}),
    ('Asinh', {
        'block': P.Asinh(),
        'desc_inputs': [[3, 4, 5]],
        'desc_bprop': [[3, 4, 5]]}),
    ('Tan', {
        'block': P.Tan(),
        'desc_inputs': [[2, 3]],
        'desc_bprop': [[2, 3]]}),
    ('Reciprocal', {
        'block': P.Reciprocal(),
        'desc_inputs': [[2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Minimum_0', {
        'block': P.Minimum(),
        'desc_inputs': [[2, 3, 3, 5], [3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Maximum', {
        'block': P.Maximum(),
        'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Maximum_0', {
        'block': P.Maximum(),
        'desc_inputs': [[3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('MaximumGrad', {
        'block': G.MaximumGrad(),
        'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5], [2, 3, 3, 5]],
        'skip': ['backward']}),
    ('MinimumGrad', {
        'block': G.MinimumGrad(),
        'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5], [2, 3, 3, 5]],
        'skip': ['backward']}),
    ('StridedSlice_00', {
        'block': P.StridedSlice(shrink_axis_mask=0),
        'desc_const': [(0, 1, 2, 1),
                       (2, 3, 3, 4),
                       (1, 1, 1, 2)],
        'desc_inputs': [[2, 3, 3, 5]],
        'desc_bprop': [[2, 2, 1, 3]],
        'skip': ['backward']}),
    ('Slice_1', {
        'block': P.Slice(),
        'desc_const': [(0, 1, 2, 1),
                       (1, 1, 1, 2)],
        'desc_inputs': [[2, 3, 3, 5]],
        'desc_bprop': [[1, 1, 1, 2]]}),
    ('StridedSliceGrad', {
        'block': G.StridedSliceGrad(),
        'desc_const': [(64, 1, 1024),
                       (0, 1, 0),
                       (64, 2, 1024),
                       (1, 1, 1)],
        'desc_inputs': [[64, 128, 1024]],
        'skip': ['backward']}),
    ('Normal', {
        'block': NormalNet((3, 2, 4), 0),
        'desc_inputs': [Tensor(0.0, mstype.float32), Tensor(1.0, mstype.float32)],
        'skip': ['backward']}),
    ('Laplace', {
        'block': LaplaceNet((3, 2, 4), 0),
        'desc_inputs': [Tensor(1.0, mstype.float32), Tensor(1.0, mstype.float32)],
        'skip': ['backward']}),
    ('Gamma', {
        'block': GammaNet((3, 2, 4), 0),
        'desc_inputs': [Tensor(1.0, mstype.float32), Tensor(1.0, mstype.float32)],
        'skip': ['backward']}),
    ('Poisson', {
        'block': PoissonNet((3, 2, 4), 0),
        'desc_inputs': [Tensor(2.0, mstype.float32)],
        'skip': ['backward']}),
    ('Uniform', {
        'block': UniformNet((3, 2, 4), 0),
        'desc_inputs': [Tensor(0.0, mstype.float32), Tensor(1.0, mstype.float32)],
        'skip': ['backward']}),
    ('RandomChoiceWithMask', {
        'block': P.RandomChoiceWithMask(256),
        'desc_inputs': [Tensor(np.random.rand(24000, 4).astype(np.bool_))],
        'desc_bprop': [[256, 4], [256, 4]],
        'skip': ['backward']}),
    ('Lerp', {
        'block': P.Lerp(),
        'desc_inputs': [Tensor(np.array([1., 2., 3., 4.]).astype(np.float32)),
                        Tensor(np.array([10., 10., 10., 10.]).astype(np.float32)),
                        Tensor(0.5, mstype.float32)],
        'desc_bprop': [Tensor(np.array([1., 2., 3., 4.]).astype(np.float32))]}),
    ('LessEqual', {
        'block': P.LessEqual(),
        'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16)),
                        Tensor(np.random.rand(4).astype(np.float16))],
        'skip': ['backward']}),
    ('Less', {
        'block': P.Less(),
        'desc_inputs': [[2, 1, 4, 5], [2, 1, 4, 5]],
        'desc_bprop': [Tensor(np.zeros((2, 1, 4, 5), np.bool_))],
        'skip': ['backward']}),
    ('RealDiv_0', {
        'block': P.RealDiv(),
        'desc_const': [Tensor(2048.0), Tensor(0.0)],
        'desc_inputs': [],
        'skip': ['backward']}),
    ('RealDiv', {
        'block': P.RealDiv(),
        'desc_inputs': [[4], Tensor(np.ones(4).astype(np.float32))],
        'desc_bprop': [[4]]}),
    ('RealDiv_1', {
        'block': P.RealDiv(),
        'desc_inputs': [[512, 1024], [512, 1024]],
        'desc_bprop': [[512, 1024]]}),
    ('FloorDiv', {
        'block': P.FloorDiv(),
        'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16)),
                        Tensor(np.random.rand(4).astype(np.float16))],
        'skip': ['backward']}),
    ('FloorMod', {
        'block': P.FloorMod(),
        'desc_inputs': [[3, 4, 5], [2, 3, 4, 5]],
        'desc_bprop': [[2, 3, 4, 5]]}),
    ('TruncateDiv', {
        'block': P.TruncateDiv(),
        'desc_inputs': [[3, 4, 5], [2, 3, 4, 5]],
        'desc_bprop': [[2, 3, 4, 5]]}),
    ('TruncateMod', {
        'block': P.TruncateMod(),
        'desc_inputs': [[3, 4, 5], [2, 3, 4, 5]],
        'desc_bprop': [[2, 3, 4, 5]]}),
    ('identity', {
        'block': ops.functional.identity,
        'desc_inputs': [[2, 2]],
        'skip': ['backward']}),
    ('MatMul_1', {
        'block': P.MatMul(transpose_a=False, transpose_b=False),
        'desc_inputs': [[1024, 160], [160, 1024]],
        'desc_bprop': [[1024, 1024]]}),
    ('MatMul_2', {
        'block': P.MatMul(transpose_a=True, transpose_b=True),
        'desc_inputs': [[160, 1024], [1024, 160]],
        'desc_bprop': [[1024, 1024]]}),
    ('Sub', {
        'block': P.Sub(),
        'desc_inputs': [[3], [3]],
        'desc_bprop': [[3]]}),
    ('TruncatedNormal', {
        'block': P.TruncatedNormal(),
        'desc_const': [(1, 2, 3)],
        'desc_inputs': [],
        'skip': ['backward'],
        'add_fake_input': True}),
    ('Select', {
        'block': P.Select(),
        'desc_inputs': [Tensor(np.array([[True, False, False], [False, True, True]])),
                        [2, 3], [2, 3]],
        'desc_bprop': [[2, 3]]}),
    ('ClipByNorm_1', {
        'block': ClipByNorm(),
        'desc_inputs': [Tensor(np.random.rand(3, 16, 5, 4).astype(np.float32)),
                        Tensor(np.array([0.01]).astype(np.float32))],
        'skip': ['backward']}),
    ('ClipByNorm_2', {
        'block': ClipByNorm(axis=0),
        'desc_inputs': [Tensor(np.random.rand(3, 16, 5, 4).astype(np.float32)),
                        Tensor(np.array([0.01]).astype(np.float32))],
        'skip': ['backward']}),
    ('ClipByGlobalNorm', {
        'block': ClipByGlobalNorm(x=Tensor(np.random.rand(3, 16, 5, 4).astype(np.float32)),
                                  clip_norm=1.0, use_norm=None),
        'desc_inputs': [],
        'skip': ['backward']}),
    ('Cdist', {
        'block': P.Cdist(p=2.0),
        'desc_inputs': [Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32)),
                        Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))],
        'desc_bprop': [Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))]}),
    ('Embedding_1', {
        'block': Embedding(vocab_size=10, embedding_size=3),
        'desc_inputs': [Tensor(np.array([0, 2, 2, 7]).astype(np.int32))],
        'skip': ['backward']}),
    ('Embedding_2', {
        'block': Embedding(vocab_size=10, embedding_size=3, padding_idx=2),
        'desc_inputs': [Tensor(np.array([0, 2, 2, 7]).astype(np.int32))],
        'skip': ['backward']}),
    ('EmbeddingLookup_1', {
        'block': EmbeddingLookup(vocab_size=10, embedding_size=3),
        'desc_inputs': [Tensor(np.array([0, 2, 2, 7]).astype(np.int32))],
        'skip': ['backward']}),
    ('EmbeddingLookup_2', {
        'block': EmbeddingLookup(vocab_size=10, embedding_size=3, max_norm=0.01),
        'desc_inputs': [Tensor(np.array([0, 2, 2, 7]).astype(np.int32))],
        'skip': ['backward']}),
    ('Moments', {
        'block': Moments(axis=(), keep_dims=False),
        'desc_inputs': [Tensor(np.random.rand(3, 16, 5, 4).astype(np.float32))],
        'skip': ['backward']}),
    ('NLLLoss', {
        'block': NLLLoss(reduction="mean"),
        'desc_inputs': [Tensor(np.random.rand(3, 16), mstype.float32),
                        Tensor(np.random.rand(3), mstype.int32),
                        Tensor(np.random.rand(16), mstype.float32)],
        'desc_bprop': [(Tensor(np.random.rand(1), mstype.float32), Tensor(np.random.rand(1), mstype.float32))]}),
    ('BatchNorm3d', {
        'block': BatchNorm3d(num_features=3),
        'desc_inputs': [Tensor(np.random.rand(3, 3, 3, 5, 4).astype(np.float32))],
        'skip': ['backward']}),
    ('Conv3D', {
        'block': Conv3D(out_channel=32, kernel_size=(4, 3, 3), mode=1, pad_mode='valid', pad=0,
                        stride=1, dilation=1, group=1, data_format="NCDHW"),
        'desc_inputs': [Tensor(np.random.random((16, 3, 10, 32, 32)).astype(np.float16)),
                        Tensor(np.random.random((32, 3, 4, 3, 3)).astype(np.float16))],
        'skip': ['backward']}),
    ('Conv3DBackpropInput', {
        'block': Conv3DBackpropInput(input_shape=(16, 32, 13, 37, 33), out_channel=32, kernel_size=(4, 6, 2), mode=1,
                                     pad_mode='valid', pad=0, stride=1, dilation=1, group=1, data_format="NCDHW"),
        'desc_inputs': [Tensor(np.random.random((32, 32, 4, 6, 2)).astype(np.float16)),
                        Tensor(np.random.random((16, 32, 10, 32, 32)).astype(np.float16))],
        'skip': ['backward']}),
    ('Conv3DBackpropFilter', {
        'block': Conv3DBackpropFilter(w_shape=(32, 32, 4, 6, 2), out_channel=32, kernel_size=(4, 6, 2), mode=1,
                                      pad_mode='valid', pad=0, stride=1, dilation=1, group=1, data_format="NCDHW"),
        'desc_inputs': [Tensor(np.random.random((16, 32, 13, 37, 33)).astype(np.float16)),
                        Tensor(np.random.random((16, 32, 10, 32, 32)).astype(np.float16))],
        'skip': ['backward']}),
    ('Conv3DTranspose', {
        'block': Conv3DTranspose(in_channel=32, out_channel=3, kernel_size=(4, 6, 2), mode=1,
                                 pad=0, stride=1, dilation=1, group=1, data_format="NCDHW"),
        'desc_inputs': [Tensor(np.random.random((32, 3, 10, 32, 32)).astype(np.float16)),
                        Tensor(np.random.random((3, 3, 4, 6, 2)).astype(np.float16))],
        'skip': ['backward']}),
    ('CountNonZero', {
        'block': CountNonZero(axis=(), keep_dims=False, dtype=mstype.int32),
        'desc_inputs': [Tensor(np.random.rand(3, 16, 5, 4).astype(np.float32))],
        'skip': ['backward']}),
    ('FakeQuantWithMinMaxVars', {
        'block': Q.FakeQuantWithMinMaxVars(num_bits=8, narrow_range=False),
        'desc_inputs': [Tensor(np.random.rand(3, 16, 5, 5), mstype.float32),
                        Tensor(np.array([-6]), mstype.float32),
                        Tensor(np.array([6]), mstype.float32)],
        'desc_bprop': [Tensor(np.random.rand(3, 16, 5, 5), mstype.float32)]}),
    ('FakeQuantWithMinMaxVarsPerChannel', {
        'block': Q.FakeQuantWithMinMaxVarsPerChannel(num_bits=8, narrow_range=False),
        'desc_inputs': [Tensor(np.random.rand(3, 16, 5, 4), mstype.float32),
                        Tensor(np.array([-6, -1, -2, -3]), mstype.float32),
                        Tensor(np.array([6, 1, 2, 3]), mstype.float32)],
        'desc_bprop': [Tensor(np.random.rand(3, 16, 5, 4), mstype.float32)]}),
    ('Mish', {
        'block': Mish(),
        'desc_inputs': [Tensor(np.random.rand(3, 6, 16, 16), mstype.float32)],
        'desc_bprop': [Tensor(np.random.rand(3, 6, 16, 16), mstype.float32)]}),
    ('SeLU', {
        'block': SeLU(),
        'desc_inputs': [Tensor(np.random.rand(3, 6, 16, 16), mstype.float32)],
        'desc_bprop': [Tensor(np.random.rand(3, 6, 16, 16), mstype.float32)]}),
    ('MulNoNan', {
        'block': MulNoNan(),
        'desc_inputs': [Tensor(np.random.rand(3, 6, 16, 16), mstype.float32),
                        Tensor(np.random.rand(3, 6, 16, 16), mstype.float32)],
        'desc_bprop': [Tensor(np.random.rand(3, 6, 16, 16), mstype.float32)]}),
    ('Rank', {
        'block': P.Rank(),
        'desc_inputs': [[2, 3]],
        'skip': ['backward']}),
    ('InvertPermutation', {
        'block': P.InvertPermutation(),
        'desc_const': [(0, 3, 1, 2)],
        'desc_inputs': [],
        'skip': ['backward']}),
    ('Xdivy', {
        'block': P.Xdivy(),
        'desc_inputs': [[4, 5], [2, 3, 4, 5]],
        'desc_bprop': [[2, 3, 4, 5]]}),
    ('Xlogy', {
        'block': P.Xlogy(),
        'desc_inputs': [[4, 5], [2, 3, 4, 5]],
        'desc_bprop': [[2, 3, 4, 5]]}),
    ('SquaredDifference', {
        'block': P.SquaredDifference(),
        'desc_inputs': [[4, 5], [2, 3, 4, 5]],
        'desc_bprop': [[2, 3, 4, 5]]}),
    ('Square', {
        'block': P.Square(),
        'desc_inputs': [[4]],
        'desc_bprop': [[4]]}),
    ('Rsqrt', {
        'block': P.Rsqrt(),
        'desc_inputs': [[4]],
        'desc_bprop': [[4]]}),
    ('Sqrt', {
        'block': P.Sqrt(),
        'desc_inputs': [[4]],
        'desc_bprop': [[4]]}),
    ('RealDiv', {
        'block': P.RealDiv(),
        'desc_inputs': [[4, 5], [2, 3, 4, 5]],
        'desc_bprop': [[2, 3, 4, 5]]}),
    ('IsFinite', {
        'block': P.IsFinite(),
        'desc_inputs': [Tensor(np.random.random((3, 4, 5)).astype(np.float32))],
1499        'desc_bprop': [Tensor(np.random.random((3, 4, 5)).astype(np.bool_))]}),
1500    ('Div', {
1501        'block': P.Div(),
1502        'desc_inputs': [[4, 5], [2, 3, 4, 5]],
1503        'desc_bprop': [[2, 3, 4, 5]]}),
1504    ('Equal', {
1505        'block': P.Equal(),
1506        'desc_inputs': [[3, 4, 5], [4, 5]],
1507        'desc_bprop': [Tensor(np.zeros((3, 4, 5), np.bool_))]}),
1508    ('NotEqual', {
1509        'block': P.NotEqual(),
1510        'desc_inputs': [[4, 1], [2, 3, 4, 5]],
1511        'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))]}),
1512    ('NotEqual_0', {
1513        'block': P.NotEqual(),
1514        'desc_inputs': [Tensor(np.array(1).astype(np.int32)), [2, 3, 4, 5]],
1515        'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))],
1516        'skip': ['backward']}),
1517    ('ApproximateEqual', {
1518        'block': P.ApproximateEqual(),
1519        'desc_inputs': [[3, 4, 5], [3, 4, 5]],
1520        'desc_bprop': [Tensor(np.zeros((3, 4, 5), np.bool_))]}),
1521    ('Greater', {
1522        'block': P.Greater(),
1523        'desc_inputs': [[2, 3, 4, 1], [4, 5]],
1524        'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))]}),
1525    ('GreaterEqual', {
1526        'block': P.GreaterEqual(),
1527        'desc_inputs': [[2, 3, 4, 1], [4, 5]],
1528        'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))]}),
1529    ('LogicalNot', {
1530        'block': P.LogicalNot(),
1531        'desc_inputs': [Tensor(np.zeros((3, 4, 5), np.bool_))],
1532        'desc_bprop': [Tensor(np.ones((3, 4, 5), np.bool_))]}),
1533    ('LogicalAnd', {
1534        'block': P.LogicalAnd(),
1535        'desc_inputs': [Tensor(np.zeros((2, 3, 4), np.bool_)), Tensor(np.ones((1), np.bool_))],
1536        'desc_bprop': [Tensor(np.zeros((2, 3, 4), np.bool_))]}),
1537    ('LogicalOr', {
1538        'block': P.LogicalOr(),
1539        'desc_inputs': [Tensor(np.zeros((3, 4, 5), np.bool_)), Tensor(np.ones((3, 1, 1), np.bool_))],
1540        'desc_bprop': [Tensor(np.zeros((3, 4, 5), np.bool_))]}),
1541    ('NpuAllocFloatStatus', {
1542        'block': P.NPUAllocFloatStatus(),
1543        'desc_inputs': [],
1544        'add_fake_input': True,
1545        'fake_input_type': np.float32,
1546        'desc_bprop': [Tensor(np.zeros([8]).astype(np.float32))],
1547        'skip': ['backward']}),
1548    ('NpuGetFloatStatus', {
1549        'block': P.NPUGetFloatStatus(),
1550        'desc_inputs': [Tensor(np.zeros([8]).astype(np.float32))],
1551        'desc_bprop': [Tensor(np.zeros([8]).astype(np.float32))],
1552        'skip': ['backward']}),
1553    ('NpuClearFloatStatus', {
1554        'block': P.NPUClearFloatStatus(),
1555        'desc_inputs': [Tensor(np.zeros([8]).astype(np.float32))],
1556        'desc_bprop': [Tensor(np.zeros([8]).astype(np.float32))],
1557        'skip': ['backward']}),
1558    ('CheckValid', {
1559        'block': P.CheckValid(),
1560        'desc_inputs': [[20000, 4], [3]],
1561        'desc_bprop': [[20000]],
1562        'skip': ['backward']}),
1563    ('NMSWithMask', {
1564        'block': P.NMSWithMask(0.5),
1565        'desc_inputs': [[128, 5]],
1566        'desc_bprop': [[128, 5], [128], [128]],
1567        'skip': ['backward']}),
1568    ('Abs', {
1569        'block': P.Abs(),
1570        'desc_inputs': [[4]],
1571        'desc_bprop': [[4]]}),
1572    ('CumSum', {
1573        'block': CumSumNet(),
1574        'desc_inputs': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))],
1575        'desc_bprop': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7],
1576                                        [1, 3, 7, 9]]).astype(np.float32))]}),
1577    ('ReduceSum_3', {
1578        'block': P.ReduceSum(),
1579        'desc_const': [0],
1580        'desc_inputs': [[3, 2]],
1581        'desc_bprop': [[2]]}),
1582    ('ReduceSum_4', {
1583        'block': P.ReduceSum(keep_dims=True),
1584        'desc_const': [0],
1585        'desc_inputs': [[3, 2]],
1586        'desc_bprop': [[1, 2]]}),
1587    ('ReduceSum_5', {
1588        'block': P.ReduceSum(keep_dims=True),
1589        'desc_inputs': [[2, 3, 4]],
1590        'desc_bprop': [[1, 1, 1]]}),
1591    ('ReduceSum_6', {
1592        'block': P.ReduceSum(),
1593        'desc_inputs': [[2, 3, 4]],
1594        'desc_bprop': [[1]]}),
1595    ('Sum_0', {
1596        'block': P.ReduceSum(),
1597        'desc_const': [(1,)],
1598        'desc_inputs': [[3, 2]],
1599        'desc_bprop': [[3]]}),
1600    ('Sum_1', {
1601        'block': P.ReduceSum(keep_dims=True),
1602        'desc_const': [(1,)],
1603        'desc_inputs': [[3, 2]],
1604        'desc_bprop': [[3, 1]]}),
1605    ('Sum_2', {
1606        'block': P.ReduceSum(),
1607        'desc_const': [(0, 1)],
1608        'desc_inputs': [[3, 2]],
1609        'desc_bprop': [[1]]}),
1610    ('Sum_3', {
1611        'block': P.ReduceSum(),
1612        'desc_const': [0],
1613        'desc_inputs': [[3, 2]],
1614        'desc_bprop': [[2]]}),
1615    ('Sum_4', {
1616        'block': P.ReduceSum(keep_dims=True),
1617        'desc_const': [0],
1618        'desc_inputs': [[3, 2]],
1619        'desc_bprop': [[1, 2]]}),
1620    ('Sum_5', {
1621        'block': P.ReduceSum(keep_dims=True),
1622        'desc_const': [()],
1623        'desc_inputs': [[2, 3, 4]],
1624        'desc_bprop': [[1, 1, 1]]}),
1625    ('Sum_6', {
1626        'block': P.ReduceSum(),
1627        'desc_const': [()],
1628        'desc_inputs': [[2, 3, 4]],
1629        'desc_bprop': [[1]]}),
1630    ('Sign', {
1631        'block': P.Sign(),
1632        'desc_inputs': [[3]],
1633        'desc_bprop': [[3]]}),
1634    ('Round', {
1635        'block': P.Round(),
1636        'desc_inputs': [[3]],
1637        'desc_bprop': [[3]]}),
1638    ('Atan2', {
1639        'block': P.Atan2(),
1640        'desc_inputs': [Tensor(np.array([0, 1]).astype(np.float32)),
1641                        Tensor(np.array([1, 1]).astype(np.float32))],
1642        'desc_bprop': [[2]]}),
1643    ('SquareSumAll', {
1644        'block': P.SquareSumAll(),
1645        'desc_inputs': [Tensor(np.array([0, 1, 4, 5]).astype(np.float32)),
1646                        Tensor(np.array([1, 1, 3, 7]).astype(np.float32))],
1647        'desc_bprop': [Tensor(np.array(0.1).astype(np.float32)),
1648                       Tensor(np.array(0.1).astype(np.float32))]}),
1649    ('Cos', {
1650        'block': P.Cos(),
1651        'desc_inputs': [[2, 3]],
1652        'desc_bprop': [[2, 3]]}),
1653    ('ReduceAll', {
1654        'block': P.ReduceAll(),
1655        'desc_const': [1],
1656        'desc_inputs': [Tensor(np.array([[True, False], [True, True]]))],
1657        'desc_bprop': []}),
1658    ('ReduceAny', {
1659        'block': P.ReduceAny(),
1660        'desc_const': [1],
1661        'desc_inputs': [Tensor(np.array([[True, False], [True, True]]))],
1662        'desc_bprop': []}),
1663    ('BesselI0e', {
1664        'block': P.BesselI0e(),
1665        'desc_inputs': [[2, 3]],
1666        'desc_bprop': [[2, 3]]}),
1667    ('BesselI1e', {
1668        'block': P.BesselI1e(),
1669        'desc_inputs': [[2, 3]],
1670        'desc_bprop': [[2, 3]]}),
1671    ('Atan', {
1672        'block': P.Atan(),
1673        'desc_inputs': [[2, 3]],
1674        'desc_bprop': [[2, 3]]}),
1675    ('AtanGrad', {
1676        'block': G.AtanGrad(),
1677        'desc_inputs': [[2, 3], [2, 3]],
1678        'skip': ['backward']}),
1679    ('Atanh', {
1680        'block': P.Atanh(),
1681        'desc_inputs': [[2, 3]],
1682        'desc_bprop': [[2, 3]]}),
1683    ('Cosh', {
1684        'block': P.Cosh(),
1685        'desc_inputs': [[3, 4, 5]],
1686        'desc_bprop': [[3, 4, 5]]}),
1687    ('Sinh', {
1688        'block': P.Sinh(),
1689        'desc_inputs': [[3, 4, 5]],
1690        'desc_bprop': [[3, 4, 5]]}),
1691    ('Inv', {
1692        'block': P.Inv(),
1693        'desc_inputs': [[21, 9, 12, 5]],
1694        'desc_bprop': [[21, 9, 12, 5]]}),
1695    ('Invert', {
1696        'block': P.Invert(),
1697        'desc_inputs': [Tensor(np.array([[24, 4, 13, 9], [1, 5, 10, 8]]).astype(np.int16))],
1698        'desc_bprop': [],
1699        'skip': ['backward']}),
1700    ('HistogramFixedWidth', {
1701        'block': P.HistogramFixedWidth(5),
1702        'desc_inputs': [Tensor([-1.0, 0.0, 1.5, 2.0, 5.0, 15], mstype.float16), Tensor([0.0, 5.0], mstype.float16)],
1703        'desc_bprop': [],
1704        'skip': ['backward']}),
1705    ('Mod', {
1706        'block': P.Mod(),
1707        'desc_inputs': [[3, 4, 5], [2, 3, 4, 5]],
1708        'desc_bprop': [[2, 3, 4, 5]]}),
1709    ('IFMR', {
1710        'block': Q.IFMR(min_percentile=0.2, max_percentile=0.9, search_range=(1.0, 2.0),
1711                        search_step=1.0, with_offset=False),
1712        'desc_inputs': [[3, 4, 5], Tensor([0.1], mstype.float32), Tensor([0.9], mstype.float32),
1713                        Tensor(np.random.rand(4).astype(np.int32))],
1714        'desc_bprop': [],
1715        'skip': ['backward']}),
1716    ('Erfinv', {
1717        'block': P.Erfinv(),
1718        'desc_inputs': [Tensor(np.array([0.1, 0.1, 0.1]).astype(np.float16))],
1719        'desc_bprop': [Tensor(np.array([1, 1, 1]).astype(np.float16))]}),
1720    ('IndexAdd', {
1721        'block': IndexAdd(1),
1722        'desc_inputs': (Tensor(np.array([0, 1, 2]).astype(np.int32)),
1723                        Tensor(np.array([[0.5, 1.0, 1.5], [1.0, 1.5, 2.0], [2.0, 2.5, 3.0]]).astype(np.float32))),
1724        'desc_bprop': [Tensor(np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]).astype(np.float32))]}),
1725]
1726
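# Field meanings for the case entries in these test_case_* lists, inferred from how
# the entries are written in this file (descriptive comment only):
#   'block'        - operator, small Cell wrapper, or constant Tensor under test
#   'desc_inputs'  - forward inputs; concrete Tensors, or plain shape lists that the
#                    case-by-case pipelines appear to expand into generated data
#   'desc_const'   - arguments passed as constants rather than as network inputs
#   'desc_bprop'   - output-gradient (sens) specs used when the backward pass runs
#   'skip'         - stages to skip for this case, e.g. ['backward']
#   'add_fake_input' / 'fake_input_type' - used when 'desc_inputs' is empty so the
#                    compiled network still has an input to feed
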
1727test_case_nn_ops = [
1728    ('BiasAdd', {
1729        'block': P.BiasAdd(),
1730        'desc_inputs': [[1, 3, 3, 3], [3]],
1731        'desc_bprop': [[1, 3, 3, 3]]}),
1732    ('BiasAddGrad', {
1733        'block': G.BiasAddGrad(),
1734        'desc_inputs': [[1, 3, 3, 3]],
1735        'skip': ['backward']}),
1736    ('GeLU', {
1737        'block': P.GeLU(),
1738        'desc_inputs': [[1, 3, 4, 4]],
1739        'desc_bprop': [[1, 3, 4, 4]]}),
1740    ('GeLUGrad', {
1741        'block': G.GeLUGrad(),
1742        'desc_inputs': [[2, 2], [2, 2], [2, 2]],
1743        'desc_bprop': [[2, 2]],
1744        'skip': ['backward']}),
1745    ('Tanh', {
1746        'block': P.Tanh(),
1747        'desc_inputs': [[1, 3, 4, 4]],
1748        'desc_bprop': [[1, 3, 4, 4]]}),
1749    ('TanhGrad', {
1750        'block': G.TanhGrad(),
1751        'desc_inputs': [[1, 3, 4, 4], [1, 3, 4, 4]],
1752        'desc_bprop': [[1, 3, 4, 4]],
1753        'skip': ['backward']}),
1754    ('ReLU', {
1755        'block': P.ReLU(),
1756        'desc_inputs': [[1, 3, 4, 4]],
1757        'desc_bprop': [[1, 3, 4, 4]]}),
1758    ('ReLU6', {
1759        'block': P.ReLU6(),
1760        'desc_inputs': [[1, 3, 4, 4]],
1761        'desc_bprop': [[1, 3, 4, 4]]}),
1762    ('ReLUV2', {
1763        'block': P.ReLUV2(),
1764        'desc_inputs': [[1, 3, 4, 4]],
1765        'desc_bprop': [[1, 3, 4, 4], ([1, 1, 4, 4, 2], {'dtype': np.uint8})]}),
1766    ('ReLUGrad', {
1767        'block': G.ReluGrad(),
1768        'desc_inputs': [[1, 3, 4, 4], [1, 3, 4, 4]],
1769        'skip': ['backward']}),
1770    ('Softplus', {
1771        'block': P.Softplus(),
1772        'desc_inputs': [[1, 3, 4, 4]],
1773        'desc_bprop': [[1, 3, 4, 4]]}),
1774    ('SoftplusGrad', {
1775        'block': G.SoftplusGrad(),
1776        'desc_inputs': [[1, 3, 4, 4], [1, 3, 4, 4]],
1777        'skip': ['backward']}),
1778    ('Elu', {
1779        'block': P.Elu(),
1780        'desc_inputs': [[2, 3, 4]],
1781        'desc_bprop': [[2, 3, 4]]}),
1782    ('EluGrad', {
1783        'block': G.EluGrad(),
1784        'desc_inputs': [[2, 3, 4], [2, 3, 4]],
1785        'desc_bprop': [[2, 3, 4]],
1786        'skip': ['backward']}),
1787    ('Sigmoid', {
1788        'block': P.Sigmoid(),
1789        'desc_inputs': [[1, 3, 4, 4]],
1790        'desc_bprop': [[1, 3, 4, 4]]}),
1791    ('MaxPool', {
1792        'block': P.MaxPool(kernel_size=(2, 2), strides=(2, 2), pad_mode="VALID"),
1793        'desc_inputs': [[100, 3, 28, 28]],
1794        'desc_bprop': [[100, 3, 14, 14]]}),
1795    ('MaxPoolGrad', {
1796        'block': G.MaxPoolGrad(kernel_size=(2, 2), strides=(2, 2), pad_mode="VALID"),
1797        'desc_inputs': [[3, 4, 6, 6], [3, 4, 3, 3], [3, 4, 3, 3]],
1798        'desc_bprop': [[3, 4, 6, 6]],
1799        'skip': ['backward']}),
1800    ('MaxPool3D', {
1801        'block': P.MaxPool3D(kernel_size=2, strides=2, pad_mode="VALID"),
1802        'desc_inputs': [[100, 3, 28, 28, 28]],
1803        'desc_bprop': [[100, 3, 14, 14, 14]]}),
1804    ('MaxPool3D', {
1805        'block': P.MaxPool3D(kernel_size=4, strides=2, pad_mode="PAD", pad_list=2, ceil_mode=False),
1806        'desc_inputs': [[100, 3, 28, 28, 28]],
1807        'desc_bprop': [[100, 3, 14, 14, 14]]}),
1808    ('MaxPool3D', {
1809        'block': P.MaxPool3D(kernel_size=4, strides=2, pad_mode="PAD", pad_list=2, ceil_mode=True),
1810        'desc_inputs': [[100, 3, 28, 28, 28]],
1811        'desc_bprop': [[100, 3, 14, 14, 14]]}),
1812    ('MaxPool3DGrad', {
1813        'block': G.MaxPool3DGrad(kernel_size=2, strides=2, pad_mode="VALID"),
1814        'desc_inputs': [[3, 4, 6, 6, 6], [3, 4, 3, 3, 3], [3, 4, 3, 3, 3]],
1815        'desc_bprop': [[3, 4, 6, 6, 6]]}),
1816    ('AvgPool', {
1817        'block': P.AvgPool(kernel_size=(2, 2), strides=(2, 2), pad_mode="VALID"),
1818        'desc_inputs': [[100, 3, 28, 28]],
1819        'desc_bprop': [[100, 3, 14, 14]]}),
1820    ('AvgPool3D_1', {
1821        'block': P.AvgPool3D(kernel_size=2, strides=2, pad_mode="VALID"),
1822        'desc_inputs': [[10, 3, 28, 28, 28]],
1823        'desc_bprop': [[10, 3, 14, 14, 14]]}),
1824    ('AvgPool3D_2', {
1825        'block': P.AvgPool3D(kernel_size=3, strides=2, pad_mode="PAD", pad=1),
1826        'desc_inputs': [[10, 3, 28, 31, 24]],
1827        'desc_bprop': [[10, 3, 14, 16, 12]]}),
1828    ('MaxPoolWithArgmax', {
1829        'block': P.MaxPoolWithArgmax(kernel_size=2, strides=2),
1830        'desc_inputs': [[128, 32, 32, 64]],
1831        'desc_bprop': [[128, 32, 16, 32], ([128, 32, 16, 32], {'dtype': np.int32})]}),
1832    ('SoftmaxCrossEntropyWithLogits', {
1833        'block': P.SoftmaxCrossEntropyWithLogits(),
1834        'desc_inputs': [[1, 10], [1, 10]],
1835        'desc_bprop': [[1], [1, 10]],
1836        'skip': ['backward_exec']}),
1837    ('Flatten', {
1838        'block': P.Flatten(),
1839        'desc_inputs': [[128, 32, 32, 64]],
1840        'desc_bprop': [[128, 65536]]}),
1841    ('LogSoftmax', {
1842        'block': P.LogSoftmax(),
1843        'desc_inputs': [[64, 2]],
1844        'desc_bprop': [[64, 2]]}),
1845    ('LogSoftmaxGrad', {
1846        'block': G.LogSoftmaxGrad(),
1847        'desc_inputs': [[16, 1234], [16, 1234]],
1848        'desc_bprop': [[64, 2]],
1849        'skip': ['backward']}),
1850    ('L2Normalize', {
1851        'block': P.L2Normalize(),
1852        'desc_inputs': [[2, 2]],
1853        'desc_bprop': [[2, 2]]}),
1854    ('L2NormalizeGrad', {
1855        'block': G.L2NormalizeGrad(),
1856        'desc_inputs': [[2, 2], [2, 2], [2, 2]],
1857        'desc_bprop': [[2, 2]],
1858        'skip': ['backward']}),
1859    ('LayerNorm', {
1860        'block': P.LayerNorm(),
1861        'desc_inputs': [[2, 16], [16], [16]],
1862        'desc_bprop': [[2, 16], [2, 1], [2, 1]]}),
1863    ('LayerNormGrad', {
1864        'block': G.LayerNormGrad(),
1865        'desc_inputs': [[2, 16], [2, 16], [2, 16], [2, 16], [16]],
1866        'desc_bprop': [[2, 16], [16], [16]],
1867        'skip': ['backward']}),
1868    ('BatchNorm', {
1869        'block': P.BatchNorm(),
1870        'desc_inputs': [[128, 64, 32, 32], [64], [64], [64], [64]],
1871        'desc_bprop': [[128, 64, 32, 32], [64], [64], [64], [64]],
1872        'skip': []}),
1873    ('BatchNormGrad', {
1874        'block': G.BatchNormGrad(),
1875        'desc_inputs': [[128, 64, 32, 32], [128, 64, 32, 32], [64], [64], [64], [64]],
1876        'desc_bprop': [[128, 64, 32, 32], [64], [64]],
1877        'skip': ['backward']}),
1878    ('SyncBatchNorm', {
1879        'block': inner.SyncBatchNorm(),
1880        'desc_inputs': [[128, 64, 32, 32], [64], [64], [64], [64]],
1881        'desc_bprop': [[128, 64, 32, 32], [64], [64], [64], [64]],
1882        'skip': []}),
1883    ('SyncBatchNormGrad', {
1884        'block': G.SyncBatchNormGrad(),
1885        'desc_inputs': [[128, 64, 32, 32], [128, 64, 32, 32], [64], [64], [64]],
1886        'desc_bprop': [[128, 64, 32, 32], [64], [64], [64], [64]],
1887        'skip': ['backward']}),
1888    ('TopK', {
1889        'block': P.TopK(),
1890        'desc_const': [5],
1891        'desc_inputs': [[20, 20, 10]],
1892        'desc_bprop': [[20, 20, 5]],
1893        'skip': ['backward']}),
1894    ('Sort', {
1895        'block': P.Sort(),
1896        'desc_inputs': [[2, 3, 4]],
1897        'desc_bprop': [[2, 3, 4], ([2, 3, 4], {'dtype': np.int32})]}),
1898    ('GatherV2_0', {
1899        'block': P.Gather(),
1900        'desc_const': [0],
1901        'desc_inputs': [[3, 1, 2], Tensor(np.array([0, 1]).astype(np.int32))],
1902        'desc_bprop': [[2, 1, 2]]}),
1903    ('GatherV2_1', {
1904        'block': P.Gather(),
1905        'desc_const': [2],
1906        'desc_inputs': [[3, 1, 3], Tensor(np.array([0, 1]).astype(np.int32))],
1907        'desc_bprop': [[3, 1, 2]]}),
1908    ('GatherV2_2', {
1909        'block': P.Gather(),
1910        'desc_const': [0],
1911        'desc_inputs': [[3, 1, 3], Tensor(np.array([[0, 1], [0, 1], [0, 1]]).astype(np.int32))],
1912        'desc_bprop': [[3, 2, 1, 3]]}),
1913    ('GatherV2_3', {
1914        'block': P.Gather(),
1915        'desc_const': [2],
1916        'desc_inputs': [[3, 1, 3], Tensor(np.array([[0, 1], [0, 1], [0, 1]]).astype(np.int32))],
1917        'desc_bprop': [[3, 1, 3, 2]]}),
1918    ('GatherV2_4', {
1919        'block': P.Gather(),
1920        'desc_const': [1],
1921        'desc_inputs': [[32, 5, 1024], Tensor(np.array([3]).astype(np.int32))],
1922        'desc_bprop': [[32, 1, 1024]]}),
1923    ('GatherV2_5', {
1924        'block': P.Gather(),
1925        'desc_const': [-1],
1926        'desc_inputs': [[3, 1, 3], Tensor(np.array([0, 1]).astype(np.int32))],
1927        'desc_bprop': [[3, 1, 2]]}),
1928    ('GatherV2_6', {
1929        'block': P.Gather(),
1930        'desc_const': [0],
1931        'desc_inputs': [[1152], Tensor(np.array(10).astype(np.int32))],
1932        'desc_bprop': [Tensor(np.array(10).astype(np.float32))]}),
1933    ('SparseGatherV2_0', {
1934        'block': P.SparseGatherV2(),
1935        'desc_const': [0],
1936        'desc_inputs': [[3, 1, 2], Tensor(np.array([0, 1]).astype(np.int32))],
1937        'desc_bprop': [[2, 1, 2]]}),
1938    ('Range', {
1939        'block': inner.Range(1.0, 5.0),
1940        'desc_inputs': [Tensor(np.ones([10]).astype(np.float32))],
1941        'desc_bprop': [[10]]}),
1942    ('UnsortedSegmentSum', {
1943        'block': P.UnsortedSegmentSum(),
1944        'desc_const': [1280],
1945        'desc_inputs': [[1280, 1024], Tensor(np.ones(1280).astype(np.int32))],
1946        'desc_bprop': [[1280, 1024]]}),
1947    ('UnsortedSegmentSum_1', {
1948        'block': P.UnsortedSegmentSum(),
1949        'desc_const': [4],
1950        'desc_inputs': [[3, 2, 1, 3], Tensor(np.array([[0, 1], [0, 1], [0, 1]]).astype(np.int32))],
1951        'desc_bprop': [[4, 1, 3]]}),
1952    ('UnsortedSegmentMin', {
1953        'block': P.UnsortedSegmentMin(),
1954        'desc_const': [4],
1955        'desc_inputs': [[3, 2, 1, 3], Tensor(np.array([1, 2, 3]).astype(np.int32))],
1956        'desc_bprop': [[4, 2, 1, 3]]}),
1957    ('UnsortedSegmentMax', {
1958        'block': P.UnsortedSegmentMax(),
1959        'desc_const': [4],
1960        'desc_inputs': [[3, 2, 1, 3], Tensor(np.array([1, 2, 3]).astype(np.int32))],
1961        'desc_bprop': [[4, 2, 1, 3]]}),
1962    ('UnsortedSegmentProd', {
1963        'block': P.UnsortedSegmentProd(),
1964        'desc_const': [4],
1965        'desc_inputs': [[3, 2, 1, 3], Tensor(np.array([0, 1, 0]).astype(np.int32))],
1966        'desc_bprop': [[4, 2, 1, 3]]}),
1967    ('DropoutGenMask', {
1968        'block': P.DropoutGenMask(),
1969        'desc_const': [(2, 2), Tensor(0.5, mstype.float32)],
1970        'desc_inputs': [],
1971        'desc_bprop': [Tensor(np.ones(1).astype(np.int8))],
1972        'skip': ['backward']}),
1973    ('DropoutDoMask', {
1974        'block': P.DropoutDoMask(),
1975        'desc_const': [Tensor(0.5)],
1976        'desc_inputs': [[64, 12, 128, 128], Tensor(np.ones(1572864).astype(np.uint8))],
1977        'desc_bprop': [[64, 12, 128, 128]]}),
1978    ('Dropout', {
1979        'block': nn.Dropout(0.5),
1980        'desc_inputs': [[64, 12, 128, 128]],
1981        'desc_bprop': [[64, 12, 128, 128]]}),
1982    ('ReduceMean0', {
1983        'block': P.ReduceMean(),
1984        'desc_const': [(2,)],
1985        'desc_inputs': [[3, 2, 2]],
1986        'desc_bprop': [[3, 2]]}),
1987    ('ReduceMean1', {
1988        'block': P.ReduceMean(),
1989        'desc_const': [2],
1990        'desc_inputs': [[3, 2, 2]],
1991        'desc_bprop': [[3, 2]]}),
1992    ('All', {
1993        'block': P.ReduceAll(),
1994        'desc_const': [(1,)],
1995        'desc_inputs': [Tensor(np.ones([3, 2]).astype(np.bool_))],
1996        'desc_bprop': [[3]],
1997        'skip': ['backward']}),
1998    ('DescConst', {
1999        'block': Tensor(np.array([2], np.float32)),
2000        'desc_inputs': [],
2001        'desc_bprop': [[1]],
2002        'skip': ['backward'],
2003        'add_fake_input': True}),
2004    ('Fill', {
2005        'block': P.Fill(),
2006        'desc_const': [mstype.float32, (2, 3), 1.0],
2007        'desc_inputs': [],
2008        'desc_bprop': [[2, 3]],
2009        'skip': ['backward'],
2010        'add_fake_input': True}),
2011    ('OnesLike', {
2012        'block': P.OnesLike(),
2013        'desc_inputs': [Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))],
2014        'desc_bprop': [Tensor(np.array([[1, 1], [1, 1]]).astype(np.int32))]
2015    }),
2016    ('ZerosLike', {
2017        'block': P.ZerosLike(),
2018        'desc_inputs': [Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))],
2019        'desc_bprop': [Tensor(np.array([[1, 1], [1, 1]]).astype(np.int32))]
2020    }),
2021    ('Softmax', {
2022        'block': P.Softmax(),
2023        'desc_inputs': [[5, 5]],
2024        'desc_bprop': [[5, 5]]}),
2025    ('Softsign', {
2026        'block': P.Softsign(),
2027        'desc_inputs': [[5, 5]],
2028        'desc_bprop': [[5, 5]]}),
2029    ('DepthwiseConv2dNative_1', {
2030        'block': P.DepthwiseConv2dNative(3, (3, 3), pad_mode="pad", pad=1, stride=2),
2031        'desc_inputs': [[10, 32, 32, 32], [1, 32, 3, 3]],
2032        'desc_bprop': [[10, 32, 16, 16]]}),
2033    ('DepthwiseConv2dNative_2', {
2034        'block': P.DepthwiseConv2dNative(1, (3, 3), pad_mode="same", pad=0, stride=1),
2035        'desc_inputs': [[2592, 2048, 4, 4], [1, 2048, 3, 3]],
2036        'desc_bprop': [[2592, 2048, 4, 4]]}),
2037    ('SigmoidCrossEntropyWithLogits', {
2038        'block': P.SigmoidCrossEntropyWithLogits(),
2039        'desc_inputs': [[128, 10], [128, 10]],
2040        'desc_bprop': [[128, 10]]}),
2041    ('Pad', {
2042        'block': P.Pad(((1, 2), (2, 3))),
2043        'desc_inputs': [[7, 7]],
2044        'desc_bprop': [[10, 12]]}),
2045    ('BinaryCrossEntropy', {
2046        'block': P.BinaryCrossEntropy(),
2047        'desc_inputs': [[1, 2, 3], [1, 2, 3], [1, 2, 3]],
2048        'desc_bprop': []}),
2049    ('SparseApplyAdagrad', {
2050        'block': SparseApplyAdagradNet(),
2051        'desc_inputs': [[3, 3], Tensor(np.ones((3,), np.int32))],
2052        'desc_bprop': [[3, 3], [3, 3]],
2053        'skip': ['backward']}),
2054    ('SparseApplyAdagradV2', {
2055        'block': SparseApplyAdagradV2Net(),
2056        'desc_inputs': [[3, 3], Tensor(np.ones((3,), np.int32))],
2057        'skip': ['backward']}),
2058    ('SparseApplyFtrl', {
2059        'block': SparseApplyFtrlNet(),
2060        'desc_inputs': [[3, 3], Tensor(np.ones((3,), np.int32))],
2061        'skip': ['backward']}),
2062    ('SparseApplyFtrlV2', {
2063        'block': SparseApplyFtrlV2Net(),
2064        'desc_inputs': [[3, 3], Tensor(np.ones((3,), np.int32))],
2065        'skip': ['backward']}),
2066    ('ApplyProximalAdagrad', {
2067        'block': ApplyProximalAdagradNet(),
2068        'desc_inputs': [[3, 3]],
2069        'skip': ['backward']}),
2070    ('SparseApplyProximalAdagrad', {
2071        'block': SparseApplyProximalAdagradNet(),
2072        'desc_inputs': [[3, 3], Tensor(np.ones((3,), np.int32))],
2073        'skip': ['backward']}),
2074    ('ApplyAdaMax', {
2075        'block': ApplyAdaMaxNet(),
2076        'desc_inputs': [[3, 3]],
2077        'skip': ['backward']}),
2078    ('ApplyAdadelta', {
2079        'block': ApplyAdadeltaNet(),
2080        'desc_inputs': [[3, 3]],
2081        'skip': ['backward']}),
2082    ('ApplyAdagrad', {
2083        'block': ApplyAdagradNet(),
2084        'desc_inputs': [[3, 3]],
2085        'skip': ['backward']}),
2086    ('ApplyAdagradV2', {
2087        'block': ApplyAdagradV2Net(),
2088        'desc_inputs': [[3, 3]],
2089        'skip': ['backward']}),
2090    ('ApplyAddSign', {
2091        'block': ApplyAddSignNet(),
2092        'desc_inputs': [[3, 3]],
2093        'skip': ['backward']}),
2094    ('ApplyPowerSign', {
2095        'block': ApplyPowerSignNet(),
2096        'desc_inputs': [[3, 3]],
2097        'skip': ['backward']}),
2098    ('ApplyGradientDescent', {
2099        'block': ApplyGradientDescentNet(),
2100        'desc_inputs': [[3, 3]],
2101        'skip': ['backward']}),
2102    ('ApplyProximalGradientDescent', {
2103        'block': ApplyProximalGradientDescentNet(),
2104        'desc_inputs': [[3, 3]],
2105        'skip': ['backward']}),
2106    ('Flatten_1', {
2107        'block': NetForFlatten(),
2108        'desc_inputs': [Tensor(np.ones([2, 3, 4]).astype(np.int32)), Tensor(np.ones([2, 12]).astype(np.int32))],
2109        'desc_bprop': [Tensor(np.ones([2, 12]).astype(np.int32))],
2110        'skip': ['backward']}),
2111    ('Flatten_2', {
2112        'block': NetForFlatten(),
2113        'desc_inputs': [Tensor(np.ones([8]).astype(np.int32)), Tensor(np.ones([8, 3]).astype(np.int32))],
2114        'desc_bprop': [Tensor(np.ones([8, 3]).astype(np.int32))],
2115        'skip': ['backward']}),
2116    ('Flatten_3', {
2117        'block': NetForFlattenComposed(),
2118        'desc_inputs': [Tensor(np.ones([2, 3, 4]).astype(np.int32)), Tensor(np.ones([2, 12]).astype(np.int32))],
2119        'desc_bprop': [Tensor(np.ones([2, 12]).astype(np.int32))],
2120        'skip': []}),
2121    ('ArgmaxNet', {
2122        'block': ArgmaxNet(),
2123        'desc_inputs': [Tensor(np.array([[128, 32, 32, 64], [128, 32, 32, 64]]).astype(np.float16))],
2124        'desc_bprop': [Tensor(np.array([[128, 32, 32, 64], [128, 32, 32, 64]]).astype(np.float16))],
2125        'skip': ['backward']}),
2126    ('ArgminNet', {
2127        'block': ArgminNet(),
2128        'desc_inputs': [Tensor(np.array([[128, 32, 32, 64], [128, 32, 32, 64]]).astype(np.float16))],
2129        'desc_bprop': [Tensor(np.array([[128, 32, 32, 64], [128, 32, 32, 64]]).astype(np.float16))],
2130        'skip': ['backward']}),
2131    ('StridedSliceNet', {
2132        'block': StridedSliceNet(),
2133        'desc_inputs': [[6, 7, 8, 9, 10]],
2134        'skip': ['backward']}),
2135    ('OneHot', {
2136        'block': P.OneHot(),
2137        'desc_const': [3, Tensor(1.0, mstype.float32), Tensor(0.0, mstype.float32)],
2138        'desc_inputs': [Tensor(np.array([64]).astype(np.int32))],
2139        'desc_bprop': [[1, 3]]}),
2140    ('ReduceProd_0', {
2141        'block': P.ReduceProd(),
2142        'desc_const': [0],
2143        'desc_inputs': [[3, 2]],
2144        'desc_bprop': [[2]]}),
2145    ('ReduceProd_1', {
2146        'block': P.ReduceProd(keep_dims=True),
2147        'desc_const': [0],
2148        'desc_inputs': [[3, 2]],
2149        'desc_bprop': [[1, 2]]}),
2150    ('CumProd', {
2151        'block': P.CumProd(),
2152        'desc_const': [0],
2153        'desc_inputs': [[3, 2]],
2154        'desc_bprop': [[3, 2]]}),
2155    ('ApplyFtrl', {
2156        'block': ApplyFtrlNet(),
2157        'desc_inputs': [[3, 3]],
2158        'desc_bprop': [3, 3],
2159        'skip': ['backward']}),
2160    ('ApplyRMSProp', {
2161        'block': ApplyRMSNet(),
2162        'desc_inputs': [[3, 3]],
2163        'desc_bprop': [3, 3],
2164        'skip': ['backward']}),
2165    ('ApplyCenteredRMSProp', {
2166        'block': P.ApplyCenteredRMSProp(),
2167        'desc_const': [0.9, 0.0, 1e-10, 0.001],
2168        'desc_inputs': [Tensor(1., mstype.float32), Tensor(2., mstype.float32), Tensor(1., mstype.float32),
2169                        Tensor(2., mstype.float32), Tensor(1., mstype.float32)],
2170        'desc_bprop': [1],
2171        'skip': ['backward']}),
2172    ('CTCLoss', {
2173        'block': P.CTCLoss(),
2174        'desc_inputs': [Tensor(np.ones([6, 4, 6]).astype(np.float32)),
2175                        Tensor(np.array([[0, 1], [1, 0], [2, 3], [3, 2]]).astype(np.int64)),
2176                        Tensor(np.array([1, 2, 3, 4]).astype(np.int32)),
2177                        Tensor(np.array([6, 6, 6, 6]).astype(np.int32))],
2178        'desc_bprop': [[4], [6, 4, 6]]}),
2179    ('CTCGreedyDecoder', {
2180        'block': CTCGreedyDecoderNet(),
2181        'desc_inputs': [[2, 2, 3], Tensor(np.array([2, 2]).astype(np.int32))],
2182        'skip': ['backward']}),
2183    ('L2Loss_1', {
2184        'block': P.L2Loss(),
2185        'desc_inputs': [Tensor(np.array([1, 2, 3, 4]), mstype.float32)],
2186        'desc_bprop': []}),
2187    ('L2Loss_2', {
2188        'block': P.L2Loss(),
2189        'desc_inputs': [Tensor(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]), mstype.float16)],
2190        'desc_bprop': []}),
2191    ('SoftMarginLoss', {
2192        'block': P.SoftMarginLoss(reduction="none"),
2193        'desc_inputs': [Tensor(np.array([[0.3, 0.7], [0.5, 0.5]]).astype(np.float32)),
2194                        Tensor(np.array([[-1, 1], [1, -1]]).astype(np.float32))],
2195        'desc_bprop': [Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))]}),
2196    ('BCEWithLogitsLoss', {
2197        'block': P.BCEWithLogitsLoss(),
2198        'desc_inputs': [[3, 3], [3, 3], [3, 3], [3, 3]],
2199        'desc_bprop': []}),
2200    ('ResizeBilinear', {
2201        'block': P.ResizeBilinear((5, 5)),
2202        'desc_inputs': [Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mstype.float16)],
2203        'desc_bprop': [Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mstype.float16)]}),
2204    ('ResizeBilinearGrad', {
2205        'block': G.ResizeBilinearGrad(),
2206        'desc_inputs': [Tensor([[[[1, 2, 3, 4, 5]]]], mstype.float32), Tensor([[[[1, 2, 3, 4, 5]]]], mstype.float32)],
2207        'desc_bprop': [Tensor([[[[1, 2, 3, 4, 5]]]], mstype.float32)],
2208        'skip': ['backward']}),
2209    ('ROIAlign', {
2210        'block': P.ROIAlign(7, 7, 0.03125, 2),
2211        'desc_inputs': [[2, 256, 192, 320], [1024, 5]],
2212        'desc_bprop': [[1024, 256, 7, 7]]}),
2213    ('ROIAlignGrad', {
2214        'block': G.ROIAlignGrad((1, 1, 1, 1), 2, 2, 0.5, 2),
2215        'desc_inputs': [[1, 1, 2, 2], [1, 5]],
2216        'desc_bprop': [[1, 1, 2, 2]],
2217        'skip': ['backward']}),
2218    ('LARSUpdate', {
2219        'block': P.LARSUpdate(1e-05, 0.001, False),
2220        'desc_const': [0.0, 0.001],
2221        'desc_inputs': [[3, 3], [3, 3], [3, 3], [3, 3]],
2222        'desc_bprop': [3, 3],
2223        'skip': ['backward']}),
2224    ('SGD', {
2225        'block': P.SGD(0.0, 0.0, False),
2226        'desc_inputs': [[3, 3], [3, 3], Tensor(0.001, mstype.float32), [3, 3], Tensor(0.1, mstype.float32), [3, 3]],
2227        'desc_bprop': [3, 3],
2228        'skip': ['backward']}),
2229    ('BinaryCrossEntropy', {
2230        'block': P.BinaryCrossEntropy(),
2231        'desc_inputs': [Tensor([[0.3, 0.8], [0.4, 0.3]], mstype.float16),
2232                        Tensor([[0.4, 1.2], [-0.4, -0.9]], mstype.float16),
2233                        Tensor([[-1.4, -0.7], [0.9, 0.7]], mstype.float16)],
2234        'desc_bprop': []}),
2235    ('BinaryCrossEntropyGrad', {
2236        'block': G.BinaryCrossEntropyGrad(),
2237        'desc_inputs': [Tensor([[0.3, 0.8], [0.4, 0.3]], mstype.float16),
2238                        Tensor([[0.4, 1.2], [-0.4, -0.9]], mstype.float16), Tensor(0.85, mstype.float16),
2239                        Tensor([[-1.4, -0.7], [0.9, 0.7]], mstype.float16)],
2240        'desc_bprop': [],
2241        'skip': ['backward']}),
2242    ('DataFormatDimMap', {
2243        'block': P.DataFormatDimMap(),
2244        'desc_inputs': [Tensor([0, 1, 2, 3], mstype.int32)],
2245        'desc_bprop': [],
2246        'skip': ['backward']}),
2247    ('MaxPoolGradGrad', {
2248        'block': G.MaxPoolGradGrad(),
2249        'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2), mstype.float16),
2250                        Tensor(np.random.rand(1, 1, 2, 2), mstype.float16),
2251                        Tensor(np.random.rand(1, 1, 2, 2), mstype.float16)],
2252        'desc_bprop': [],
2253        'skip': ['backward']}),
2254    ('MaxPoolGradGradWithArgmax', {
2255        'block': G.MaxPoolGradGradWithArgmax(),
2256        'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2), mstype.float16),
2257                        Tensor(np.random.rand(1, 1, 2, 2), mstype.float16),
2258                        Tensor(np.zeros((1, 1, 2, 2)), mstype.uint16)],
2259        'desc_bprop': [],
2260        'skip': ['backward']}),
2261    ('Roll', {
2262        'block': nn.Roll(shift=[1, -2], axis=[0, 1]),
2263        'desc_inputs': [Tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], mstype.float32)],
2264        'desc_bprop': [Tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], mstype.float32)]}),
2265    ('SoftShrink', {
2266        'block': P.SoftShrink(),
2267        'desc_inputs': [Tensor(np.array([[0.5297, 0.7871, 1.1754], [0.7836, 0.6218, -1.1542]]), mstype.float32)],
2268        'desc_bprop': [Tensor(np.array([[0, 0.4, 1], [1, 2, 4]]), mstype.float32)]}),
2269    ('SoftShrinkGrad', {
2270        'block': G.SoftShrinkGrad(),
2271        'desc_inputs': [Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]), mstype.float16),
2272                        Tensor(np.array([[-3, -2, 0], [1, 2, 4]]), mstype.float16)],
2273        'desc_bprop': [],
2274        'skip': ['backward']}),
2275    ('HSigmoidGrad', {
2276        'block': G.HSigmoidGrad(),
2277        'desc_inputs': [Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]), mstype.float16),
2278                        Tensor(np.array([[-4, -3, -2], [1, 2, 4]]), mstype.float16)],
2279        'skip': ['backward']}),
2280    ('HSigmoid', {
2281        'block': P.HSigmoid(),
2282        'desc_inputs': [Tensor(np.array([[-4, 4, 1]]), mstype.float32)],
2283        'desc_bprop': [Tensor(np.array([[0, 1, 0.6666]]), mstype.float32)],
2284        'skip': ['backward']}),
2285    ('HardShrink', {
2286        'block': P.HShrink(),
2287        'desc_inputs': [Tensor(np.array([[0.5, 1, 2.0], [0.0533, 0.0776, -2.1233]]), mstype.float32)],
2288        'desc_bprop': [],
2289        'skip': ['backward']}),
2290    ('HShrinkGrad', {
2291        'block': G.HShrinkGrad(),
2292        'desc_inputs': [Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]), mstype.float16),
2293                        Tensor(np.array([[-4, -3, -2], [1, 2, 4]]), mstype.float16)],
2294        'skip': ['backward']}),
2295    ('ApplyAdagradDA', {
2296        'block': ApplyAdagradDANet(),
2297        'desc_inputs': [Tensor(np.array([[0.3, 0.4], [0.1, 0.2]]).astype(np.float32)),
2298                        Tensor(0.001, mstype.float32),
2299                        Tensor(0.001, mstype.float32),
2300                        Tensor(0.001, mstype.float32),
2301                        Tensor(2, mstype.int32)],
2302        'skip': ['backward']}),
2303    ('SparseApplyRMSProp', {
2304        'block': SparseApplyRMSPropNet(0.2, 0.01, 1e-6),
2305        'desc_inputs': [Tensor(0.01, mstype.float32),
2306                        Tensor(np.array([[0.3, 0.7], [0.1, 0.8]]).astype(np.float32)),
2307                        Tensor(np.array([0, 1], dtype=np.int32))],
2308        'skip': ['backward']}),
2309]
2310
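# A minimal, hypothetical helper (not part of the original pipelines) showing how a
# single entry with concrete Tensor inputs could be exercised by hand.  Entries whose
# 'desc_inputs' are plain shape lists are expanded by the case-by-case pipelines and
# are not handled here; passing 'desc_const' values after the tensor inputs is an
# assumption based on how the entries above are laid out.
def _run_case_forward(case_config):
    """Forward-only sketch: call the case's block on its declared inputs."""
    block = case_config['block']
    inputs = case_config.get('desc_inputs', [])
    consts = case_config.get('desc_const', [])
    return block(*inputs, *consts)

# Example, kept as a comment so nothing runs at import time:
#     _run_case_forward(dict(test_case_nn_ops)['OnesLike'])
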
2311test_case_array_ops = [
2312    ('SpaceToDepth', {
2313        'block': P.SpaceToDepth(2),
2314        'desc_inputs': [[1, 3, 2, 2]],
2315        'desc_bprop': [[1, 12, 1, 1]]}),
2316    ('DepthToSpace', {
2317        'block': P.DepthToSpace(2),
2318        'desc_inputs': [[1, 12, 1, 1]],
2319        'desc_bprop': [[1, 3, 2, 2]]}),
2320    ('Split', {
2321        'block': P.Split(1, 2),
2322        'desc_inputs': [Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))],
2323        'skip': ['backward']}),
2324    ('Argmax', {
2325        'block': P.Argmax(),
2326        'desc_inputs': [[128, 32, 32, 64]],
2327        'desc_bprop': [0],
2328        'skip': ['backward']}),
2329    ('Argmin', {
2330        'block': P.Argmin(),
2331        'desc_inputs': [[128, 32, 32, 64]],
2332        'desc_bprop': [1],
2333        'skip': ['backward']}),
2334    ('ArgMaxWithValue', {
2335        'block': P.ArgMaxWithValue(),
2336        'desc_inputs': [[128, 32, 32, 64]],
2337        'desc_bprop': [[1], [1]],
2338        'skip': ['backward']}),
2339    ('ArgMinWithValue', {
2340        'block': P.ArgMinWithValue(),
2341        'desc_inputs': [[128, 32, 32, 64]],
2342        'desc_bprop': [[1], [1]],
2343        'skip': ['backward']}),
2344    ('Transpose_dim3', {
2345        'block': P.Transpose(),
2346        'desc_const': [(0, 2, 1)],
2347        'desc_inputs': [[1, 2, 3]],
2348        'desc_bprop': [[1, 3, 2]]}),
2349    ('Transpose_dim4', {
2350        'block': P.Transpose(),
2351        'desc_const': [(0, 1, 2, 3)],
2352        'desc_inputs': [[1, 2, 3, 4]],
2353        'desc_bprop': [[1, 2, 4, 3]]}),
2354    ('AddN', {
2355        'block': NetForTupleInput(P.AddN()),
2356        'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]],
2357        'desc_bprop': [[2, 3, 3, 5]]}),
2358    ('AccumulateNV2', {
2359        'block': NetForTupleInput(P.AccumulateNV2()),
2360        'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]],
2361        'desc_bprop': [[2, 3, 3, 5]]}),
2362    ('Shape', {
2363        'block': P.Shape(),
2364        'desc_inputs': [[3, 3, 2, 2]],
2365        'skip': ['backward']}),
2366    ('Reshape', {
2367        'block': P.Reshape(),
2368        'desc_const': [(64,)],
2369        'desc_inputs': [[64, 1]],
2370        'desc_bprop': [[64]]}),
2371    ('Cast', {
2372        'block': P.Cast(),
2373        'desc_const': [mstype.int32],
2374        'desc_inputs': [[2, 3, 4, 5]],
2375        'desc_bprop': [Tensor(np.ones((2, 3, 4, 5)).astype(np.int32))]}),
2376    ('ExpandDims', {
2377        'block': P.ExpandDims(),
2378        'desc_const': [0],
2379        'desc_inputs': [[2, 2]],
2380        'desc_bprop': [[1, 2, 2]]}),
2381    ('ExpandDims_1', {
2382        'block': P.ExpandDims(),
2383        'desc_const': [-1],
2384        'desc_inputs': [[2, 2]],
2385        'desc_bprop': [[2, 2, 1]]}),
2386    ('Squeeze', {
2387        'block': P.Squeeze(2),
2388        'desc_inputs': [[3, 2, 1]],
2389        'desc_bprop': [[3, 2]]}),
2390    ('Squeeze_0', {
2391        'block': P.Squeeze(),
2392        'desc_inputs': [[3, 1, 2, 1]],
2393        'desc_bprop': [[3, 2]]}),
2394    ('Squeeze_1', {
2395        'block': P.Squeeze(),
2396        'desc_inputs': [[1, 1, 1, 1]],
2397        'desc_bprop': [1.0],
2398        'skip': ['backward']}),
2399    ('Squeeze_2', {
2400        'block': P.Squeeze((2, 3)),
2401        'desc_inputs': [[3, 2, 1, 1]],
2402        'desc_bprop': [[3, 2]]}),
2403    ('Size', {
2404        'block': P.Size(),
2405        'desc_inputs': [[2, 3, 5]],
2406        'skip': ['backward']}),
2407    ('Tile_0', {
2408        'block': P.Tile(),
2409        'desc_const': [(1, 2)],
2410        'desc_inputs': [[64, 1]],
2411        'desc_bprop': [[64, 2]]}),
2412    ('Tile_1', {
2413        'block': P.Tile(),
2414        'desc_const': [(1, 1)],
2415        'desc_inputs': [[64, 1]],
2416        'desc_bprop': [[64, 1]]}),
2417    ('Tile_2', {
2418        'block': P.Tile(),
2419        'desc_const': [(2, 1, 1, 2)],
2420        'desc_inputs': [[2, 2, 2]],
2421        'desc_bprop': [[2, 2, 2, 4]]}),
2422    ('ReverseV2', {
2423        'block': P.ReverseV2(axis=[1]),
2424        'desc_inputs': [(Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]).astype(np.float32)))],
2425        'desc_bprop': [(Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]).astype(np.float32)))]}),
2426    ('Rint', {
2427        'block': P.Rint(),
2428        'desc_inputs': [(Tensor(np.array([-1.6, -0.1, 1.5, 2.0]).astype(np.float32)))],
2429        'skip': ['backward']}),
2430    ('ConcatV2_0', {
2431        'block': NetForConcat1(),
2432        'desc_inputs': [
2433            Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32)),
2434            Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))],
2435        'desc_bprop': [([4, 2], {'dtype': np.int32})]}),
2436    ('ConcatV2_1', {
2437        'block': NetForConcat2(),
2438        'desc_inputs': [Tensor(np.array([[[0, 1, 2]], [[2, 1, 2]]]).astype(np.int32)),
2439                        Tensor(np.array([[[0, 1]], [[2, 1]]]).astype(np.int32))],
2440        'desc_bprop': [([2, 1, 5], {'dtype': np.int32})]}),
2441    ('ConcatV2_2', {
2442        'block': NetForConcat(),
2443        'desc_inputs': [[2, 2]],
2444        'desc_bprop': [[4, 2]]}),
2445    ('ConcatV2_3', {
2446        'block': NetForConcat1(),
2447        'desc_inputs': [[2, 2], [2, 2]],
2448        'desc_bprop': [[4, 2]]}),
2449    ('ConcatV2_4', {
2450        'block': NetForConcat3(),
2451        'desc_inputs': [
2452            Tensor(np.ones((3, 2, 3), np.float32)),
2453            Tensor(np.ones((5, 2, 3), np.float32)),
2454            Tensor(np.ones((6, 2, 3), np.float32))],
2455        'desc_bprop': [[14, 2, 3]]}),
2456    ('ConcatV2_5', {
2457        'block': NetForConcat4(),
2458        'desc_inputs': [Tensor(np.array([1], np.float32)),
2459                        Tensor(np.array([1], np.float32)),
2460                        Tensor(np.array([1], np.float32))],
2461        'desc_bprop': [[3,]]}),
2462    ('Stack_0', {
2463        'block': NetForStackInput(P.Stack()),
2464        'desc_inputs': [[2, 2], [2, 2], [2, 2]],
2465        'desc_bprop': [[3, 2, 2]],
2466    }),
2467    ('Stack_1', {
2468        'block': NetForStackInput(P.Stack(axis=-2)),
2469        'desc_inputs': [[3, 2, 3], [3, 2, 3], [3, 2, 3]],
2470        'desc_bprop': [[3, 2, 3, 3]],
2471    }),
2472    ('Stack_2', {
2473        'block': NetForStackInput(P.Stack()),
2474        'desc_inputs': [[128, 128], [128, 128]],
2475        'desc_bprop': [[2, 128, 128]],
2476    }),
2477    ('Stack_3', {
2478        'block': NetForStackInput(P.Stack()),
2479        'desc_inputs': [[2, 2]],
2480        'desc_bprop': [[1, 2, 2]]}),
2481    ('Unpack_0', {
2482        'block': NetForUnpackInput(P.Unstack(axis=0)),
2483        'desc_inputs': [[2, 4]],
2484        'desc_bprop': [[4], [4]],
2485    }),
2486    ('Unpack_1', {
2487        'block': NetForUnpackInput(P.Unstack(axis=-1)),
2488        'desc_inputs': [Tensor(np.array([[1, 1, 1]], np.float32))],
2489        'desc_bprop': [[1], [1], [1]],
2490    }),
2491    ('Diag_1', {
2492        'block': P.Diag(),
2493        'desc_inputs': [[4]],
2494        'desc_bprop': [[4, 4]],
2495    }),
2496    ('Diag_2', {
2497        'block': P.Diag(),
2498        'desc_inputs': [[4, 4]],
2499        'desc_bprop': [[4, 4, 4, 4]],
2500    }),
2501    ('DiagPart_1', {
2502        'block': P.DiagPart(),
2503        'desc_inputs': [[4, 4]],
2504        'desc_bprop': [[4]],
2505    }),
2506    ('DiagPart_2', {
2507        'block': P.DiagPart(),
2508        'desc_inputs': [[4, 4, 4, 4]],
2509        'desc_bprop': [[4, 4]],
2510    }),
2511    ('SpaceToBatch_1', {
2512        'block': P.SpaceToBatch(2, [[0, 0], [0, 0]]),
2513        'desc_inputs': [[1, 3, 2, 2]],
2514        'desc_bprop': [[4, 3, 1, 1]],
2515    }),
2516    ('SpaceToBatch_2', {
2517        'block': P.SpaceToBatch(2, [[1, 1], [0, 4]]),
2518        'desc_inputs': [[1, 3, 2, 2]],
2519        'desc_bprop': [[4, 3, 2, 3]],
2520    }),
2521    ('BatchToSpace_1', {
2522        'block': P.BatchToSpace(2, [[0, 0], [0, 0]]),
2523        'desc_inputs': [[4, 3, 1, 1]],
2524        'desc_bprop': [[1, 3, 2, 2]],
2525    }),
2526    ('BatchToSpace_2', {
2527        'block': P.BatchToSpace(2, [[0, 0], [0, 1]]),
2528        'desc_inputs': [[4, 3, 1, 1]],
2529        'desc_bprop': [[1, 3, 2, 1]],
2530    }),
2531    ('UnsortedSegmentMin_1', {
2532        'block': P.UnsortedSegmentMin(),
2533        'desc_const': [2],
2534        'desc_inputs': [Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32)),
2535                        Tensor(np.array([0, 1, 1]).astype(np.int32))],
2536        'desc_bprop': [Tensor(np.array([[1, 2, 3], [4, 2, 1]]).astype(np.float32))]}),
2537    ('BroadcastTo', {
2538        'block': P.BroadcastTo((2, 3)),
2539        'desc_inputs': [Tensor(np.array([1, 2, 3]).astype(np.float32))],
2540        'desc_bprop': [Tensor(np.array([[1, 2, 3], [1, 2, 3]]).astype(np.float32))]}),
2541    ('InTopK', {
2542        'block': P.InTopK(2),
2543        'desc_inputs': [Tensor(np.array([[1, 2, 3], [2, 3, 6], [4, 2, 1]]).astype(np.float32)),
2544                        Tensor(np.array([2, 1, 2]).astype(np.int32))],
2545        'skip': ['backward'],
2546    }),
2547    ('InplaceUpdate', {
2548        'block': P.InplaceUpdate((0, 2)),
2549        'desc_inputs': [Tensor(np.arange(24).reshape(3, 4, 2).astype(np.float32)),
2550                        Tensor(np.arange(16).reshape(2, 4, 2).astype(np.float32))],
2551        'skip': ['backward'],
2552    }),
2553    ('ReverseSequence', {
2554        'block': P.ReverseSequence(1, 0),
2555        'desc_inputs': [Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)),
2556                        Tensor(np.array([1, 2, 3]).astype(np.int32))],
2557        'desc_bprop': [[3, 3]]}),
2558    ('EditDistance', {
2559        'block': EditDistance(Tensor(np.array([1, 1, 2]).astype(np.int64)),
2560                              Tensor(np.array([2, 2, 2]).astype(np.int64))),
2561        'desc_inputs': [Tensor(np.array([[0, 0, 0], [1, 0, 1], [1, 1, 1]]).astype(np.int64)),
2562                        Tensor(np.array([1, 2, 3]).astype(np.float32)),
2563                        Tensor(np.array([[0, 1, 0], [0, 0, 1], [1, 1, 0], [1, 0, 1]]).astype(np.int64)),
2564                        Tensor(np.array([1, 3, 2, 1]).astype(np.float32))],
2565        'skip': ['backward'],
2566    }),
2567    ('LinSpace', {
2568        'block': P.LinSpace(),
2569        'desc_const': [5],
2570        'desc_inputs': [Tensor(1, mstype.float32),
2571                        Tensor(10, mstype.float32)],
2572        'skip': ['backward'],
2573    }),
2574    ('MaskedFill', {
2575        'block': P.MaskedFill(),
2576        'desc_inputs': [Tensor(np.array([[1.0, 2.0, 3.0]]), mstype.float32),
2577                        Tensor(np.array([[True, True, False]]), mstype.bool_),
2578                        Tensor(4.0, mstype.float32)],
2579        'desc_bprop': [Tensor(np.array([[1.0, 2.0, 3.0]]), mstype.float32)],
2580    }),
2581    ('MatrixDiag', {
2582        'block': inner.MatrixDiag(),
2583        'desc_inputs': [Tensor(np.array([1, -1]), mstype.float32),
2584                        Tensor(np.arange(-12, 0).reshape(3, 2, 2), mstype.float32)],
2585        'skip': ['backward'],
2586    }),
2587    ('MatrixDiagPart', {
2588        'block': inner.MatrixDiagPart(),
2589        'desc_inputs': [Tensor(np.arange(12).reshape(3, 2, 2), mstype.float32),
2590                        Tensor(np.arange(-12, 0).reshape(3, 2, 2), mstype.float32)],
2591        'skip': ['backward'],
2592    }),
2593    ('MatrixSetDiag', {
2594        'block': inner.MatrixSetDiag(),
2595        'desc_inputs': [Tensor(np.arange(12).reshape(3, 2, 2), mstype.float32),
2596                        Tensor(np.arange(6).reshape(3, 2), mstype.float32),
2597                        Tensor(np.arange(-12, 0).reshape(3, 2, 2), mstype.float32)],
2598        'skip': ['backward'],
2599    }),
2600    ('TransShape', {
2601        'block': P.TransShape(),
2602        'desc_const': [(1, 12, 24, 24)],
2603        'desc_inputs': [[1, 3, 24, 24]],
2604        'desc_bprop': [[1, 12, 24, 24]],
2605    }),
2606    ('ParallelConcat', {
2607        'block': ParallelConcatNet(),
2608        'desc_inputs': [Tensor([[1, 2]], mstype.float32),
2609                        Tensor([[5, 6]], mstype.float32)],
2610        'skip': ['backward'],
2611    }),
2612    ('SplitV', {
2613        'block': P.SplitV(size_splits=[1, 2], split_dim=1, num_split=2),
2614        'desc_inputs': [Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])],
2615        'desc_bprop': [(Tensor([[1], [4], [7]]),
2616                        Tensor([[2, 3], [5, 6], [8, 9]]))],
2617    }),
2618]
2619
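# These case lists are data only.  Presumably they are concatenated elsewhere in this
# file and returned from functions decorated with @mindspore_test and one of the
# imported case-by-case compile pipelines; a rough sketch (the aggregate name and the
# exact list members are assumptions):
#
#     test_case = functools.reduce(lambda x, y: x + y,
#                                  [test_case_nn_ops, test_case_array_ops, ...])
#
#     @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
#     def test_exec():
#         context.set_context(mode=context.GRAPH_MODE)
#         return test_case
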
2620test_case_other_ops = [
2621    ('ScalarLog', {
2622        'block': F.scalar_log,
2623        'desc_const': [0.0],
2624        'desc_inputs': [],
2625        'desc_bprop': [1],
2626        'skip': ['backward']}),
2627    ('BoundingBoxEncode', {
2628        'block': P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0)),
2629        'desc_inputs': [[256, 4], [256, 4]],
2630        'desc_bprop': [[256, 4]],
2631        'skip': ['backward']}),
2632    ('BoundingBoxDecode', {
2633        'block': P.BoundingBoxDecode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0), max_shape=(768, 1280)),
2634        'desc_inputs': [[256, 4], [256, 4]],
2635        'desc_bprop': [[256, 4]],
2636        'skip': ['backward']}),
2637    ('GatherNd', {
2638        'block': P.GatherNd(),
2639        'desc_inputs': (Tensor(np.ones((1, 3, 6, 6), np.float32)),
2640                        Tensor(np.ones((2, 4), np.int32))),
2641        'desc_bprop': [[2]]}),
2642    ('ScatterNd', {
2643        'block': P.ScatterNd(),
2644        'desc_const': [(3, 3)],
2645        'desc_inputs': (Tensor(np.ones((2, 2), np.int32)),
2646                        Tensor(np.ones((2,), np.int32))),
2647        'desc_bprop': [([3, 3], {'dtype': np.int32})]}),
2648    ('TensorScatterUpdate', {
2649        'block': P.TensorScatterUpdate(),
        'desc_inputs': (Tensor(np.arange(3 * 4 * 5).reshape((3, 4, 5)), mstype.float32),
                        Tensor(np.array([[0, 1], [1, 2]], np.int32)),
                        Tensor(np.ones([2, 5], np.float32) * 99)),
        'desc_bprop': [([3, 4, 5], {'dtype': np.float32})]}),
    ('ScatterMaxUseLocking', {
        'block': ScatterMax(use_locking=True),
        'desc_inputs': (Tensor(np.array([1, 0], np.int32)),
                        Tensor(np.array([[5.0, 5.0, 5.0], [4.0, 4.0, 4.0]], np.float32))),
        'skip': ['backward']}),
    ('ScatterMax1d', {
        'block': ScatterMax(),
        'desc_inputs': (Tensor(np.array([1, 0], np.int32)),
                        Tensor(np.array([[5.0, 5.0, 5.0], [4.0, 4.0, 4.0]], np.float32))),
        'skip': ['backward']}),
    ('ScatterMaxF32', {
        'block': ScatterMax(),
        'desc_inputs': (Tensor(np.array([[0, 0], [1, 1]], np.int32)),
                        Tensor(np.ones([2, 2, 3], np.float32) * 99)),
        'skip': ['backward']}),
    ('ScatterMaxF16', {
        'block': ScatterMax(np.float16),
        'desc_inputs': (Tensor(np.array([[0, 0], [1, 1]], np.int32)),
                        Tensor(np.ones([2, 2, 3], np.float16) * 99)),
        'skip': ['backward']}),
    ('ScatterMaxI32', {
        'block': ScatterMax(np.int32),
        'desc_inputs': (Tensor(np.array([[0, 0], [1, 1]], np.int32)),
                        Tensor(np.ones([2, 2, 3], np.int32) * 99)),
        'skip': ['backward']}),
    ('ScatterMinUseLocking', {
        'block': ScatterMin(use_locking=True),
        'desc_inputs': (Tensor(np.array([1, 0], np.int32)),
                        Tensor(np.ones([2, 3], np.float32))),
        'skip': ['backward']}),
    ('ScatterMin1d', {
        'block': ScatterMin(),
        'desc_inputs': (Tensor(np.array([1, 0], np.int32)),
                        Tensor(np.ones([2, 3], np.float32))),
        'skip': ['backward']}),
    ('ScatterMinF32', {
        'block': ScatterMin(),
        'desc_inputs': (Tensor(np.array([[0, 0], [1, 1]], np.int32)),
                        Tensor(np.ones([2, 2, 3], np.float32))),
        'skip': ['backward']}),
    ('ScatterMinF16', {
        'block': ScatterMin(np.float16),
        'desc_inputs': (Tensor(np.array([[0, 0], [1, 1]], np.int32)),
                        Tensor(np.ones([2, 2, 3], np.float16))),
        'skip': ['backward']}),
    ('ScatterMinI32', {
        'block': ScatterMin(np.int32),
        'desc_inputs': (Tensor(np.array([[0, 0], [1, 1]], np.int32)),
                        Tensor(np.ones([2, 2, 3], np.int32))),
        'skip': ['backward']}),
    ('ScatterUpdate', {
        'block': ScatterUpdate((6,)),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2.0, 3.0, 4.0], np.float32))),
        'skip': ['backward']}),
    ('ScatterAddUseLocking', {
        'block': ScatterAdd((6,), use_locking=True),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2.0, 3.0, 4.0], np.float32))),
        'skip': ['backward']}),
    ('ScatterNonAliasingAdd_1d', {
        'block': ScatterNonAliasingAdd((8,)),
        'desc_inputs': (Tensor(np.array([[2], [3], [4], [5]], np.int32)),
                        Tensor(np.array([2.0, 3.0, 4.0, 8.0], np.float32))),
        'skip': ['backward']}),
    ('ScatterNdAdd', {
        'block': ScatterNdAdd((8,)),
        'desc_inputs': (Tensor(np.array([[2], [3], [4], [5]], np.int32)),
                        Tensor(np.array([2.0, 3.0, 4.0, 8.0], np.float32))),
        'skip': ['backward']}),
    ('ScatterNdSub', {
        'block': ScatterNdSub((8,)),
        'desc_inputs': (Tensor(np.array([[2], [3], [4], [5]], np.int32)),
                        Tensor(np.array([2.0, 3.0, 4.0, 8.0], np.float32))),
        'skip': ['backward']}),
    ('ScatterAdd', {
        'block': ScatterAdd((6,)),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2.0, 3.0, 4.0], np.float32))),
        'skip': ['backward']}),
    ('ScatterAddScalar', {
        'block': ScatterAdd((6,)),
        'desc_inputs': (Tensor(np.array([2], np.int32)),
                        Tensor(np.array([2.0], np.float32))),
        'skip': ['backward']}),
    ('ScatterAdd2d', {
        'block': ScatterAdd((3, 4)),
        'desc_inputs': (Tensor(np.array([[0, 1], [1, 2]], np.int32)),
                        Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2]],
                                         [[3, 3, 3, 3], [4, 4, 4, 4]]], np.float32))),
        'skip': ['backward']}),
    ('ScatterAddF16', {
        'block': ScatterAdd((6,), np.float16),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2.0, 3.0, 4.0], np.float16))),
        'skip': ['backward']}),
    ('ScatterAddI8', {
        'block': ScatterAdd((6,), np.int8),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2, 3, 4], np.int8))),
        'skip': ['backward']}),
    ('ScatterAddI32', {
        'block': ScatterAdd((6,), np.int32),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2, 3, 4], np.int32))),
        'skip': ['backward']}),
    ('ScatterAddU8', {
        'block': ScatterAdd((6,), np.uint8),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2, 3, 4], np.uint8))),
        'skip': ['backward']}),
    ('ScatterMulUseLocking', {
        'block': ScatterMul((6,), use_locking=True),
        'desc_inputs': (Tensor(np.array([2], np.int32)),
                        Tensor(np.array([2.0], np.float32))),
        'skip': ['backward']}),
    ('ScatterMulScalar', {
        'block': ScatterMul((6,)),
        'desc_inputs': (Tensor(np.array([2], np.int32)),
                        Tensor(np.array([2.0], np.float32))),
        'skip': ['backward']}),
    ('ScatterMul2d', {
        'block': ScatterMul((3, 4)),
        'desc_inputs': (Tensor(np.array([[0, 1], [1, 2]], np.int32)),
                        Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2]],
                                         [[3, 3, 3, 3], [4, 4, 4, 4]]], np.float32))),
        'skip': ['backward']}),
    ('ScatterMulF16', {
        'block': ScatterMul((6,), np.float16),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2.0, 3.0, 4.0], np.float16))),
        'skip': ['backward']}),
    ('ScatterMulI8', {
        'block': ScatterMul((6,), np.int8),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2, 3, 4], np.int8))),
        'skip': ['backward']}),
    ('ScatterMulI32', {
        'block': ScatterMul((6,), np.int32),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2, 3, 4], np.int32))),
        'skip': ['backward']}),
    ('ScatterMulU8', {
        'block': ScatterMul((6,), np.uint8),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2, 3, 4], np.uint8))),
        'skip': ['backward']}),
    ('ScatterDivUseLocking', {
        'block': ScatterDiv((6,), use_locking=True),
        'desc_inputs': (Tensor(np.array([2], np.int32)),
                        Tensor(np.array([2.0], np.float32))),
        'skip': ['backward']}),
    ('ScatterDivScalar', {
        'block': ScatterDiv((6,)),
        'desc_inputs': (Tensor(np.array([2], np.int32)),
                        Tensor(np.array([2.0], np.float32))),
        'skip': ['backward']}),
    ('ScatterDiv2d', {
        'block': ScatterDiv((3, 4)),
        'desc_inputs': (Tensor(np.array([[0, 1], [1, 2]], np.int32)),
                        Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2]],
                                         [[3, 3, 3, 3], [4, 4, 4, 4]]], np.float32))),
        'skip': ['backward']}),
    ('ScatterDivF16', {
        'block': ScatterDiv((6,), np.float16),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2.0, 3.0, 4.0], np.float16))),
        'skip': ['backward']}),
    ('ScatterDivI8', {
        'block': ScatterDiv((6,), np.int8),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2, 3, 4], np.int8))),
        'skip': ['backward']}),
    ('ScatterDivU8', {
        'block': ScatterDiv((6,), np.uint8),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2, 3, 4], np.uint8))),
        'skip': ['backward']}),
    ('ScatterSubUseLocking', {
        'block': ScatterSub((6,), use_locking=True),
        'desc_inputs': (Tensor(np.array([2], np.int32)),
                        Tensor(np.array([2.0], np.float32))),
        'skip': ['backward']}),
    ('ScatterSubScalar', {
        'block': ScatterSub((6,)),
        'desc_inputs': (Tensor(np.array([2], np.int32)),
                        Tensor(np.array([2.0], np.float32))),
        'skip': ['backward']}),
    ('ScatterSub2d', {
        'block': ScatterSub((3, 4)),
        'desc_inputs': (Tensor(np.array([[0, 1], [1, 2]], np.int32)),
                        Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2]],
                                         [[3, 3, 3, 3], [4, 4, 4, 4]]], np.float32))),
        'skip': ['backward']}),
    ('ScatterSubF16', {
        'block': ScatterSub((6,), np.float16),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2.0, 3.0, 4.0], np.float16))),
        'skip': ['backward']}),
    ('ScatterSubI32', {
        'block': ScatterSub((6,), np.int32),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2, 3, 4], np.int32))),
        'skip': ['backward']}),
    ('ScatterSubI8', {
        'block': ScatterSub((6,), np.int8),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([2, 3, 4], np.int8))),
        'skip': ['backward']}),
    ('ScatterSubU8', {
        'block': ScatterSub((6,), np.uint8),
        'desc_inputs': (Tensor(np.array([2, 0, 5], np.int32)),
                        Tensor(np.array([1, 1, 0], np.uint8))),
        'skip': ['backward']}),
    ('SmoothL1Loss', {
        'block': P.SmoothL1Loss(),
        'desc_inputs': [[256, 4], [256, 4]],
        'desc_bprop': [[256, 4]]}),
    ('IOU', {
        'block': P.IOU(),
        'desc_inputs': [Tensor(np.ones((256, 4), np.float16)), Tensor(np.ones((128, 4), np.float16))],
        'desc_bprop': [convert([128, 256], np.float16)]}),
    ('PopulationCount', {
        'block': P.PopulationCount(),
        'desc_inputs': [Tensor(np.array([1, 2, 3]).astype(np.int16))],
        'skip': ['backward']}),
    ('BasicLSTMCellNet', {
        'block': BasicLSTMCellNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 32).astype(np.float16)),
                        Tensor(np.random.rand(1, 64).astype(np.float16)),
                        Tensor(np.random.rand(1, 64).astype(np.float16)),
                        Tensor(np.random.rand(96, 256).astype(np.float16)),
                        Tensor(np.random.rand(256,).astype(np.float16))],
        'desc_bprop': [Tensor(np.random.rand(1, 64).astype(np.float16)),
                       Tensor(np.random.rand(1, 64).astype(np.float16)),
                       Tensor(np.random.rand(1, 64).astype(np.float16)),
                       Tensor(np.random.rand(1, 64).astype(np.float16)),
                       Tensor(np.random.rand(1, 64).astype(np.float16))]}),
    ('DynamicGRUV2Net', {
        'block': DynamicGRUV2Net(),
        'desc_inputs': [Tensor(np.random.rand(2, 8, 64).astype(np.float16)),
                        Tensor(np.random.rand(64, 48).astype(np.float16)),
                        Tensor(np.random.rand(16, 48).astype(np.float16)),
                        Tensor(np.random.rand(48).astype(np.float16)),
                        Tensor(np.random.rand(48).astype(np.float16)),
                        Tensor(np.random.rand(8, 16).astype(np.float16))],
        'desc_bprop': [Tensor(np.random.rand(2, 8, 16).astype(np.float16)),
                       Tensor(np.random.rand(2, 8, 16).astype(np.float16)),
                       Tensor(np.random.rand(2, 8, 16).astype(np.float16)),
                       Tensor(np.random.rand(2, 8, 16).astype(np.float16)),
                       Tensor(np.random.rand(2, 8, 16).astype(np.float16))]}),
]
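# Note: the Scatter* wrappers above (defined earlier in this file) appear to hold the
# tensor being updated as a Parameter, so each op mutates state in place; that is
# presumably why every one of these cases sets 'skip': ['backward'] and is only exercised
# by the forward compilation pipeline.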

test_case_quant_ops = [
    ('Quant_1', {
        'block': inner.Quant(0.5, 0.0, False, "Round"),
        'desc_inputs': [Tensor(np.random.rand(1, 2, 4, 4), mstype.float32)],
        'skip': ['backward']}),
    ('Quant_2', {
        'block': inner.Quant(80.0, 10.0, True, "Round"),
        'desc_inputs': [Tensor([100.0, 200.0], mstype.float32)],
        'skip': ['backward']}),
    ('Quant_3', {
        'block': inner.Quant(80.0, 0.0, False, "Floor"),
        'desc_inputs': [Tensor([100.0, 200.0], mstype.float32)],
        'skip': ['backward']}),
    ('Quant_4', {
        'block': inner.Quant(80.0, 0.0, False, "Ceil"),
        'desc_inputs': [Tensor([100.0, 200.0], mstype.float32)],
        'skip': ['backward']}),
    ('Quant_5', {
        'block': inner.Quant(80.0, 0.0, False, "Trunc"),
        'desc_inputs': [Tensor([100.0, 200.0], mstype.float32)],
        'skip': ['backward']}),
    ('Quant_6', {
        'block': inner.Quant(-80.0, 10.0, False, "Round"),
        'desc_inputs': [Tensor([100.0, 200.0], mstype.float32)],
        'skip': ['backward']}),
    ('Quant_7', {
        'block': inner.Quant(80.0, -10.0, False, "Round"),
        'desc_inputs': [Tensor([100.0, 200.0], mstype.float32)],
        'skip': ['backward']}),
    ('Quant_8', {
        'block': inner.Quant(80.0, 10.0, False, "Round"),
        'desc_inputs': [Tensor([100.0, 200.0], mstype.float16)],
        'skip': ['backward']}),
]
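# The positional arguments passed to inner.Quant above appear to be
# (scale, offset, sqrt_mode, round_mode); the eight cases vary one knob at a time
# (round mode, sign of scale/offset, sqrt_mode, input dtype) to exercise the
# operator's attribute and dtype validation in forward compilation only.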

test_case_lists = [test_case_nn_ops, test_case_math_ops, test_case_array_ops,
                   test_case_other_ops, test_case_quant_ops]
test_case = functools.reduce(lambda x, y: x + y, test_case_lists)
# use -k to select a certain test case, e.g.
# pytest tests/python/ops/test_ops.py::test_backward_exec -k LayerNorm
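# Rough shape of an entry consumed by the case-by-case pipelines below (illustrative
# sketch only; the field names mirror the cases above, not an exhaustive schema):
#     ('CaseName', {
#         'block': SomeNetOrPrimitive(),   # cell / op under test
#         'desc_const': [...],             # optional constant (non-Tensor) arguments
#         'desc_inputs': [...],            # forward inputs (Tensors or shape lists)
#         'desc_bprop': [...],             # sens values for the gradient pipeline
#         'skip': ['backward']})           # optional stages to skip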


test_exec_case = test_case

test_backward_exec_case = filter(lambda x: 'skip' not in x[1] or 'backward' not in x[1]['skip'], test_case)
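# Cases that list 'backward' under 'skip' are excluded here; everything else is compiled
# for gradients as well. Note that filter() returns a lazy iterator in Python 3, which the
# test framework presumably consumes once when collecting test_backward_exec.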


@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
    context.set_context(mode=context.GRAPH_MODE)
    return test_exec_case


@mindspore_test(pipeline_for_compile_grad_ge_graph_for_case_by_case_config)
def test_backward_exec():
    context.set_context(mode=context.GRAPH_MODE)
    return test_backward_exec_case
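# Both runners above follow the same pattern: switch to GRAPH_MODE and return a case list
# to the pipeline named in @mindspore_test, which appears to expand each ('Name', {...})
# entry into its own compile-and-check test.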


@security_off_wrap
@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_summary_ops():
    if security.enable_security():
        return []
    test_cases_for_summary_ops = [
        ('Summary', {
            'block': SummaryNet(),
            'desc_inputs': [Tensor(np.array([1.1]).astype(np.float32)),
                            Tensor(np.array([1.2]).astype(np.float32))],
            'skip': ['backward']}),
        ('HistogramSummary', {
            'block': HistogramSummaryNet(),
            'desc_inputs': [Tensor(np.array([1.1]).astype(np.float32)),
                            Tensor(np.array([1.2]).astype(np.float32))],
            'skip': ['backward']}),
    ]
    context.set_context(mode=context.GRAPH_MODE)
    return test_cases_for_summary_ops


def test_summary_ops_security_on():
    if security.enable_security():
        with pytest.raises(ValueError) as exc:
            SummaryNet()
        assert str(exc.value) == 'The Summary is not supported, please without `-s on` and recompile source.'
        with pytest.raises(ValueError) as exc:
            HistogramSummaryNet()
        assert str(exc.value) == 'The Summary is not supported, please without `-s on` and recompile source.'


raise_set = [
    ('Cast_Error', {
        'block': (P.Cast(), {'exception': TypeError}),
        'desc_const': [mstype.int32],
        'desc_inputs': ['wrong input'],
        'desc_bprop': [Tensor(np.ones((2, 3, 3, 5)).astype(np.int32))]}),
    ('Maximum_Error', {
        'block': (P.Maximum(), {'exception': TypeError}),
        'desc_const': [(1, 2, 3)],
        'desc_inputs': [[2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]}),
    ('Shape_error', {
        'block': (P.Shape(), {'exception': TypeError}),
        'desc_inputs': [(64, 1)],
        'desc_bprop': [[64]]}),
    ('Flatten_Error', {
        'block': (NetForFlatten0D(), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.array(0).astype(np.int32))],
        'desc_bprop': [Tensor(np.array(0).astype(np.int32))]}),
    ('ScatterNdUpdate', {
        'block': (P.ScatterNdUpdate(), {'exception': TypeError}),
        'desc_inputs': (Tensor(np.ones((2, 3), np.float32)),
                        Tensor(np.ones((2, 2), np.float32)),
                        Tensor(np.ones((2,), np.float32))),
        'desc_bprop': [[2, 3]]}),
    ('PReLU', {
        'block': (P.PReLU(), {'exception': ValueError}),
        'desc_inputs': [[2], [1]],
        'desc_bprop': [[1]]}),
    ('SSIM', {
        'block': (nn.SSIM(), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones((1, 3, 8, 8)), mstype.float32),
                        Tensor(np.ones((1, 3, 8, 8)), mstype.float32)]})
]
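# Each raise_set entry pairs the block under test with the exception type the exception
# pipeline is expected to observe during compilation. For example, P.ScatterNdUpdate()
# above is fed float32 indices, which should trip its dtype validation with a TypeError.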


@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception)
def test_check_exception():
    return raise_set
