# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import pytest
import numpy as np
import mindspore as ms
import mindspore.ops.operations as P
from mindspore.nn import Cell
from mindspore import context, Tensor
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import initializer
from mindspore.train.model import Model
from mindspore.ops.composite import GradOperation
from mindspore.common import ParameterTuple
from tests.security_utils import security_off_wrap

context.set_context(mode=context.GRAPH_MODE)


class _Grad(Cell):
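    """Wrap `network` with `grad` (a GradOperation instance).

    When `real_inputs_count` is set and `sens_param` is enabled, the first
    `real_inputs_count` call arguments are the network inputs and the rest
    are packed as the sensitivity (initial gradient) values.
    """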
    def __init__(self, grad, network, wrt_params=False, real_inputs_count=None):
        super().__init__()
        self.network = network
        self.grad = grad
        self.sens_param = self.grad.sens_param
        self.wrt_params = wrt_params
        self.real_inputs_count = real_inputs_count
        if self.wrt_params:
            self.params = ParameterTuple(self.network.trainable_params())

    def construct(self, *inputs):
        if self.real_inputs_count is None or self.sens_param is False:
            if self.wrt_params:
                return self.grad(self.network, self.params)(*inputs)
            return self.grad(self.network)(*inputs)

        real_inputs = inputs[:self.real_inputs_count]
        sense_param_inputs = inputs[self.real_inputs_count:]
        if self.wrt_params:
            return self.grad(self.network, self.params)(*real_inputs, sense_param_inputs)
        return self.grad(self.network)(*real_inputs, sense_param_inputs)


class GradOfFirstInput(_Grad):
    """Get the gradient with respect to the network's first input."""

    def __init__(self, network, sens_param=True, real_inputs_count=None):
        super().__init__(grad=GradOperation(sens_param=sens_param),
                         network=network, real_inputs_count=real_inputs_count)


class GradOfAllInputs(_Grad):
    """Get the gradients with respect to all network inputs."""

    def __init__(self, network, sens_param=True, real_inputs_count=None):
        super().__init__(grad=GradOperation(get_all=True, sens_param=sens_param),
                         network=network, real_inputs_count=real_inputs_count)


class GradOfAllInputsAndParams(_Grad):
    """Get the gradients with respect to all inputs and trainable parameters."""

    def __init__(self, network, sens_param=True, real_inputs_count=None):
        super().__init__(grad=GradOperation(get_all=True, get_by_list=True, sens_param=sens_param),
                         network=network, wrt_params=True, real_inputs_count=real_inputs_count)


def _count_unequal_element(data_expected, data_me, rtol, atol):
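    """Assert that the fraction of elements outside `atol + |data_me| * rtol` stays below `rtol`."""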
    assert data_expected.shape == data_me.shape
    total_count = len(data_expected.flatten())
    error = np.abs(data_expected - data_me)
    greater = np.greater(error, atol + np.abs(data_me) * rtol)
    loss_count = np.count_nonzero(greater)
    assert (loss_count / total_count) < rtol, \
        "\ndata_expected_std:{0}\ndata_me_error:{1}\nloss:{2}".format(
            data_expected[greater], data_me[greater], error[greater])


def allclose_nparray(data_expected, data_me, rtol, atol, equal_nan=True):
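    """Compare two arrays with `np.allclose`; on failure, report only the mismatched elements."""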
    if np.any(np.isnan(data_expected)):
        assert np.allclose(data_expected, data_me, rtol,
                           atol, equal_nan=equal_nan)
    elif not np.allclose(data_expected, data_me, rtol, atol, equal_nan=equal_nan):
        _count_unequal_element(data_expected, data_me, rtol, atol)


class ControlGraphSupportNotEqual(Cell):
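    """Pick different computations in `construct` based on scalar `!=` / `==` comparisons."""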
    def construct(self, x, y, z, input_data):
        if x != y:
            out = input_data + input_data
        else:
            out = input_data - input_data
        if x == z:
            out2 = input_data * input_data
        else:
            out2 = input_data / input_data
        if x == z:
            out3_f = (lambda a: a + a)
            out3 = out3_f(input_data)
        else:
            out3_f = (lambda a: a + a + a)
            out3 = out3_f(input_data)
        return out, out2, out3


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ctrl_if_while_graph_support_not_equal_true():
    x = np.array(0).astype(np.float32)
    y = np.array(3).astype(np.float32)
    input_shape = (512, 512, 7, 7)
    input_data = np.random.randn(*input_shape).astype(np.float32)
    net = ControlGraphSupportNotEqual()
    model = Model(net)
    out_me = model.predict(Tensor(x), Tensor(y), Tensor(x), Tensor(input_data))
    out = input_data + input_data
    out2 = input_data * input_data
    out3 = input_data + input_data
    allclose_nparray(out, out_me[0].asnumpy(), 0.0001, 0.0001)
    allclose_nparray(out2, out_me[1].asnumpy(), 0.0001, 0.0001)
    allclose_nparray(out3, out_me[2].asnumpy(), 0.0001, 0.0001)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ctrl_if_while_graph_support_not_equal_false():
    x = np.array(0).astype(np.float32)
    y = np.array(0).astype(np.float32)
    z = np.array(3).astype(np.float32)
    input_shape = (512, 512, 7, 7)
    input_data = np.random.randn(*input_shape).astype(np.float32)
    net = ControlGraphSupportNotEqual()
    model = Model(net)
    out_me = model.predict(Tensor(x), Tensor(y), Tensor(z), Tensor(input_data))
    out = input_data - input_data
    out2 = input_data / input_data
    out3 = input_data + input_data + input_data
    allclose_nparray(out, out_me[0].asnumpy(), 0.0001, 0.0001)
    allclose_nparray(out2, out_me[1].asnumpy(), 0.0001, 0.0001)
    allclose_nparray(out3, out_me[2].asnumpy(), 0.0001, 0.0001)


class ControlBprop(Cell):
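    """Control-flow net with a custom `bprop` that returns hand-written gradients."""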
    def construct(self, x, y, z, input_data):
        if x != y:
            out = input_data + input_data
        else:
            out = input_data - input_data
        if x == z:
            out2 = input_data * input_data
        else:
            out2 = input_data / input_data
        if x == z:
            out3_f = (lambda a: a + a)
            out3 = out3_f(input_data)
        else:
            out3_f = (lambda a: a + a + a)
            out3 = out3_f(input_data)
        return out, out2, out3

    def bprop(self, x, y, z, input_data, out, dout):
        return x * 2, y * 3, z, input_data * 5.1


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ctrl_if_while_bprop_true():
    x = np.array(0).astype(np.float32)
    y = np.array(3).astype(np.float32)
    input_shape = (512, 512, 7, 7)
    input_data = np.random.randn(*input_shape).astype(np.float32)
    net = ControlBprop()
    grad_net = GradOfAllInputs(net, sens_param=False)
    grad_net.set_train()
    grads = grad_net(Tensor(x), Tensor(y), Tensor(x), Tensor(input_data))
    allclose_nparray(x * 2, grads[0].asnumpy(), 0.0000, 0.0000)
    allclose_nparray(y * 3, grads[1].asnumpy(), 0.0000, 0.0000)
    allclose_nparray(x, grads[2].asnumpy(), 0.0000, 0.0000)
    allclose_nparray(input_data * 5.1, grads[3].asnumpy(), 0.0000, 0.0000)


class TwoInput(Cell):
    def __init__(self):
        super().__init__()
        self.op = P.Mul()

    def construct(self, x, y):
        x = self.op(x, y)
        return x


class InlineBpropTwoInput1(Cell):
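    """Call an inner grad network inside the custom `bprop` and double its result."""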
    def __init__(self):
        super().__init__()
        self.f = TwoInput()
        self.f.set_grad()
        self.grad = GradOfAllInputs(self.f, sens_param=False)

    def construct(self, x, y):
        if x > y:
            x = self.f(x, y)
        else:
            x = self.f(x, y)
        return x

    def bprop(self, x, y, out, dout):
        if x > y:
            grads = self.grad(x, y)
        else:
            grads = self.grad(x, y)
        return grads[0] * 2, grads[1] * 2


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ctrl_if_while_bprop_inlinebprop_twoinput():
    net = InlineBpropTwoInput1()
    input1 = Tensor(np.array(2).astype(np.float32))
    input2 = Tensor(np.array(1).astype(np.float32))
    grad_net = GradOfAllInputs(net, sens_param=False)
    grad_net.set_train()
    grads = grad_net(input1, input2)
    allclose_nparray(input1.asnumpy() * 2, grads[1].asnumpy(), 0, 0)
    allclose_nparray(input2.asnumpy() * 2, grads[0].asnumpy(), 0, 0)


class ControlOneIfOneParaOneAddn(Cell):
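    """Either read a Parameter or compute AddN, then conditionally Assign to the Parameter."""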
    def __init__(self, input_shape):
        super().__init__()
        self.addn = P.AddN()
        self.assign = P.Assign()
        self.inputdata = Parameter(initializer(
            1, input_shape, ms.float32), name="global_step")

    def construct(self, x, y, input_data):
        if x > y:
            out = self.inputdata
        else:
            out = self.addn([input_data, input_data, input_data])
        if x > y:
            out = self.assign(self.inputdata, input_data)
        return out


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ctrl_if_para_addn_true():
    x = Tensor(1, ms.float32)
    y = Tensor(0, ms.float32)
    input_shape = (1024, 512, 7, 7)
    input_data = np.random.randn(*input_shape).astype(np.float32)
    net = ControlOneIfOneParaOneAddn(input_shape)
    out = net(x, y, Tensor(input_data))
    allclose_nparray(input_data[0], out.asnumpy()[0], 0.0001, 0.0001)


class AddnCell(Cell):
    def __init__(self):
        super().__init__()
        self.addn = P.AddN()

    def construct(self, x):
        x = self.addn((x, x))
        return x


class SideEffectMemoryCellAddnNet(Cell):
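    """Memory side effect: Assign the intermediate result to a Parameter, then read it back."""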
    def __init__(self):
        super().__init__()
        self.para = Parameter(Tensor([1.0], ms.float32), name="para")
        self.assign = P.Assign()
        self.addn = P.AddN()
        self.addn1 = AddnCell()

    def construct(self, x):
        x = self.addn1(x)
        self.assign(self.para, x)
        out = self.addn((self.para, x))
        return out

    def grad_mindspore_impl(self, params, grad_ys):
        grad_net = GradOfAllInputsAndParams(self)
        grad_net.set_train()
        grad_out = grad_net(params, grad_ys)
        return grad_out


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_grad_memory_addn():
    net = SideEffectMemoryCellAddnNet()
    grad_ys = Tensor([18.0], ms.float32)
    inputs = Tensor([9.0], ms.float32)
    net.grad_mindspore_impl(inputs, grad_ys)


class SideEffectIOCellAddnNet(Cell):
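    """IO side effect: Print two Parameters before the AddN computation."""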
    def __init__(self):
        super().__init__()
        self.para1 = Parameter(Tensor([1.0], ms.float32), name="para1")
        self.para2 = Parameter(Tensor([3.0], ms.float32), name="para2")
        self.print = P.Print()
        self.addn = AddnCell()

    def construct(self, x):
        self.print("para1:", self.para1)
        self.print("para2:", self.para2)
        x = self.addn(x)
        return x

    def grad_mindspore_impl(self, params, grad_ys):
        grad_net = GradOfAllInputsAndParams(self)
        grad_net.set_train()
        grad_out = grad_net(params, grad_ys)
        return grad_out


@security_off_wrap
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_grad_io_addn():
    net = SideEffectIOCellAddnNet()
    grad_ys = Tensor([18.0], ms.float32)
    inputs = Tensor([9.0], ms.float32)
    net.grad_mindspore_impl(inputs, grad_ys)


class SideEffectReturnParameterNet(Cell):
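    """Return the Assign output (the updated Parameter) instead of the computed value."""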
    def __init__(self):
        super().__init__()
        self.para = Parameter(Tensor([1.0], ms.float32), name="para")
        self.assign = P.Assign()
        self.addn = P.AddN()
        self.relu = P.ReLU()

    def construct(self, inputs):
        p1 = self.assign(self.para, inputs)
        out = self.addn((inputs, inputs, inputs))
        out = self.relu(out)
        return p1

    def grad_mindspore_impl(self, params, grad_ys):
        grad_net = GradOfAllInputsAndParams(self)
        grad_net.set_train()
        grad_out = grad_net(params, grad_ys)
        return grad_out


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_grad_read_dependency_return_parameter():
    net = SideEffectReturnParameterNet()
    grad_ys = Tensor([18.0], ms.float32)
    inputs = Tensor([9.0], ms.float32)
    net.grad_mindspore_impl(inputs, grad_ys)


class SideEffectAssignAddnReluReturnParNet(Cell):
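    """Like SideEffectReturnParameterNet: Assign, AddN, ReLU, then return the Assign output."""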
    def __init__(self):
        super().__init__()
        self.parameter1 = Parameter(
            Tensor([1.0], ms.float32), name="parameter1")
        self.assign = P.Assign()
        self.addN = P.AddN()
        self.relu = P.ReLU()

    def construct(self, inputs):
        p1 = self.assign(self.parameter1, inputs)
        out = self.addN((inputs, inputs, inputs))
        out = self.relu(out)
        return p1

    def grad_mindspore_impl(self, params, grad_ys):
        grad_net = GradOfAllInputsAndParams(self)
        grad_net.set_train()
        grad_out = grad_net(params, grad_ys)
        return grad_out


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_side_effect_grad_read_dependency_assign_addn_relu_return_parameter():
    net = SideEffectAssignAddnReluReturnParNet()
    grad_ys = Tensor([18.0], ms.float32)
    inputs = Tensor([9.0], ms.float32)
    out1 = net.grad_mindspore_impl(inputs, grad_ys)
    net = SideEffectAssignAddnReluReturnParNet()
    try:
        context.set_context(mode=context.PYNATIVE_MODE)
        out2 = net.grad_mindspore_impl(inputs, grad_ys)
        allclose_nparray(out1[0][0].asnumpy(), out2[0][0].asnumpy(), 0.001, 0.001)
        allclose_nparray(out1[1][0].asnumpy(), out2[1][0].asnumpy(), 0.001, 0.001)
    finally:
        context.set_context(mode=context.GRAPH_MODE)


class SideEffectPrintInHighOrderAddnNet(Cell):
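    """Print Parameters inside a helper method called from `construct` (high-order function)."""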
    def __init__(self):
        super().__init__()
        self.parameter1 = Parameter(
            Tensor([1.0], ms.float32), name="parameter1")
        self.parameter2 = Parameter(
            Tensor([3.0], ms.float32), name="parameter2")
        self.assign = P.Assign()
        self.addn = P.AddN()
        self.mul = P.Mul()
        self.print = P.Print()

    def construct(self, x):
        self.high_order_func()
        out = self.addn((self.parameter1, x, self.parameter2))
        return out

    def high_order_func(self):
        self.print("parameter1: ", self.parameter1)
        self.print("parameter2: ", self.parameter2)
        return True

    def grad_mindspore_impl(self, params, grad_ys):
        grad_net = GradOfAllInputsAndParams(self)
        grad_net.set_train()
        grad_out = grad_net(params, grad_ys)
        return grad_out


@security_off_wrap
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_side_effect_high_order_print_in_high_order_net():
    print_file = os.getcwd() + "/test_side_effect_high_order_print_in_high_order_net.data"
    context.set_context(print_file_path=print_file)
    net = SideEffectPrintInHighOrderAddnNet()
    out1 = net(Tensor([9.0], ms.float32))
    net = SideEffectPrintInHighOrderAddnNet()
    try:
        context.set_context(mode=context.PYNATIVE_MODE)
        out2 = net(Tensor([9.0], ms.float32))
        allclose_nparray(out1.asnumpy(), out2.asnumpy(), 0.001, 0.001)
    finally:
        context.set_context(mode=context.GRAPH_MODE)


class SideEffectControlFlowAssignDependTwoIfNet(Cell):
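    """Nested ifs whose conditions read a Parameter that Assign updates; Depend orders the writes."""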
    def __init__(self):
        super().__init__()
        self.parameter1 = Parameter(
            Tensor([3.0], ms.float32), name="parameter1")
        self.assign = P.Assign()
        self.mul = P.Mul()
        self.addn = P.AddN()
        self.depend = P.Depend()

    def construct(self, x, y):
        self.assign(self.parameter1, x)
        if self.parameter1 > y:
            x = self.mul(x, x)
            p2 = self.assign(self.parameter1, x)
            if self.parameter1 > y:
                x = self.addn((x, self.parameter1))
                p3 = self.assign(self.parameter1, x)
                self.depend(p3, p2)
        return x

    def grad_mindspore_impl(self, params1, params2, grad_ys):
        grad_net = GradOfAllInputsAndParams(self)
        grad_net.set_train()
        grad_out = grad_net(params1, params2, grad_ys)
        return grad_out


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_side_effect_grad_control_flow_assign_depend_of_two_if():
    net = SideEffectControlFlowAssignDependTwoIfNet()
    grad_ys = Tensor([18.0], ms.float32)
    inputs1 = Tensor([9.0], ms.float32)
    inputs2 = Tensor([6.0], ms.float32)
    net.grad_mindspore_impl(inputs1, inputs2, grad_ys)


class SideEffectTwoAddnSwitchNet(Cell):
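    """Branch on a comparison of two AddN results and return the larger branch's value."""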
    def __init__(self):
        super().__init__()
        self.addN = P.AddN()

    def construct(self, x):
        y = x
        x = self.addN((x, x, x))
        y = self.addN((y, y))
        if x > y:
            return x
        return y

    def grad_mindspore_impl(self, params, grad_ys):
        grad_net = GradOfAllInputsAndParams(self)
        grad_net.set_train()
        grad_out = grad_net(params, grad_ys)
        return grad_out


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_side_effect_grad_two_addn_switch():
    net = SideEffectTwoAddnSwitchNet()
    grad_ys = Tensor([18.0], ms.float32)
    inputs = Tensor([9.0], ms.float32)
    out1 = net.grad_mindspore_impl(inputs, grad_ys)
    expect = 54.0
    allclose_nparray(out1[0][0].asnumpy(), expect, 0.001, 0.001)


class SideEffectGradIfNet(Cell):
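    """Branch on a comparison of two Parameters; only the taken branch is differentiated."""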
    def __init__(self):
        super().__init__()
        self.relu = P.ReLU()
        a = np.full((1,), 5, dtype=np.float32)
        self.a = Parameter(Tensor(a), name="a")
        b = np.full((1,), 4, dtype=np.float32)
        self.b = Parameter(Tensor(b), name="b")

    def construct(self, x):
        if self.a > self.b:
            x = self.relu(x)
            out = x
        else:
            out = x + 2
        return out

    def grad_mindspore_impl(self, params, grad_ys):
        grad_net = GradOfFirstInput(self)
        grad_net.set_train()
        grad_out = grad_net(params, grad_ys)
        return grad_out


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_side_effect_grad_if():
    context.set_context(mode=context.GRAPH_MODE)
    net = SideEffectGradIfNet()
    grad_ys = Tensor([18.0], ms.float32)
    inputs = Tensor([9.0], ms.float32)
    out1 = net.grad_mindspore_impl(inputs, grad_ys)
    expect = 18.0
    allclose_nparray(out1.asnumpy(), expect, 0.001, 0.001)


class OneInputBprop(Cell):
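    """ReLU net whose custom `bprop` returns 5 * x, so the second-order grad is 5 and the third is 0."""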
    def __init__(self):
        super().__init__()
        self.op = P.ReLU()

    def construct(self, x):
        return self.op(x)

    def bprop(self, x, out, dout):
        return (5 * x,)


class HighGrad(Cell):
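    """Stack the grad wrappers in `grad_list` to build a higher-order gradient network."""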
    def __init__(self, network, grad_list, sens_param=False, real_inputs_count=None):
        super().__init__()
        self.grads = [network]
        for i in range(len(grad_list) - 1):
            _grad = grad_list[i](self.grads[i], sens_param=False)
            self.grads.append(_grad)
        self.final_grad = grad_list[-1](self.grads[-1],
                                        sens_param=sens_param, real_inputs_count=real_inputs_count)

    def construct(self, *inputs):
        return self.final_grad(*inputs)


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_highgrad_one_input_sec_grad():
    net = OneInputBprop()
    x = Tensor(np.array([2, 2]).astype(np.float32))
    grad_net = HighGrad(net, [GradOfFirstInput, GradOfFirstInput])
    dxdx = grad_net(x)
    assert (dxdx.asnumpy() == np.array([5, 5]).astype(np.float32)).all()


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_highgrad_one_input_third_grad():
    net = OneInputBprop()
    x = Tensor(np.array([2, 2]).astype(np.float32))
    grad_net = HighGrad(
        net, [GradOfFirstInput, GradOfFirstInput, GradOfFirstInput])
    third_grad = grad_net(x)
    assert (third_grad.asnumpy() == np.array([0, 0]).astype(np.float32)).all()


class SideEffectControlFlowAssignDependWhileNet(Cell):
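    """While loop whose condition reads a Parameter that AssignAdd updates; Depend orders the writes."""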
    def __init__(self):
        super().__init__()
        self.parameter1 = Parameter(Tensor([199.0], ms.float32), name="parameter1")
        self.assign = P.Assign()
        self.assignadd = P.AssignAdd()
        self.addn = P.AddN()
        self.depend = P.Depend()

    def construct(self, x, y, z):
        p1 = self.assign(self.parameter1, x)
        while self.parameter1 < y:
            x = self.addn((x, x))
            p2 = self.assignadd(self.parameter1, z)
            self.depend(p2, p1)
        return x

    def grad_mindspore_impl(self, params1, params2, params3, grad_ys):
        grad_net = GradOfAllInputsAndParams(self)
        grad_net.set_train()
        grad_out = grad_net(params1, params2, params3, grad_ys)
        return grad_out


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_side_effect_grad_control_flow_assign_depend_while_net():
    context.set_context(mode=context.GRAPH_MODE)
    net = SideEffectControlFlowAssignDependWhileNet()
    grad_ys = Tensor([18.0], ms.float32)
    inputs1 = Tensor([9.0], ms.float32)
    inputs2 = Tensor([6.0], ms.float32)
    inputs3 = Tensor([3.0], ms.float32)
    out1 = net.grad_mindspore_impl(inputs1, inputs2, inputs3, grad_ys)

    expect1 = 18.0
    expect2 = 0
    allclose_nparray(out1[0][0].asnumpy(), expect1, 0.001, 0.001)
    allclose_nparray(out1[1][0].asnumpy(), expect2, 0.001, 0.001)