• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2020 Huawei Technologies Co., Ltd
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ============================================================================
15""" test_tensor_slice """
16import numpy as np
17import pytest
18
19from mindspore import Tensor
20from mindspore import Parameter
21from mindspore import context
22from mindspore import dtype as mstype
23from mindspore.nn import Cell
24from mindspore.common.parameter import ParameterTuple
25from mindspore.ops import composite as C
26
27
# Grad functional configured to return gradients w.r.t. a ParameterTuple
# (get_by_list=True) and to take an explicit sens/output-gradient argument
# (sens_param=True). Not used in the visible portion of this file.
grad_by_list_with_sens = C.GradOperation(get_by_list=True, sens_param=True)
29
30
def setup_module():
    """Run every test in this module in PyNative (eager) execution mode."""
    context.set_context(mode=context.PYNATIVE_MODE)
33
34
class NetWorkSlicePositive(Cell):
    """Network exercising basic slicing with non-negative steps."""

    def __init__(self):
        super(NetWorkSlicePositive, self).__init__()
        # One additive constant per slice result, matching each sliced shape.
        self.tensor_ret0 = Tensor(np.ones([1, 2, 3], np.int32))
        self.tensor_ret1 = Tensor(np.ones([4, 8, 10], np.int32))
        self.tensor_ret2 = Tensor(np.ones([6, 8, 10], np.int32))
        self.tensor_ret3 = Tensor(np.ones([3, 8, 10], np.int32))

    def construct(self, tensor):
        # Explicit start:stop:step on every axis.
        out0 = tensor[3:4:1, 1:5:2, 3:6:1] + self.tensor_ret0
        # Negative start index on the leading axis.
        out1 = tensor[-6:4:1, 0:8:1, ::1] + self.tensor_ret1
        # Full slices on all axes (identity).
        out2 = tensor[::, ::, ::] + self.tensor_ret2
        # Step-2 slice on the leading axis only.
        out3 = tensor[::2] + self.tensor_ret3
        return out0, out1, out2, out3
49
50
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_slice_positive():
    """Positive-step basic slicing matches the NumPy reference."""
    data = np.arange(6 * 8 * 10).reshape(6, 8, 10).astype(np.int32)
    net = NetWorkSlicePositive()
    results = net(Tensor(data))
    expected = (
        data[3:4:1, 1:5:2, 3:6:1] + np.ones([1, 2, 3]),
        data[-6:4:1, 0:8:1, ::1] + np.ones([4, 8, 10]),
        data[::, ::, ::] + np.ones([6, 8, 10]),
        data[::2] + np.ones([3, 8, 10]),
    )
    for got, want in zip(results, expected):
        assert np.all(got.asnumpy() == want)
65
66
class NetWorkSliceEllipsis(Cell):
    """Network exercising ellipsis, None and bool indexing."""

    def __init__(self):
        super(NetWorkSliceEllipsis, self).__init__()
        self.tensor_ret0 = Tensor(np.ones([2, 7, 8], np.int32))
        self.tensor_ret1 = Tensor(np.ones([6, 7, 8, 9], np.int32))
        self.tensor_ret2 = Tensor(np.ones([1, 6, 7, 8, 9], np.int32))

    def construct(self, tensor):
        # Ellipsis expands to full slices over the middle axes.
        out0 = tensor[0:4:2, ..., 1] + self.tensor_ret0
        # A bare ellipsis is an identity index.
        out1 = tensor[...] + self.tensor_ret1
        # None and True each prepend a length-1 axis.
        out2 = tensor[None] + self.tensor_ret2
        out3 = tensor[True] + self.tensor_ret2
        return out0, out1, out2, out3
80
81
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_slice_ellipsis():
    """Ellipsis/None/bool indexing matches the NumPy reference."""
    data = np.arange(6 * 7 * 8 * 9).reshape(6, 7, 8, 9).astype(np.int32)
    net = NetWorkSliceEllipsis()
    out0, out1, out2, out3 = net(Tensor(data))
    assert np.all(out0.asnumpy() == data[0:4:2, ..., 1] + np.ones([2, 7, 8]))
    assert np.all(out1.asnumpy() == data[...] + np.ones([6, 7, 8, 9]))
    # The (1, 6, 7, 8, 9) results broadcast against the trimmed ones arrays.
    assert np.all(out2.asnumpy() == data[None] + np.ones([6, 7, 8, 9]))
    assert np.all(out3.asnumpy() == data[True] + np.ones([1, 6, 7, 8, 9]))
96
97
class NetWorkReduceDimension(Cell):
    """Network whose integer indices drop one or more axes."""

    def __init__(self):
        super(NetWorkReduceDimension, self).__init__()
        self.tensor_ret1 = Tensor(np.ones([3, 10], np.int32))
        self.tensor_ret2 = Tensor(np.ones([6, 8], np.int32))
        self.tensor_ret3 = Tensor(np.array(8, np.int32))
        self.tensor_ret4 = Tensor(np.ones([8, 10], np.int32))

    def construct(self, tensor):
        # An int in the middle position removes that axis.
        out1 = tensor[::2, 1, ::1] + self.tensor_ret1
        out2 = tensor[::, ::, 0] + self.tensor_ret2
        # All-int indexing produces a scalar.
        out3 = tensor[3, 2, 5] + self.tensor_ret3
        # A single int keeps the trailing axes.
        out4 = tensor[1] + self.tensor_ret4
        return out1, out2, out3, out4
112
113
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_reduce_dimension():
    """Integer indexing reduces dimensions exactly as NumPy does."""
    data = np.arange(6 * 8 * 10).reshape(6, 8, 10).astype(np.int32)
    net = NetWorkReduceDimension()
    out1, out2, out3, out4 = net(Tensor(data))
    assert np.all(out1.asnumpy() == data[::2, 1, ::1] + np.ones([3, 10]))
    assert np.all(out2.asnumpy() == data[::, ::, 0] + np.ones([6, 8]))
    assert np.all(out3.asnumpy() == data[3, 2, 5] + np.array(8, np.int32))
    assert np.all(out4.asnumpy() == data[1] + np.ones([8, 10]))
128
129
class NetWorkSliceStep(Cell):
    """Network exercising slices with negative and greater-than-1 steps.

    Fix: the pytest marks (level0 / platform / env_onecard) that decorated
    this class were removed — marks on a helper Cell that is not a test
    class have no effect and were misleading; the effective marks live on
    test_step_negative below.
    """

    def __init__(self):
        super(NetWorkSliceStep, self).__init__()
        self.tensor_ret1 = Tensor(np.ones([6, 5, 10], np.int32))
        self.tensor_ret2 = Tensor(np.ones([3, 5, 5], np.int32))

    def construct(self, tensor):
        # Negative step on the last axis reverses it.
        ret1 = tensor[::1, -5::, ::-1] + self.tensor_ret1
        # Step-2 slices shrink the first and last axes.
        ret2 = tensor[::2, -5::, ::2] + self.tensor_ret2
        return ret1, ret2
145
146
@pytest.mark.level0
# ascend op stridedslice has bug, and has not been fixed.
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_step_negative():
    """Slicing with negative / greater-than-1 steps matches NumPy."""
    data = np.arange(6 * 8 * 10).reshape(6, 8, 10).astype(np.int32)
    out1, out2 = NetWorkSliceStep()(Tensor(data))
    assert np.all(out1.asnumpy() == data[::1, -5::, ::-1] + np.ones([6, 5, 10]))
    assert np.all(out2.asnumpy() == data[::2, -5::, ::2] + np.ones([3, 5, 5]))
158
159
class TensorGetItemByThreeTensors(Cell):
    """Network read-indexed by one, two, then three index tensors."""

    def __init__(self):
        super(TensorGetItemByThreeTensors, self).__init__()
        self.const0 = Tensor(np.ones((4, 5, 8, 10)), mstype.int32)
        self.const1 = Tensor(np.ones((3, 4, 5, 10)), mstype.int32)
        self.const2 = Tensor(np.ones((5, 3, 4, 5)), mstype.int32)

    def construct(self, x, index_0, index_1, index_2):
        # Each additional index tensor consumes one more leading axis of x.
        out0 = x[index_0] + self.const0
        out1 = x[index_0, index_1] + self.const1
        out2 = x[index_0, index_1, index_2] + self.const2
        return out0, out1, out2
172
173
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_getitem_by_tensors():
    """Advanced indexing with 1-3 index tensors matches NumPy.

    NOTE(review): this case has been seen to hit a sporadic sync-stream error.
    """
    net = TensorGetItemByThreeTensors()
    data = np.arange(6 * 8 * 10).reshape(6, 8, 10).astype(np.int32)
    index_0 = np.random.randint(6, size=(3, 4, 5)).astype(np.int32)
    index_1 = np.random.randint(6, size=(4, 5)).astype(np.int32)
    index_2 = np.random.randint(6, size=(5, 3, 4, 5)).astype(np.int32)
    out0, out1, out2 = net(Tensor(data), Tensor(index_0),
                           Tensor(index_1), Tensor(index_2))
    assert np.all(out0.asnumpy() == data[index_0] + np.ones([4, 5, 8, 10]))
    assert np.all(out1.asnumpy() == data[index_0, index_1] + np.ones([3, 4, 5, 10]))
    assert np.all(out2.asnumpy() == data[index_0, index_1, index_2] + np.ones([5, 3, 4, 5]))
194
195
class TensorGetItemByMixedTensorsBasicCase(Cell):
    """Network mixing tensor indices with slices, ints and ellipsis."""

    def __init__(self, c0, c1, c2, c3, c4, c5):
        super(TensorGetItemByMixedTensorsBasicCase, self).__init__()
        # One additive constant per indexing pattern below.
        self.const0 = Tensor(c0)
        self.const1 = Tensor(c1)
        self.const2 = Tensor(c2)
        self.const3 = Tensor(c3)
        self.const4 = Tensor(c4)
        self.const5 = Tensor(c5)

    def construct(self, tensor, index_0, index_1):
        out0 = tensor[index_0, index_1, 0:3] + self.const0
        out1 = tensor[0:3, index_0, ...] + self.const1
        out2 = tensor[0, index_0, index_1] + self.const2
        out3 = tensor[..., index_0, 0:3] + self.const3
        out4 = tensor[0:2, index_0, index_1] + self.const4
        out5 = tensor[..., index_0, index_1] + self.const5
        return out0, out1, out2, out3, out4, out5
214
215
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_getitem_by_mixed_tensors():
    """Mixed (tensor + slice/int/ellipsis) getitem matches NumPy."""
    shapes = [(3, 4, 5, 3), (3, 3, 4, 5, 5), (3, 4, 5),
              (3, 3, 4, 5, 3), (2, 3, 4, 5), (3, 3, 4, 5)]
    consts = [np.ones(s, np.float32) for s in shapes]
    net = TensorGetItemByMixedTensorsBasicCase(*consts)
    data = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)
    idx0 = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)
    idx1 = np.random.randint(4, size=(4, 5)).astype(np.int32)
    outs = net(Tensor(data, mstype.float32),
               Tensor(idx0, mstype.int32), Tensor(idx1, mstype.int32))
    refs = (
        data[idx0, idx1, 0:3] + consts[0],
        data[0:3, idx0, ...] + consts[1],
        data[0, idx0, idx1] + consts[2],
        data[..., idx0, 0:3] + consts[3],
        data[0:2, idx0, idx1] + consts[4],
        data[..., idx0, idx1] + consts[5],
    )
    for got, want in zip(outs, refs):
        assert np.all(got.asnumpy() == want)
242
243
class TensorItemByNone(Cell):
    """Calls Tensor.item() with no index (valid only for 1-element tensors)."""

    def construct(self, tensor):
        return tensor.item()
248
249
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_item_by_none():
    """Tensor.item() with no index works only on single-element tensors.

    Fix: the 1-D fixture is now deterministic — np.ndarray([1]) returns an
    uninitialized buffer that may contain NaN, and NaN != NaN made the
    equality assert below flaky.
    """
    net = TensorItemByNone()
    input_1d_np = np.ones([1]).astype(np.float32)
    input_1d_ms = Tensor(input_1d_np, mstype.float32)
    input_3d_np = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)
    input_3d_ms = Tensor(input_3d_np, mstype.float32)

    output_ms = net(input_1d_ms)
    assert np.all(output_ms.asnumpy() == input_1d_np.item())

    # item() without an index is rejected for multi-element tensors.
    with pytest.raises(ValueError):
        net(input_3d_ms)
267
268
class TensorItemByItem(Cell):
    """Calls Tensor.item(index) with an int or a tuple-of-ints index."""

    def construct(self, tensor, index):
        return tensor.item(index)
273
274
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_item_by_int():
    """Tensor.item(int) flat-indexes like ndarray.item; bad types/ranges raise.

    Fix: the 1-D fixture is now deterministic — np.ndarray([1]) returns an
    uninitialized buffer that may contain NaN, and NaN != NaN made the
    equality asserts below flaky.
    """
    net = TensorItemByItem()
    input_1d_np = np.ones([1]).astype(np.float32)
    input_1d_ms = Tensor(input_1d_np, mstype.float32)

    input_3d_np = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)
    input_3d_ms = Tensor(input_3d_np, mstype.float32)

    # valid flat index / non-int / in-range-for-3d / out-of-range-for-3d
    index_np_1, index_np_2, index_np_3, index_np_4 = 0, 1.0, 30, 60

    output_1d_ms = net(input_1d_ms, index_np_1)
    output_3d_ms_1 = net(input_3d_ms, index_np_1)
    output_3d_ms_2 = net(input_3d_ms, index_np_3)

    assert np.all(output_1d_ms.asnumpy() == input_1d_np.item(index_np_1))
    assert np.all(output_3d_ms_1.asnumpy() == input_3d_np.item(index_np_1))
    assert np.all(output_3d_ms_2.asnumpy() == input_3d_np.item(index_np_3))

    # A float index is rejected.
    with pytest.raises(TypeError):
        net(input_1d_ms, index_np_2)

    # A flat index beyond the element count is rejected.
    with pytest.raises(IndexError):
        net(input_1d_ms, index_np_3)

    with pytest.raises(TypeError):
        net(input_3d_ms, index_np_2)

    with pytest.raises(IndexError):
        net(input_3d_ms, index_np_4)
309
310
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_item_by_tuple():
    """Tensor.item(tuple) requires one in-range int per dimension.

    Fix: the 1-D fixture is now deterministic — np.ndarray([1]) returns an
    uninitialized buffer that may contain NaN, and NaN != NaN made the
    equality assert below flaky.
    """
    net = TensorItemByItem()
    input_1d_np = np.ones([1]).astype(np.float32)
    input_1d_ms = Tensor(input_1d_np, mstype.float32)
    input_3d_np = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)
    input_3d_ms = Tensor(input_3d_np, mstype.float32)

    index_np_1 = (0,)          # full index for the 1-D tensor
    index_np_2 = (1, 2)        # wrong arity for both tensors
    index_np_3 = (1, 2, 3)     # full, in-range index for the 3-D tensor
    index_np_4 = (3, 4, 4)     # out of range for the 3-D tensor
    index_np_5 = (1, 2, 3, 4)  # too many entries

    output_1d_ms = net(input_1d_ms, index_np_1)
    output_3d_ms = net(input_3d_ms, index_np_3)
    assert np.all(output_1d_ms.asnumpy() == input_1d_np.item(index_np_1))
    assert np.all(output_3d_ms.asnumpy() == input_3d_np.item(index_np_3))

    with pytest.raises(ValueError):
        net(input_1d_ms, index_np_2)

    with pytest.raises(ValueError):
        net(input_3d_ms, index_np_2)

    with pytest.raises(IndexError):
        net(input_3d_ms, index_np_4)

    with pytest.raises(ValueError):
        net(input_3d_ms, index_np_5)
345
346
class TensorSetItemByMixedTensors_0(Cell):
    """In-place Parameter write through a (slice, tensor, tensor) index."""

    def __init__(self, value):
        super(TensorSetItemByMixedTensors_0, self).__init__()
        self.const = Tensor(np.ones((3, 4, 5), np.float32))
        self.param = Parameter(Tensor(np.arange(3 * 4 * 5).reshape((3, 4, 5)),
                                      mstype.float32),
                               name="x")
        self.value = value

    def construct(self, index_0, index_1, index_2):
        # index_2 is accepted for a uniform signature but not used here.
        self.param[0:2, index_0, index_1] = self.value
        return self.param + self.const
360
361
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_mixed_tensors_0():
    """Setitem through (slice, tensor, tensor) matches the NumPy reference."""
    value = 88.0
    net = TensorSetItemByMixedTensors_0(value)
    idx0 = np.random.randint(3, size=(3, 4, 5))
    idx1 = np.random.randint(4, size=(4, 5))
    idx2 = np.random.randint(3, size=(2, 1, 4, 5))
    out = net(Tensor(idx0, mstype.int32), Tensor(idx1, mstype.int32),
              Tensor(idx2, mstype.int32))
    expected = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)
    expected[0:2, idx0, idx1] = value
    expected += np.ones((3, 4, 5), np.float32)
    assert np.all(out.asnumpy() == expected)
381
382
class TensorSetItemByMixedTensors_1(Cell):
    """In-place Parameter write through a (slice, tensor, ellipsis) index.

    Fix: removed the pytest marks (level0 / platform / env_onecard) that
    decorated this helper Cell — marks on a class that is not a test class
    have no effect and were misleading; the effective marks live on
    test_setitem_by_mixed_tensors_1.
    """

    def __init__(self, value):
        super(TensorSetItemByMixedTensors_1, self).__init__()
        self.const = Tensor(np.ones((3, 4, 5), np.float32))
        self.param = Parameter(Tensor(np.arange(3 * 4 * 5).reshape((3, 4, 5)), mstype.float32),
                               name="x")
        self.value = value

    def construct(self, index_0, index_1, index_2):
        # index_1 / index_2 are accepted for a uniform signature but unused.
        self.param[0:2, index_0, ...] = self.value
        ret = self.param + self.const
        return ret
400
401
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_mixed_tensors_1():
    """Setitem through (slice, tensor, ellipsis) matches the NumPy reference."""
    value = 88.0
    net = TensorSetItemByMixedTensors_1(value)
    idx0 = np.random.randint(3, size=(3, 4, 5))
    idx1 = np.random.randint(4, size=(4, 5))
    idx2 = np.random.randint(3, size=(2, 1, 4, 5))
    out = net(Tensor(idx0, mstype.int32), Tensor(idx1, mstype.int32),
              Tensor(idx2, mstype.int32))
    expected = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)
    expected[0:2, idx0, ...] = value
    expected += np.ones((3, 4, 5), np.float32)
    assert np.all(out.asnumpy() == expected)
421
422
class TensorSetItemByMixedTensors_2(Cell):
    """In-place float16 Parameter write through an (ellipsis, tensor, int) index.

    Fix: removed the pytest marks (level0 / platform / env_onecard) that
    decorated this helper Cell — marks on a class that is not a test class
    have no effect and were misleading; the effective marks live on
    test_setitem_by_mixed_tensors_2.
    """

    def __init__(self, value):
        super(TensorSetItemByMixedTensors_2, self).__init__()
        self.const = Tensor(np.ones((3, 4, 5), np.float16))
        self.param = Parameter(Tensor(np.arange(3 * 4 * 5).reshape((3, 4, 5)), mstype.float16),
                               name="x")
        self.value = value

    def construct(self, index_0, index_1, index_2):
        # index_1 / index_2 are accepted for a uniform signature but unused.
        self.param[..., index_0, 1] = self.value
        ret = self.param + self.const
        return ret
440
441
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_mixed_tensors_2():
    """Setitem through (ellipsis, tensor, int) matches the NumPy reference.

    Fix: the NumPy reference is now built in float16, matching the dtype of
    the Parameter inside TensorSetItemByMixedTensors_2. It was float32 and
    only matched because every value here happens to be exactly
    representable in float16.
    """
    value = 88.0
    net = TensorSetItemByMixedTensors_2(value)
    index_0 = np.random.randint(3, size=(3, 4, 5))
    index_1 = np.random.randint(4, size=(4, 5))
    index_2 = np.random.randint(3, size=(2, 1, 4, 5))
    index_0_ms = Tensor(index_0, mstype.int32)
    index_1_ms = Tensor(index_1, mstype.int32)
    index_2_ms = Tensor(index_2, mstype.int32)
    # float16 to mirror the net's parameter dtype.
    input_np = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float16)
    const = np.ones((3, 4, 5), np.float16)
    out = net(index_0_ms, index_1_ms, index_2_ms)
    input_np[..., index_0, 1] = value
    assert np.all(out.asnumpy() == (input_np + const))
461
462
class TensorGetItemByMixedTensorsIndexError(Cell):
    """Applies an over-specified mixed index; used to check IndexError handling."""

    def construct(self, x, index_0, index_1):
        return x[index_0, index_1, 0:3, ..., 0:5, [1, 2, 3, 4]]
467
468
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_getitem_by_mixed_tensor_exception():
    """An over-specified mixed tensor index raises IndexError."""
    data = np.arange(3 * 4 * 5 * 6 * 7 * 8 * 9).reshape((3, 4, 5, 6, 7, 8, 9))
    input_ms = Tensor(data, mstype.int32)
    idx0 = Tensor(np.random.randint(3, size=(3, 4, 5)), mstype.int32)
    idx1 = Tensor(np.random.randint(4, size=(3, 4, 5)), mstype.int32)
    with pytest.raises(IndexError):
        TensorGetItemByMixedTensorsIndexError()(input_ms, idx0, idx1)
481
482
class TensorSetItemByOneTensorWithNumber(Cell):
    """Writes a fixed scalar through a single tensor index into a Parameter."""

    def __init__(self, value):
        super(TensorSetItemByOneTensorWithNumber, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
        self.value = value

    def construct(self, index):
        self.param[index] = self.value
        return self.param + self.const
494
495
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_one_tensor_with_number():
    """Scalar write through one tensor index matches the NumPy reference.

    Fix: the NumPy reference array is now float32 like the Parameter under
    test; it was left as the default int dtype (unlike the sibling setitem
    tests) and relied on implicit casts when assigning 0.0.
    """
    value = 0.0
    net = TensorSetItemByOneTensorWithNumber(value)
    index_np = np.random.randint(4, size=(5, 4))
    index = Tensor(index_np, mstype.int32)
    input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
    const = np.ones((6, 7, 8)).astype(np.float32)
    out = net(index)
    input_data[index_np] = value
    assert np.all(out.asnumpy() == (input_data + const))
511
512
class TensorSetItemByOneTensorWithTensor(Cell):
    """Writes a tensor value through a single tensor index into a Parameter."""

    def __init__(self):
        super(TensorSetItemByOneTensorWithTensor, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")

    def construct(self, index, value):
        self.param[index] = value
        return self.param + self.const
523
524
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_one_tensor_with_tensor():
    """Tensor write through one tensor index matches the NumPy reference.

    Fix: the NumPy reference array is now float32 like the Parameter under
    test; it was left as the default int dtype and relied on an implicit
    cast when assigning the float value.
    """
    net = TensorSetItemByOneTensorWithTensor()
    index_np = np.random.randint(4, size=(5, 4))
    index = Tensor(index_np, mstype.int32)
    input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
    const = np.ones((6, 7, 8)).astype(np.float32)
    # The (5, 4) index selects a (5, 4, 7, 8) target; (4, 7, 8) broadcasts in.
    value = np.zeros((4, 7, 8)).astype(np.float32)
    value_ms = Tensor(value, mstype.float32)
    out = net(index, value_ms)
    input_data[index_np] = value
    assert np.all(out.asnumpy() == (input_data + const))
541
542
class TensorSetItemByOneTensorWithTupleOfNumber(Cell):
    """Writes a tuple of scalars through a single tensor index."""

    def __init__(self, value):
        super(TensorSetItemByOneTensorWithTupleOfNumber, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
        self.value = value

    def construct(self, index):
        self.param[index] = self.value
        return self.param + self.const
554
555
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_one_tensor_with_tuple_number():
    """Tuple-of-numbers write through one tensor index matches NumPy."""
    value = (0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7)
    net = TensorSetItemByOneTensorWithTupleOfNumber(value)
    idx_np = np.random.randint(5, size=(5, 4))
    expected = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
    out = net(Tensor(idx_np, mstype.int32))
    expected[idx_np] = value
    expected += np.ones((6, 7, 8)).astype(np.float32)
    assert np.all(out.asnumpy() == expected)
571
572
class TensorSetItemByOneTensorWithTupleOfTensor(Cell):
    """Writes a tuple of row tensors through a single tensor index."""

    def __init__(self):
        super(TensorSetItemByOneTensorWithTupleOfTensor, self).__init__()
        self.const = Tensor(np.ones((6, 3, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 3 * 8).reshape((6, 3, 8)), mstype.float32), name="x")

    def construct(self, index, value_0, value_1, value_2):
        self.param[index] = (value_0, value_1, value_2)
        return self.param + self.const
583
584
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_one_tensor_with_tuple_tensors():
    """Tuple-of-tensors write through one tensor index matches NumPy."""
    net = TensorSetItemByOneTensorWithTupleOfTensor()
    idx_np = np.random.randint(6, size=(5, 4)).astype(np.int32)
    rows = [np.zeros((8,), np.float32),
            np.ones((8,), np.float32),
            np.ones((8,), np.float32) * 2]
    expected = np.arange(6 * 3 * 8).reshape((6, 3, 8)).astype(np.float32)
    out = net(Tensor(idx_np, mstype.int32),
              Tensor(rows[0]), Tensor(rows[1]), Tensor(rows[2]))
    expected[idx_np] = (rows[0], rows[1], rows[2])
    expected += np.ones((6, 3, 8)).astype(np.float32)
    assert np.all(out.asnumpy() == expected)
605
606
class TensorSetItemByTensorsWithNumber(Cell):
    """Writes a fixed scalar through three tensor indices."""

    def __init__(self, value):
        super(TensorSetItemByTensorsWithNumber, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
        self.value = value

    def construct(self, index_0, index_1, index_2):
        self.param[index_0, index_1, index_2] = self.value
        return self.param + self.const
618
619
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_tensors_with_number():
    """Scalar write through three tensor indices matches the NumPy reference.

    Fix: this test carried both level1 and level0 marks at once; a test
    should have a single level, so the duplicate was dropped (level0 kept,
    consistent with the sibling setitem-with-tensor test).
    """
    value = 0.0
    net = TensorSetItemByTensorsWithNumber(value)
    index_0 = np.random.randint(6, size=(3, 4, 5))
    index_1 = np.random.randint(7, size=(4, 5))
    index_2 = np.random.randint(8, size=(5, 3, 4, 5))
    index_0_ms = Tensor(index_0, mstype.int32)
    index_1_ms = Tensor(index_1, mstype.int32)
    index_2_ms = Tensor(index_2, mstype.int32)
    out = net(index_0_ms, index_1_ms, index_2_ms)
    const = np.ones((6, 7, 8)).astype(np.float32)
    input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
    input_data[index_0, index_1, index_2] = value
    assert np.all(out.asnumpy() == (input_data + const))
640
641
class TensorSetItemByTensorsWithTensor(Cell):
    """Writes a tensor value through three tensor indices."""

    def __init__(self):
        super(TensorSetItemByTensorsWithTensor, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")

    def construct(self, index_0, index_1, index_2, value):
        self.param[index_0, index_1, index_2] = value
        return self.param + self.const
652
653
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_tensors_with_tensor():
    """Tensor write through three tensor indices matches the NumPy reference."""
    net = TensorSetItemByTensorsWithTensor()
    idx0 = np.random.randint(6, size=(3, 4, 5))
    idx1 = np.random.randint(7, size=(4, 5))
    idx2 = np.random.randint(8, size=(5, 3, 4, 5))
    value = np.zeros((4, 5)).astype(np.float32)
    out = net(Tensor(idx0, mstype.int32), Tensor(idx1, mstype.int32),
              Tensor(idx2, mstype.int32), Tensor(value, mstype.float32))
    expected = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
    expected[idx0, idx1, idx2] = value
    expected += np.ones((6, 7, 8)).astype(np.float32)
    assert np.all(out.asnumpy() == expected)
674
675
class TensorSetItemByTensorsWithTensorNumberError(Cell):
    """Uses four index tensors on a 3-D Parameter; used to check IndexError."""

    def __init__(self):
        super(TensorSetItemByTensorsWithTensorNumberError, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")

    def construct(self, index_0, index_1, index_2, index_3, value):
        self.param[index_0, index_1, index_2, index_3] = value
        return self.param + self.const
686
687
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_tensors_with_tensor_error():
    """More index tensors than the parameter has dimensions raises IndexError."""
    indices = [Tensor(np.random.randint(6, size=(3, 4, 5)), mstype.int32),
               Tensor(np.random.randint(7, size=(4, 5)), mstype.int32),
               Tensor(np.random.randint(8, size=(5, 3, 4, 5)), mstype.int32),
               Tensor(np.random.randint(8, size=(1, 3, 4, 5)), mstype.int32)]
    value = Tensor(np.zeros((2, 5)), mstype.float32)
    net = TensorSetItemByTensorsWithTensorNumberError()
    with pytest.raises(IndexError):
        net(indices[0], indices[1], indices[2], indices[3], value)
702
703
class TensorSetItemByTensorsWithTupleOfNumber(Cell):
    """Writes a tuple of scalars through three tensor indices."""

    def __init__(self, value):
        super(TensorSetItemByTensorsWithTupleOfNumber, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")
        self.value = value

    def construct(self, index_0, index_1, index_2):
        self.param[index_0, index_1, index_2] = self.value
        return self.param + self.const
715
716
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
# GPU op has bug, and has not been fixed.
@pytest.mark.env_onecard
def test_setitem_by_tensors_with_tuple_of_number():
    """Tuple-of-numbers write through three tensor indices matches NumPy."""
    value = (0.0, 1.1, 2.2, 3.3, 4.4)
    net = TensorSetItemByTensorsWithTupleOfNumber(value)
    idx0 = np.random.randint(6, size=(3, 4, 5))
    idx1 = np.random.randint(7, size=(4, 5))
    idx2 = np.random.randint(8, size=(5, 3, 4, 5))
    expected = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
    expected[idx0, idx1, idx2] = value
    expected += np.ones((6, 7, 8)).astype(np.float32)
    out = net(Tensor(idx0, mstype.int32), Tensor(idx1, mstype.int32),
              Tensor(idx2, mstype.int32))
    assert np.all(out.asnumpy() == expected)
736
737
class TensorSetItemByTensorsWithTupleOfTensor(Cell):
    """Writes a tuple of three tensors through three tensor indices."""

    def __init__(self):
        super(TensorSetItemByTensorsWithTupleOfTensor, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")

    def construct(self, index_0, index_1, index_2, value_0, value_1, value_2):
        self.param[index_0, index_1, index_2] = (value_0, value_1, value_2)
        return self.param + self.const
748
749
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
# GPU op has bug, and has not been fixed.
@pytest.mark.env_onecard
def test_setitem_by_tensors_with_tuple_of_tensor():
    """Tuple-of-tensors write through three tensor indices matches NumPy."""
    values = [np.zeros((4, 5)), np.ones((4, 5)), np.ones((4, 5)) * 2]
    net = TensorSetItemByTensorsWithTupleOfTensor()
    idx0 = np.random.randint(6, size=(3, 4, 5))
    idx1 = np.random.randint(7, size=(4, 5))
    idx2 = np.random.randint(8, size=(5, 3, 4, 5))
    expected = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)
    expected[idx0, idx1, idx2] = (values[0], values[1], values[2])
    expected += np.ones((6, 7, 8)).astype(np.float32)
    out = net(Tensor(idx0, mstype.int32), Tensor(idx1, mstype.int32),
              Tensor(idx2, mstype.int32),
              Tensor(values[0], mstype.float32), Tensor(values[1], mstype.float32),
              Tensor(values[2], mstype.float32))
    assert np.all(out.asnumpy() == expected)
774
775
class TensorSetItemByTensorsWithTupleOfTensorNumberError(Cell):
    """Assigns a two-element value tuple where three are required.

    Used by the error tests, which expect this assignment to raise.
    """

    def __init__(self):
        super(TensorSetItemByTensorsWithTupleOfTensorNumberError, self).__init__()
        self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)
        self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name="x")

    def construct(self, index_0, index_1, index_2, value_0, value_1):
        # Tuple length deliberately mismatches the three-tensor index.
        self.param[index_0, index_1, index_2] = (value_0, value_1)
        return self.param + self.const
786
787
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_by_tensor_with_tuple_of_tensor_error():
    """A value tuple shorter than required must raise ValueError."""
    net = TensorSetItemByTensorsWithTupleOfTensorNumberError()
    indices = [Tensor(np.random.randint(6, size=(3, 4, 5)), mstype.int32),
               Tensor(np.random.randint(7, size=(4, 5)), mstype.int32),
               Tensor(np.random.randint(8, size=(5, 3, 4, 5)), mstype.int32)]
    values = [Tensor(np.zeros((4, 5)), mstype.float32),
              Tensor(np.ones((4, 5)), mstype.float32)]
    with pytest.raises(ValueError):
        net(indices[0], indices[1], indices[2], values[0], values[1])
804
805
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_setitem_grad():
    """Smoke test: taking gradients through a slice assignment must not raise.

    Only execution is checked; the gradient values are not asserted.
    """
    class Net(Cell):
        def __init__(self):
            super(Net, self).__init__()
            self.weight = Parameter(
                Tensor(np.ones([4, 4, 5]), dtype=mstype.float32), "b1", requires_grad=True)

        def construct(self, a, b):
            # Assign b into rows 1..2 of a before combining with the weight.
            a[1:3:1, ::] = b
            c = a + self.weight
            return c

    class GradNet(Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net
            self.weights = ParameterTuple(net.trainable_params())

        def construct(self, x, y, sens):
            # Gradients w.r.t. self.weights, with `sens` as output sensitivity.
            return grad_by_list_with_sens(self.net, self.weights)(x, y, sens)
    net = GradNet(Net())
    x = Tensor(np.ones([4, 4, 5]).astype(np.float32), mstype.float32)
    y = Tensor(np.array([3]).astype(np.float32), mstype.float32)
    sens = Tensor(np.ones([4, 4, 5]).astype(np.float32), mstype.float32)
    net(x, y, sens)
836
837
class TensorAssignWithSliceError1(Cell):
    """Assigns into the negative-stride slice `a[1:3:-1, ::]`.

    Only referenced from commented-out code in test_tensor_assign_exception;
    per the note there, NumPy accepts the same assignment as a no-op.
    """

    def construct(self, a, b):
        a[1:3:-1, ::] = b
        return a
842
843
class TensorAssignWithSliceError2(Cell):
    """Assigns into the 1-D negative-stride slice `a[1:3:-1]`.

    Only referenced from commented-out code in test_tensor_assign_exception;
    per the note there, NumPy accepts the same assignment as a no-op.
    """

    def construct(self, a, b):
        a[1:3:-1] = b
        return a
848
849
class TensorAssignWithSlice2(Cell):
    """Applies a fixed sequence of 1-D slice assignments; order is significant.

    The companion test replays the same sequence on a NumPy array and
    compares the results.
    """

    def construct(self, a, b, ck):
        a[1:5] = b
        a[3:4] = 5
        # Negative strides write into reversed sub-ranges.
        a[-1:1:-1] = b
        a[-1:3:-1] = 5
        a[::] = b
        a[::] = 9
        z = a + ck
        return z
860
861
class TensorAssignWithSlice(Cell):
    """Applies a fixed sequence of slice assignments on a 3-D tensor.

    The assignments are order-dependent; the companion test replays the same
    sequence in NumPy and compares the results.
    """

    def __init__(self):
        super(TensorAssignWithSlice, self).__init__()
        self.c = 2.0  # scalar fill value

    def construct(self, a, b, ck):
        a[1:3, ::] = b
        a[2:3:, 3:] = b
        a[::] = b
        a[::] = self.c
        a[::, ::] = b
        a[::, ::] = self.c
        # Negative stride writes into a reversed sub-range of the last axis.
        a[2:3:, 0:, 4:1:-1] = b
        a[2:3:, 0:, 4:1:-1] = self.c
        z = a + ck
        return z
878
879
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_assign_slice_value_1():
    """Slice assignments on a 3-D tensor must match the NumPy equivalents."""
    net = TensorAssignWithSlice()
    ref = np.arange(60).reshape(3, 4, 5)
    fill = np.array([1]).astype(np.float32)
    base = np.arange(60).reshape(3, 4, 5)
    out = net(Tensor(ref, dtype=mstype.float32),
              Tensor(fill, dtype=mstype.float32),
              Tensor(base, dtype=mstype.float32))
    # Mirror the exact assignment sequence performed inside the net.
    ref[1:3, ::] = fill
    ref[2:3:, 3:] = fill
    ref[::] = fill
    ref[::] = 2.0
    ref[::, ::] = fill
    ref[::, ::] = 2.0
    ref[2:3:, 0:, 4:1:-1] = fill
    ref[2:3:, 0:, 4:1:-1] = 2.0
    assert np.all(ref + base == out.asnumpy())
904
905
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_assign_slice_value_2():
    """1-D slice assignments (incl. negative strides) must match NumPy."""
    net2 = TensorAssignWithSlice2()
    ref = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    base = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    fill = np.array([1]).astype(np.float32)
    out = net2(Tensor(ref, dtype=mstype.float32),
               Tensor(fill, dtype=mstype.float32),
               Tensor(base, dtype=mstype.float32))
    # Mirror the exact assignment sequence performed inside the net.
    ref[1:5] = fill
    ref[3:4] = 5
    ref[-1:1:-1] = fill
    ref[-1:3:-1] = 5
    ref[::] = fill
    ref[::] = 9
    assert np.all(ref + base == out.asnumpy())
928
929
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_assign_exception():
    """Invalid slice/index assignments must raise ValueError or IndexError.

    Covers mismatched value sizes, empty value tensors, and out-of-range
    integer indices for slice, integer, tuple, and ellipsis indexing.
    """
    net = TensorAssignWithSlice()
    net2 = TensorAssignWithSlice2()
    # The test case is no longer appropriate since x[1:3:-1] = np.array(2) does
    # not incur an error in numpy, which leaves the original array unchanged after
    # the assign operation.
    # net_e1 = TensorAssignWithSliceError1()
    # net_e2 = TensorAssignWithSliceError2()
    a = np.arange(60).reshape(3, 4, 5)
    ck = np.arange(60).reshape(3, 4, 5)
    b = Tensor([1], dtype=mstype.float32)
    Ta = Tensor(a, dtype=mstype.float32)
    Tck = Tensor(ck, dtype=mstype.float32)
    Ta4d = Tensor(a.reshape(1, 3, 4, 5), dtype=mstype.float32)
    Ta4d_ck = Tensor(ck.reshape(1, 3, 4, 5), dtype=mstype.float32)
    Tb = Tensor([1, 3], dtype=mstype.float32)  # wrong-sized value tensor
    Tc = Tensor([], dtype=mstype.float32)  # empty value tensor
    t = Tensor([1, 2, 3, 4, 5, 6, 7, 8], dtype=mstype.float32)
    tck = Tensor([1, 2, 3, 4, 5, 6, 7, 8], dtype=mstype.float32)
    # Error for A[Slice] = Number
    # 1. A[Slice] = Number,  Slice error
    # with pytest.raises(ValueError):
    #     net_e2(t, 2)

    # Error for A[Slice] = U, U is a Tensor
    # 1. A[Slice] = U,  u.size is error
    with pytest.raises(ValueError):
        net2(t, Tb, tck)
    # 2. A[Slice] = U, U is empty
    with pytest.raises(ValueError):
        net2(t, Tc, tck)
    # 3. A[Slice] = U, U.size error
    # NOTE(review): identical call to case 1 above — looks like a duplicated check.
    with pytest.raises(ValueError):
        net2(t, Tb, tck)

    # Error for A[Tuple(Slice...)] = Tensor
    # 1. A[Tuple(Slice...)] = U, U is empty
    with pytest.raises(ValueError):
        net(Ta, Tc, Tck)
    # 2. A[Tuple(Slice...)] = U, U.size error
    with pytest.raises(ValueError):
        net(Ta, Tb, Tck)
    # 3. A[Tuple(Slice...)] = U,  Slice error
    # with pytest.raises(IndexError):
    #     net_e1(Ta, b)

    # Error for A[Tuple(Slice...)] = Number
    # 1. A[Tuple(Slice...)] = Number,  Slice error
    # with pytest.raises(IndexError):
    #     net_e1(Ta, 2)

    net = TensorAssignWithInteger()
    # Error for A[Number] = scalar/Tensor
    # 1. A[Number] = U, U is a Tensor, u.size not match
    with pytest.raises(ValueError):
        net(Ta, Tb, Tck)
    with pytest.raises(ValueError):
        net(Ta, Tc, Tck)
    # 2. A[Number] = U, the number index error
    with pytest.raises(IndexError):
        net(Ta4d, b, Ta4d_ck)

    # Error for A[(n,m)] = scalar/Tensor
    # 1. A[(n,m)] = U, U is a tensor. u.size not match
    net = TensorAssignWithTupleInteger()
    with pytest.raises(ValueError):
        net(Ta, Tc, Tck)
    with pytest.raises(ValueError):
        net(Ta, Tb, Tck)
    # 2. A[(n,m)] = U, the number index error
    with pytest.raises(IndexError):
        net(Ta4d, b, Ta4d_ck)

    # Error for  A[...] = U or A[1:, ...] = u
    # 1. A[...] = scalar/tensor
    net = TensorAssignWithEllipsis()
    net(Ta, Ta4d)
    with pytest.raises(ValueError):
        net(Ta, Tc)
    with pytest.raises(ValueError):
        net(Ta, Tb)
    # 2. A[::, 1:, ...] = scalar/tensor
    net = TensorAssignWithTupleEllipsis()
    net(Ta, b)
    with pytest.raises(ValueError):
        net(Ta, Tb)
1021
1022
class TensorAssignWithTupleEllipsis2(Cell):
    """Assigns through a (slice, ellipsis, slice) index tuple."""

    def construct(self, a, b):
        a[1:, ..., ::] = b
        return a
1027
1028
class TensorAssignWithTupleEllipsis(Cell):
    """Assigns a scalar, then a tensor, through (slice, ellipsis) indices."""

    def construct(self, a, b):
        a[:2, ...] = 1.0
        a[1:, ...] = b
        return a
1034
1035
class TensorAssignWithEllipsis(Cell):
    """Assigns to the whole tensor through a bare ellipsis index."""

    def construct(self, a, b):
        a[...] = 1
        a[...] = b
        return a
1041
1042
class TensorAssignWithInteger(Cell):
    """Assigns a scalar and a tensor through single integer indices."""

    def construct(self, a, b, ck):
        a[1] = 1
        a[0] = b
        z = a + ck
        return z
1049
1050
class TensorAssignWithTupleInteger(Cell):
    """Integer and tuple-of-integer index assignments.

    Note: `a[(1)]` is identical to `a[1]` — parentheses around a single
    integer do not create a tuple; only `a[(1, 1)]` is a tuple index.
    """

    def construct(self, a, b, ck):
        a[(1)] = 1
        a[(1)] = b
        a[(1, 1)] = b
        a[(1, 1)] = 1
        z = a + ck
        return z
1059
1060
class TensorAssignWithBoolTensorIndex(Cell):
    """Assigns through boolean-mask tensor indices (scalar and tensor values)."""

    def __init__(self):
        super(TensorAssignWithBoolTensorIndex, self).__init__()
        self.t = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
        self.u_scalar = 5

    def construct(self, a, b, c, u_tensor):
        # Scalar write through mask c first, then tensor write through mask b.
        a[c] = self.u_scalar
        a[b] = u_tensor
        return a + self.t
1072
1073
class TensorAssignWithBoolTensorIndexError(Cell):
    """Chained boolean-mask assignment `a[b][c] = u`; the error tests expect
    this indexing form to raise IndexError."""

    def construct(self, a, b, c, u_tensor):
        a[b][c] = u_tensor
        return a
1078
1079
class TensorAssignWithBoolTensorIndex2(Cell):
    """Assigns through masks computed from `a` itself.

    Each comparison sees the result of the previous assignment, so the
    statement order is significant.
    """

    def __init__(self):
        super(TensorAssignWithBoolTensorIndex2, self).__init__()
        self.t = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
        self.u_scalar = 5  # scalar fill value

    def construct(self, a, u_tensor):
        a[a > 8] = u_tensor
        a[a >= 6] = self.u_scalar
        a[a < 3] = self.u_scalar
        a[a <= 5] = u_tensor
        a[a == 5] = self.u_scalar
        z = a + self.t
        return z
1094
1095
class TensorAssignWithBoolTensorIndex2Error(Cell):
    """Chained mask assignment `a[a > 8][a > 5] = u`; the error tests expect
    this indexing form to raise IndexError."""

    def construct(self, a, u_tensor):
        a[a > 8][a > 5] = u_tensor
        return a
1100
1101
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_assign_bool_index_0():
    """Boolean-mask assignment must match the NumPy equivalent."""
    data = np.arange(60).reshape(3, 4, 5)
    mask_gt = data > 5
    mask_lt = data < 3
    net1 = TensorAssignWithBoolTensorIndex()
    out = net1(Tensor(data, dtype=mstype.float32), Tensor(mask_gt),
               Tensor(mask_lt), Tensor([1], dtype=mstype.float32))
    # NumPy reference: same writes in the same order as the net.
    expected = np.arange(60).reshape(3, 4, 5)
    expected[mask_lt] = 5
    expected[mask_gt] = 1
    expected = expected + np.ones([3, 4, 5])
    assert np.all(out.asnumpy() == expected)
1122
1123
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_assign_bool_index_1():
    """Masks recomputed between assignments must match NumPy behaviour."""
    data = np.arange(60).reshape(3, 4, 5)
    net2 = TensorAssignWithBoolTensorIndex2()
    out = net2(Tensor(data, dtype=mstype.float32), Tensor([1], dtype=mstype.float32))
    # Replay the same mask/value sequence on a NumPy array. Each mask is
    # recomputed from the partially updated array, exactly as in the net.
    expected = np.arange(60).reshape(3, 4, 5)
    expected[expected > 8] = 1
    expected[expected >= 6] = 5
    expected[expected < 3] = 5
    expected[expected <= 5] = 1
    expected[expected == 5] = 5
    expected = expected + np.ones([3, 4, 5])
    assert np.all(out.asnumpy() == expected)
1143
1144
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_assign_bool_index_exception():
    """Invalid boolean-mask assignments must raise ValueError or IndexError."""
    a = np.arange(60).reshape(3, 4, 5)
    b = a > 5
    c = a < 3
    Ta = Tensor(a, dtype=mstype.float32)
    Tb = Tensor(b)
    Tc = Tensor(c)
    Td = Tensor([True, True])  # mask whose shape does not match Ta
    u_tensor = Tensor([1], dtype=mstype.float32)
    u_tensor_error = Tensor([1, 2], dtype=mstype.float32)  # wrong-sized value
    u_scalar = 5
    net1 = TensorAssignWithBoolTensorIndex()
    net2 = TensorAssignWithBoolTensorIndex2()
    # Mask shape mismatch.
    with pytest.raises(ValueError):
        net1(Ta, Td, Tc, u_tensor)
    # Float tensor passed where a bool mask is expected.
    with pytest.raises(IndexError):
        net1(Ta, u_tensor, Tc, u_tensor)
    with pytest.raises(ValueError):
        net1(Ta, Tb, Td, u_tensor)
    with pytest.raises(IndexError):
        net1(Ta, Tb, Ta, u_tensor)
    # Value tensor size does not fit the masked selection.
    with pytest.raises(ValueError):
        net1(Ta, Tb, Tc, u_tensor_error)
    # net1(Ta, u_tensor, Tc, u_tensor_error, u_scalar)
    with pytest.raises(ValueError):
        net2(Ta, u_tensor_error)
    # Chained (double) mask indexing is rejected.
    net3 = TensorAssignWithBoolTensorIndexError()
    with pytest.raises(IndexError):
        net3(Ta, Tb, Tc, u_tensor)
    with pytest.raises(IndexError):
        net3(Ta, Tb, Tc, u_scalar)
    net4 = TensorAssignWithBoolTensorIndex2Error()
    with pytest.raises(IndexError):
        net4(Ta, u_tensor)
    with pytest.raises(IndexError):
        net4(Ta, u_scalar)
1186
1187
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_slice_reduce_out_of_bounds_neg():
    """An integer index below -dim on axis 0 must raise IndexError.

    The fixture net previously carried an unused `tensor_ret` attribute and a
    boilerplate `__init__`; both are removed, as the default Cell constructor
    suffices.
    """
    class NetWork(Cell):
        def construct(self, tensor):
            # -7 is out of range for an axis of length 6 (valid: [-6, 6)).
            ret = tensor[-7, 3, 4]
            return ret

    input_tensor = Tensor(np.ones([6, 8, 10], np.int32))
    net = NetWork()
    with pytest.raises(IndexError) as ex:
        net(input_tensor)
    assert "'begin' should be in [-6, 6) when 'shrink_axis_mask' is greater than 0, " \
           "but got 'shrink_axis_mask': 7, 'strides': 1, 'begin': -7." in str(ex.value)
1209
1210
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_slice_reduce_out_of_bounds_positive():
    """An integer index equal to the axis length must raise IndexError.

    The fixture net previously carried an unused `tensor_ret` attribute and a
    boilerplate `__init__`; both are removed, as the default Cell constructor
    suffices.
    """
    class NetWork(Cell):
        def construct(self, tensor):
            # 6 is out of range for an axis of length 6 (valid: [-6, 6)).
            ret = tensor[6, 3, 4]
            return ret

    input_tensor = Tensor(np.ones([6, 8, 10], np.int32))
    net = NetWork()
    with pytest.raises(IndexError) as ex:
        net(input_tensor)
    assert "'begin' should be in [-6, 6) when 'shrink_axis_mask' is greater than 0, " \
           "but got 'shrink_axis_mask': 7, 'strides': 1, 'begin': 6." in str(ex.value)
1232
1233
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_range():
    """Iterating a Tensor must yield the same items as iterating its ndarray.

    Fix: assert the element counts match first — the original enumerate-based
    comparison would silently pass if the Tensor iteration yielded fewer
    items than the NumPy iteration.
    """
    a = np.arange(4*5*6).reshape(4, 5, 6).astype(np.float32)
    ta = Tensor(a, mstype.float32)
    ms_out = list(ta)
    np_out = list(a)
    assert len(ms_out) == len(np_out)
    for ms_item, np_item in zip(ms_out, np_out):
        assert np.all(ms_item.asnumpy() == np_item)
1250