• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2020-2021 Huawei Technologies Co., Ltd
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ============================================================================
15"""array operations, the function docs are adapted from Numpy API."""
16from __future__ import absolute_import
17
18import operator
19
20from mindspore.common import dtype as mstype
21from mindspore.common import Tensor, mutable
22from mindspore.ops import operations as P
23from mindspore.ops import functional as F
24from mindspore.ops.primitive import constexpr, _primexpr
25from mindspore.nn import Cell
26from mindspore import ops
27
28from mindspore.numpy.utils import _convert_list_tensor_to_tuple_tensor, _expand, _broadcast_to_shape, \
29    _check_input_tensor, _broadcast_to, _to_tensor, _callable
30from mindspore.numpy.utils_const import _check_axes_range, _check_start_normalize, \
31    _raise_type_error, _raise_value_error, _infer_out_shape, _empty, _promote, \
32    _check_same_type, _check_axis_valid, _add_unit_axes, _broadcast_tuples, \
33    _check_is_float, _check_axis_in_range, _check_axis_type, _canonicalize_axis, \
34    _list_comprehensions, _check_element_int, _is_shape_empty, _type_convert, \
35    _tuple_slice, _expanded_shape, _seq_prod, _tuple_setitem, _iota, \
36    _raise_unimplemented_error, _cumprod, _get_device, _check_is_int
37
38
# According to official numpy reference, the dimension of a numpy array must be less
# than 32. Functions below (see `concatenate`) treat an `axis` >= this bound
# as if it were None, to match numpy behaviour.
MAX_NUMPY_DIMS = 32
42
43
def expand_dims(a, axis):
    """
    Expands the shape of a tensor.

    Inserts a new axis (or several, when `axis` is a sequence) at the given
    position(s) in the expanded tensor shape.

    Args:
        a (Tensor): Input tensor array.
        axis (Union[int, list(int), tuple(int)]): Position(s) in the expanded
            axes where the new axis is placed.

    Returns:
        Tensor, with the number of dimensions increased at the specified axis.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If axis exceeds a.ndim.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((2,2))
        >>> x = np.expand_dims(x,0)
        >>> print(x.shape)
        (1, 2, 2)
    """
    _check_input_tensor(a)
    if not isinstance(axis, (int, tuple, list)):
        _raise_type_error("axis must be tuple, list or int, but got ", axis)
    # Fast path: a single integer axis maps directly onto the primitive op.
    if isinstance(axis, int):
        return F.expand_dims(a, axis)
    # Sequence of axes: positions are interpreted against the final rank.
    expanded = a
    target_ndim = expanded.ndim + len(axis)
    for position in _canonicalize_axis(axis, target_ndim):
        expanded = F.expand_dims(expanded, position)
    return expanded
82
83
def squeeze(a, axis=None):
    """
    Removes single-dimensional entries from the shape of a tensor.

    Args:
        a (Tensor): Input tensor array.
        axis (Union[None, int, list(int), tuple(list)]): The axis (or axes) to
            squeeze. Default: ``None`` .

    Returns:
        Tensor, with all or a subset of the dimensions of length :math:`1` removed.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If specified axis has shape entry :math:`> 1`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((1,2,2,1))
        >>> x = np.squeeze(x)
        >>> print(x.shape)
        (2, 2)
    """
    _check_input_tensor(a)
    # Delegate to the Tensor method, which handles axis=None and sequences.
    squeezed = a.squeeze(axis)
    return squeezed
112
113
def transpose(a, axes=None):
    """
    Reverses or permutes the axes of a tensor; returns the modified tensor.

    Args:
        a (Tensor): a tensor to be transposed
        axes (Union[None, tuple, list]): the axes order; when `axes` is `None`
            the axis order is fully reversed. Default: ``None`` .

    Returns:
        Tensor, the transposed tensor array.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If the number of `axes` is not equal to a.ndim.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((1,2,3))
        >>> x = np.transpose(x)
        >>> print(x.shape)
        (3, 2, 1)
    """
    _check_input_tensor(a)
    # The Tensor method already implements the axes=None (full reverse) case.
    result = a.transpose(axes)
    return result
142
143
def rollaxis(x, axis, start=0):
    """
    Rolls the specified axis backwards, until it lies in the given position.
    The positions of the other axes do not change relative to one another.

    Args:
        x (Tensor): A Tensor to be transposed.
        axis (int): The axis to be rolled.
        start (int): Default: ``0`` .
            If :math:`start <= axis`, the axis is rolled back until it lies in this position (`start`).
            If :math:`start > axis`: the axis is rolled until it lies before this position (`start`).
            If :math:`start < 0`, the start will be normalized as a non-negative number (more details
            can be seen in the source code.)

            .. table
                +===========+=================+
                |start      |Normalized start |
                +===========+=================+
                |-(x.ndim+1)| raise ValueError|
                +-----------+-----------------+
                |-x.ndim    |0                |
                +-----------+-----------------+
                |...        |...              |
                +-----------+-----------------+
                |-1         |x.ndim-1         |
                +-----------+-----------------+
                |...        |...              |
                +-----------+-----------------+
                |x.ndim     |x.ndim           |
                +-----------+-----------------+
                |x.ndim+1   |raise ValueError |
                +===========+=================+
            ..

    Returns:
        Transposed Tensor. Has the same data type as the original tensor `x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `axis` or `start` is not integer, or `x` is not tensor.
        ValueError: If `axis` is not in the range of :math:`[-ndim, ndim-1]` or
            `start` is not in the range of :math:`[-ndim, ndim]`.

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((2,3,4))
        >>> output = np.rollaxis(x, 0, 2)
        >>> print(output.shape)
        (3, 2, 4)
    """
    _check_input_tensor(x)
    if not isinstance(axis, int):
        _raise_type_error("integer argument expected, but got ", axis)
    if not isinstance(start, int):
        _raise_type_error("integer argument expected, but got ", start)

    shape = F.shape(x)
    ndim = F.tuple_len(shape)

    # Both helpers normalize negative values to non-negative indices
    # (axis in [0, ndim-1], start in [0, ndim]) and raise on out-of-range.
    axis = _check_axes_range(axis, ndim)
    start = _check_start_normalize(start, ndim)
    # start == axis or start == axis + 1 leaves the axis where it is: no-op.
    if start - axis >= 0 and start - axis <= 1:
        return x
    perm = F.make_range(0, ndim)
    new_perm = None
    if start < axis:
        # Roll `axis` backwards to position `start`; axes in [start, axis)
        # shift one position to the right.
        if axis + 1 < ndim:
            new_perm = perm[0:start] + perm[axis:axis + 1] + \
                perm[start:axis] + perm[axis+1:]
        else:
            # `axis` is the last axis, so there is no tail after it.
            new_perm = perm[0:start] + perm[axis:axis + 1] + perm[start:axis]
    if start > axis:
        # Roll `axis` forwards so it ends up just before position `start`;
        # axes in (axis, start) shift one position to the left.
        if start < ndim:
            new_perm = perm[0:axis] + perm[axis + 1:start] + \
                perm[axis:axis + 1] + perm[start:]
        else:
            # start == ndim: the rolled axis becomes the last axis.
            new_perm = perm[0:axis] + perm[axis+1:start] + \
                perm[axis:axis + 1]

    return F.transpose(x, new_perm)
226
227
def swapaxes(x, axis1, axis2):
    """
    Interchanges two axes of a tensor.

    Args:
        x (Tensor): A tensor to be transposed.
        axis1 (int): First axis.
        axis2 (int): Second axis.

    Returns:
        Transposed tensor, has the same data type as the original tensor `x`.

    Raises:
        TypeError: If `axis1` or `axis2` is not integer, or `x` is not tensor.
        ValueError: If `axis1` or `axis2` is not in the range of :math:`[-ndim, ndim-1]`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((2,3,4))
        >>> output = np.swapaxes(x, 0, 2)
        >>> print(output.shape)
        (4, 3, 2)
    """
    _check_input_tensor(x)
    # Axis validation happens inside the Tensor method.
    swapped = x.swapaxes(axis1, axis2)
    return swapped
256
257
def reshape(x, new_shape):
    """
    Reshapes a tensor without changing its data.

    Args:
        x (Tensor): A tensor to be reshaped.
        new_shape (Union[int, list(int), tuple(int)]): The new shape should be
            compatible with the original shape. If the tuple has only one element,
            the result will be a 1-D tensor of that length. One shape dimension
            can be :math:`-1`; in that case the value is inferred from the length
            of the tensor and the remaining dimensions.

    Returns:
        Reshaped Tensor. Has the same data type as the original tensor `x`.

    Raises:
        TypeError: If new_shape is not integer, list or tuple, or `x` is not tensor.
        ValueError: If new_shape is not compatible with the original shape.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
        >>> output = np.reshape(x, (3, 2))
        >>> print(output)
        [[-0.1  0.3]
         [ 3.6  0.4]
         [ 0.5 -3.2]]
        >>> output = np.reshape(x, (3, -1))
        >>> print(output)
        [[-0.1  0.3]
         [ 3.6  0.4]
         [ 0.5 -3.2]]
        >>> output = np.reshape(x, (6, ))
        >>> print(output)
        [-0.1  0.3  3.6  0.4  0.5 -3.2]
    """
    _check_input_tensor(x)
    # Shape compatibility (including a single -1 entry) is checked by the
    # Tensor method itself.
    reshaped = x.reshape(new_shape)
    return reshaped
299
300
def ravel(x):
    """
    Returns a contiguous flattened tensor.

    A 1-D tensor containing the elements of the input is returned.

    Args:
        x (Tensor): A tensor to be flattened.

    Returns:
        Flattened tensor, has the same data type as the original tensor `x`.

    Raises:
        TypeError: If `x` is not tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((2,3,4))
        >>> output = np.ravel(x)
        >>> print(output.shape)
        (24,)
    """
    _check_input_tensor(x)
    flat = x.ravel()
    return flat
328
329
@_primexpr
def _move_axes_for_concatenate(arr_shape, axis):
    """
    Moves axis 0 to the designated position, while keeping the other axes'
    relative positions unchanged. Only used when a single tensor is
    concatenated (the tensor is treated as a sequence along its first axis).

    Args:
        arr_shape (tuple): shape of the input tensor.
        axis (int): destination position of the concatenation axis;
            assumed non-negative — negative values would make the tuple
            slices below produce an invalid permutation.

    Returns:
        Tuple (new_axes, new_shape): the transpose permutation that moves
        axis 0 to position `axis`, and the final shape after merging the
        moved dimension into its right-hand neighbour.
    """

    original_axes = tuple(range(len(arr_shape)))
    # Permutation that places the leading axis at position `axis`.
    new_axes = original_axes[1:axis + 1] + (0,) + original_axes[axis + 1:]
    # After the transpose, positions `axis` and `axis + 1` hold arr_shape[0]
    # and arr_shape[axis + 1]; merging them into one dimension emulates
    # concatenating arr_shape[0] sub-tensors along `axis`.
    new_shape = arr_shape[1:axis + 1] + (arr_shape[0] * arr_shape[axis + 1],) + \
        arr_shape[axis + 2:]
    return new_axes, new_shape
342
343
def _promote_type_for_concatenate(tuple_of_tensors):
    """
    Checks the dtype of every tensor in the tuple. If the dtypes differ,
    promotes them all to the `highest` dtype present, so that they are ready
    for the concat operator.

    Args:
        tuple_of_tensors(tuple(tensor)): A tuple of tensors

    Returns:
        tuple of tensors, with each tensor promoted to the same dtype.
    """
    # First pass: compute the common promoted dtype and remember whether any
    # tensor actually deviates from it.
    common_type = tuple_of_tensors[0].dtype
    cast_needed = False
    for tensor in tuple_of_tensors:
        if not _check_same_type(common_type, tensor.dtype):
            cast_needed = True
        common_type = _promote(common_type, tensor.dtype)

    if not cast_needed:
        return tuple_of_tensors
    # Second pass: cast every tensor to the common dtype.
    promoted = ()
    for tensor in tuple_of_tensors:
        promoted += (tensor.astype(common_type, False),)
    return promoted
370
371
def concatenate(arrays, axis=0):
    """
    Joins a sequence of tensors along an existing axis.

    Note:
        To match Numpy behaviour, :math:`axis >= 32` will not cause value error, the
        `axis` will be treated as ``None`` instead.

    Args:
        arrays (Union[Tensor, tuple(Tensor), list(Tensor)]): a tensor or a list
            of tensors to be concatenated.
        axis (Union[None, int], optional): The axis along which the tensors will be joined,
            if `axis` is ``None``, tensors are flattened before use. Default: ``0`` .

    Returns:
        A tensor concatenated from a tensor or a list of tensors.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If `axis` is not in the range of :math:`[-ndim, ndim-1]`, and less than 32.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.ones((1,2,3))
        >>> x2 = np.ones((1,2,1))
        >>> x = np.concatenate((x1, x2), axis=-1)
        >>> print(x.shape)
        (1, 2, 4)
    """
    if isinstance(arrays, Tensor):
        # if only one tensor is provided, it is treated as a tuple along the
        # first dimension. For example, a tensor of shape (3,4,5) will be treated
        # as: tuple(tensor_1(4,5), tensor_2(4,5), tensor_3(4,5))
        if axis is None or axis >= MAX_NUMPY_DIMS:
            return ravel(arrays)
        arr_shape = F.shape(arrays)
        _check_axes_range((axis,), len(arr_shape))
        # BUGFIX: normalize a negative axis before the tuple-slice arithmetic
        # below. _move_axes_for_concatenate slices with `axis` directly
        # (e.g. arr_shape[1:axis + 1]), so a negative value such as -1 used to
        # yield an invalid permutation/shape instead of the numpy-equivalent
        # result.
        if axis < 0:
            axis += len(arr_shape)
        # move axis 0 to the designated position, while keep other axes' relative
        # positions unchanged
        new_axes, new_shape = _move_axes_for_concatenate(arr_shape, axis)
        arrays = transpose(arrays, new_axes)
        arrays = reshape(arrays, new_shape)
        return arrays

    flattened_arrays = ()
    if axis is None or axis >= MAX_NUMPY_DIMS:
        # axis=None (or an out-of-numpy-range axis): flatten every input and
        # concatenate the resulting 1-D tensors.
        for arr in arrays:
            flattened_arrays += (ravel(arr),)
        axis = -1
        flattened_arrays = _promote_type_for_concatenate(flattened_arrays)
        return P.Concat(axis)(flattened_arrays)

    # convert a list of tensor to a tuple of tensor
    arrays = _convert_list_tensor_to_tuple_tensor(arrays)

    arr_shape = F.shape(arrays[0])
    _check_axes_range((axis,), len(arr_shape))

    # if only one tensor in the tuple/list, return the tensor itself
    if len(arrays) == 1:
        return arrays[0]

    arrays = _promote_type_for_concatenate(arrays)
    return P.Concat(axis)(arrays)
439
440
def append(arr, values, axis=None):
    """
    Appends values to the end of a tensor.

    Args:
        arr (Tensor): Values are appended to a copy of this tensor.
        values (Tensor): These values are appended to a copy of `arr`. It must be of
            the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is
            not specified, `values` can be any shape and will be flattened before use.
        axis (None, int, optional): The `axis` along which values are appended. If `axis` is not
            given, both `arr` and `values` are flattened before use, default is ``None``.

    Returns:
        Tensor, a copy of tensor with values appended to axis.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If specified axis exceeds `arr.ndim`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((2, 3))
        >>> b = np.ones((2, 1))
        >>> print(np.append(a, b, axis=1).shape)
        (2, 4)
    """
    _check_input_tensor(arr)
    _check_input_tensor(values)
    if axis is not None:
        # Appending along an explicit axis: only validate its range here;
        # shape compatibility is checked by concatenate below.
        _check_axis_in_range(axis, arr.ndim)
    else:
        # No axis given: numpy semantics flatten both operands first.
        arr = arr.ravel()
        values = values.ravel()
    if F.rank(arr) != F.rank(values):
        _raise_value_error("all tensors must have same number of dimensions")
    return concatenate((arr, values), axis)
480
481
def column_stack(tup):
    """
    Stacks 1-D tensors as columns into a 2-D tensor. 2-D tensors are stacked as-is,
    like np.hstack.

    Args:
        tup (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. All
            of them must have the same shape except the axis to be concatenated.

    Returns:
        2-D Tensor, formed by stacking the given tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `tup` is not Tensor, list or tuple.
        ValueError: If `tup` is empty.

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([1, 2, 3]).astype('int32')
        >>> x2 = np.array([4, 5, 6]).astype('int32')
        >>> output = np.column_stack((x1, x2))
        >>> print(output)
        [[1 4]
         [2 5]
         [3 6]]
    """
    if isinstance(tup, Tensor):
        return tup
    if not isinstance(tup, (list, tuple)):
        _raise_type_error("Tensor or, list or tuple of tensors are required, but got ", tup)

    # Promote each input to 2-D: scalars become (1, 1), 1-D tensors become
    # column vectors of shape (N, 1); 2-D tensors are left untouched.
    columns = ()
    for tensor in tup:
        if tensor.ndim < 1:
            tensor = F.expand_dims(tensor, 0)
        if tensor.ndim == 1:
            tensor = F.expand_dims(tensor, 1)
        columns += (tensor,)
    if not columns:
        _raise_value_error("Need at least one tensor to concatenate.")
    return P.Concat(1)(columns)
526
527
def vstack(tup):
    """
    Stacks tensors in sequence vertically.
    This is equivalent to concatenation along the first axis. 1-D tensors are first
    reshaped to `(1, N)` and then concatenated along the first axis.

    Args:
        tup (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. The tensors must have the same shape
            along all but the first axis. 1-D tensors must have the same shape.

    Returns:
        Stacked Tensor, formed by stacking the given tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `tup` is not Tensor, list or tuple.
        ValueError: If `tup` is empty.

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([1, 2, 3]).astype('int32')
        >>> x2 = np.array([4, 5, 6]).astype('int32')
        >>> output = np.vstack((x1, x2))
        >>> print(output)
        [[1 2 3]
         [4 5 6]]
    """
    if isinstance(tup, Tensor):
        return tup
    if not isinstance(tup, (list, tuple)):
        _raise_type_error("Tensor or, list or tuple of tensors are required, but got", tup)

    # Promote 0-D/1-D inputs to rank 2 by prepending unit axes, then
    # concatenate everything along the first axis.
    rows = ()
    for tensor in tup:
        if tensor.ndim <= 1:
            tensor = _expand(tensor, 2, 0)
        rows += (tensor,)
    if not rows:
        _raise_value_error("Need at least one tensor to concatenate.")
    return P.Concat(0)(rows)
570
571
def hstack(tup):
    """
    Stacks tensors in sequence horizontally.
    This is equivalent to concatenation along the second axis, except for 1-D tensors
    where it concatenates along the first axis.

    Args:
        tup (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. The
            tensors must have the same shape along all but the second axis, except
            1-D tensors which can be any length.

    Returns:
        Stacked Tensor, formed by stacking the given tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `tup` is not Tensor, list or tuple.
        ValueError: If `tup` is empty.

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([1, 2, 3]).astype('float32')
        >>> x2 = np.array([4, 5, 6]).astype('float32')
        >>> output = np.hstack((x1, x2))
        >>> print(output)
        [1. 2. 3. 4. 5. 6.]
    """
    if isinstance(tup, Tensor):
        return tup
    if not isinstance(tup, (list, tuple)):
        _raise_type_error("Tensor or, list or tuple of tensors are required, but got", tup)

    # Promote scalars to 1-D; higher-rank tensors pass through unchanged.
    pieces = ()
    for tensor in tup:
        if tensor.ndim < 1:
            tensor = F.expand_dims(tensor, 0)
        pieces += (tensor,)
    if not pieces:
        _raise_value_error("Need at least one tensor to concatenate.")
    # 1-D inputs concatenate along axis 0; everything else along axis 1.
    concat_axis = 0 if pieces[0].ndim <= 1 else 1
    return P.Concat(concat_axis)(pieces)
616
617
def dstack(tup):
    """
    Stacks tensors in sequence depth wise (along the third axis).
    This is equivalent to concatenation along the third axis. 1-D tensors :math:`(N,)` are
    reshaped to :math:`(1,N,1)`, and 2-D tensors :math:`(M,N)` are reshaped to
    :math:`(M,N,1)` before concatenation.

    Args:
        tup (Union[Tensor, tuple, list]): A sequence of tensors. The tensors must have the same shape along all but
            the third axis. 1-D or 2-D tensors must have the same shape.

    Returns:
        Stacked Tensor, formed by stacking the given tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `tup` is not Tensor, list or tuple.
        ValueError: If `tup` is empty.

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([1, 2, 3]).astype('float32')
        >>> x2 = np.array([4, 5, 6]).astype('float32')
        >>> output = np.dstack((x1, x2))
        >>> print(output)
        [[[1. 4.]
          [2. 5.]
          [3. 6.]]]
    """
    if isinstance(tup, Tensor):
        return tup
    if not isinstance(tup, (list, tuple)):
        _raise_type_error("Tensor or list or tuple of tensors are required, but got", tup)

    # Promote every input to rank 3: 0-D/1-D get leading unit axes first,
    # then any 2-D result gets a trailing unit axis.
    slabs = ()
    for tensor in tup:
        if tensor.ndim <= 1:
            tensor = _expand(tensor, 2, 0)
        if tensor.ndim == 2:
            tensor = F.expand_dims(tensor, 2)
        slabs += (tensor,)
    if not slabs:
        _raise_value_error("Need at least one tensor to concatenate.")
    return P.Concat(2)(slabs)
664
665
def where(condition, x=None, y=None):
    """
    Returns elements chosen from `x` or `y` depending on `condition`.

    Note:
        As nonzero is not supported, both `x` and `y` must be provided Tensor
    input.

    Args:
        condition (Tensor): where True, yield `x`, otherwise yield `y`.
        x (Tensor): Values from which to choose. Default: ``None`` .
        y (Tensor): Values from which to choose. `x`, `y` and `condition` need
            to be broadcastable to some shape. Default: ``None`` .

    Returns:
        Tensor or scalar, with elements from `x` where `condition` is True, and
        elements from `y` elsewhere.

    Raises:
        ValueError: If operands cannot be broadcast.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> condition = np.full((1, 1, 2), [False, True])
        >>> x = np.full((1, 3, 2), 5)
        >>> y = np.full((2, 1, 1), 7)
        >>> output = np.where(condition, x, y)
        >>> print(output)
        [[[7 5]
        [7 5]
        [7 5]]
        [[7 5]
        [7 5]
        [7 5]]]
    """
    condition, x, y = _to_tensor(condition, x, y)
    # type promotes input tensors: cast both branches to their common dtype
    dtype1 = F.dtype(x)
    dtype2 = F.dtype(y)
    dtype = _promote(dtype1, dtype2)
    if not _check_same_type(dtype1, dtype):
        x = F.cast(x, dtype)
    if not _check_same_type(dtype2, dtype):
        y = F.cast(y, dtype)
    is_bool = _check_same_type(dtype1, mstype.bool_) and _check_same_type(dtype2, mstype.bool_)
    if is_bool:
        # select does not support bool type for x or y; compute in float32
        # and cast the result back to bool at the end.
        x = F.cast(x, mstype.float32)
        y = F.cast(y, mstype.float32)

    # Dynamic shapes: any of the three operands may have unknown dimensions
    # at graph-compile time, in which case the static broadcast path below
    # cannot be used.
    dynamic = F.is_sequence_value_unknown(F.shape(condition)) or F.is_sequence_value_unknown(F.shape(x))\
              or F.is_sequence_value_unknown(F.shape(y))
    # As select op currently does not support broadcast, broadcasts input tensors
    if not dynamic:
        # Static path: infer the broadcast shape from the known shapes and
        # broadcast each operand explicitly.
        shape_out = _infer_out_shape(F.shape(condition),
                                     F.shape(x), F.shape(y))
        condition = _broadcast_to_shape(condition, shape_out)
        x = _broadcast_to_shape(x, shape_out)
        y = _broadcast_to_shape(y, shape_out)
    else:
        # Get the broadcast shape through broadcast calculation: the dummy
        # additions below force the runtime to materialize the broadcast
        # result, whose shape is then read back and reused.
        add_x_y = x + y
        add_out = condition + F.cast(add_x_y, condition.dtype)
        shape_out = P.Shape()(add_out)
        condition = ops.broadcast_to(condition, shape_out)
        x = ops.broadcast_to(x, shape_out)
        y = ops.broadcast_to(y, shape_out)

    # F.select requires a bool condition tensor.
    if not _check_same_type(F.dtype(condition), mstype.bool_):
        condition = F.cast(condition, mstype.bool_)
    res = F.select(condition, x, y)
    if is_bool:
        # Restore the original bool dtype (see cast to float32 above).
        res = F.cast(res, mstype.bool_)
    return res
743
744
def _atleast_xd(ndim, arys):
    """Expands every tensor in `arys` to at least `ndim` dimensions.

    Returns a single tensor when one input is given, otherwise a list.
    """
    _check_input_tensor(*arys)
    expanded = [_expand(tensor, ndim) for tensor in arys]
    if len(expanded) == 1:
        return expanded[0]
    return expanded
755
756
def atleast_1d(*arys):
    """
    Converts inputs to arrays with at least one dimension.

    Scalar inputs are converted to 1-dimensional arrays; higher-dimensional
    inputs are returned unchanged.

    Note:
        In graph mode, returns a tuple of tensor instead of a list of
        tensors.

    Args:
        *arys (Tensor): one or more input tensors.

    Returns:
        Tensor, or list of tensors, each with ``a.ndim >= 1``.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((2, 3))
        >>> b = np.ones(())
        >>> c = np.ones(5)
        >>> output = np.atleast_1d(a, b, c)
        >>> print(output)
            [Tensor(shape=[2, 3], dtype=Float32, value=
            [[1.00000000e+00, 1.00000000e+00, 1.00000000e+00],
            [1.00000000e+00, 1.00000000e+00, 1.00000000e+00]]),
            Tensor(shape=[1], dtype=Float32, value= [1.00000000e+00]),
            Tensor(shape=[5], dtype=Float32,
            value= [1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
            1.00000000e+00, 1.00000000e+00])]
    """
    # Shared implementation handles validation and the single-input case.
    return _atleast_xd(1, arys)
796
797
def atleast_2d(*arys):
    """
    Reshapes inputs as arrays with at least two dimensions.

    Note:
        In graph mode, returns a tuple of tensor instead of a list of
        tensors.

    Args:
        *arys (Tensor): one or more input tensors.

    Returns:
        Tensor, or list of tensors, each with ``a.ndim >= 2``.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((2, 3))
        >>> b = np.ones(())
        >>> c = np.ones(5)
        >>> output = np.atleast_2d(a, b, c)
        >>> print(output)
            [Tensor(shape=[2, 3], dtype=Float32, value=
            [[1.00000000e+00, 1.00000000e+00, 1.00000000e+00],
            [1.00000000e+00, 1.00000000e+00, 1.00000000e+00]]),
            Tensor(shape=[1, 1], dtype=Float32, value= [[1.00000000e+00]]),
            Tensor(shape=[1, 5], dtype=Float32,
            value= [[1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
            1.00000000e+00, 1.00000000e+00]])]
    """
    # Shared implementation handles validation and the single-input case.
    return _atleast_xd(2, arys)
833
834
def atleast_3d(*arys):
    """
    Reshapes inputs as arrays with at least three dimensions.

    Note:
        In graph mode, returns a tuple of tensor instead of a list of
        tensors.

    Args:
        *arys (Tensor): one or more input tensors.

    Returns:
        Tensor, or list of tensors, each with ``a.ndim >= 3``. For example,
        a 1-D array of shape `(N,)` becomes a tensor of shape `(1, N, 1)`, and
        a 2-D array of shape `(M, N)` becomes a tensor of shape `(M, N, 1)`.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((2, 3))
        >>> b = np.ones(())
        >>> c = np.ones(5)
        >>> output = np.atleast_3d(a, b, c)
        >>> print(output)
            [Tensor(shape=[2, 3, 1], dtype=Float32, value=
            [[[1.00000000e+00], [1.00000000e+00], [1.00000000e+00]],
            [[1.00000000e+00], [1.00000000e+00], [1.00000000e+00]]]),
            Tensor(shape=[1, 1, 1], dtype=Float32, value= [[[1.00000000e+00]]]),
            Tensor(shape=[1, 5, 1], dtype=Float32,
            value= [[[1.00000000e+00], [1.00000000e+00], [1.00000000e+00],
            [1.00000000e+00], [1.00000000e+00]]])]
    """
    outputs = []
    for tensor in arys:
        rank = F.rank(tensor)
        # Pad the shape with unit axes, matching numpy's placement rules:
        # () -> (1, 1, 1); (N,) -> (1, N, 1); (M, N) -> (M, N, 1).
        if rank == 0:
            tensor = F.reshape(tensor, (1, 1, 1))
        elif rank == 1:
            tensor = F.reshape(tensor, (1, F.size(tensor), 1))
        elif rank == 2:
            tensor = F.reshape(tensor, F.shape(tensor) + (1,))
        outputs.append(tensor)
    if len(outputs) == 1:
        return outputs[0]
    return outputs
885
886
def stack(arrays, axis=0):
    """
    Joins a sequence of arrays along a new axis.

    The `axis` parameter specifies the index of the new axis in the
    dimensions of the result. For example, if ``axis=0`` it will be the
    first dimension and if ``axis=-1`` it will be the last dimension.

    Note:
        Numpy argument out is not supported.

    Args:
        arrays (sequence of Tensor): Each array must have the same shape.
        axis (int, optional): The axis in the result array along which the
            input arrays are stacked. Default: ``0`` .

    Returns:
        Tensor, The stacked array has one more dimension than the input
        arrays.

    Raises:
        ValueError: If input is not Tensor, tuple, or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> arrays = [np.ones((3, 4)) for _ in range(10)]
        >>> output = np.stack(arrays, axis=0)
        >>> print(output.shape)
        (10, 3, 4)
        >>> output = np.stack(arrays, axis=1)
        >>> print(output.shape)
        (3, 10, 4)
        >>> output = np.stack(arrays, axis=2)
        >>> print(output.shape)
        (3, 4, 10)
    """

    if isinstance(arrays, Tensor):
        # A single tensor is treated as a sequence along its first axis:
        # stacking amounts to moving axis 0 to the requested position.
        arr_shape = F.shape(arrays)
        rank = F.rank(arrays)
        axis = axis % rank
        order = F.make_range(rank)
        permutation = order[1:axis + 1] + (0,) + order[axis + 1:]
        if _is_shape_empty(arr_shape):
            return _empty(mstype.float32, arr_shape[1:axis + 1] + (arr_shape[0],) + arr_shape[axis + 1:])
        return transpose(arrays, permutation)

    if isinstance(arrays, (list, tuple)):
        # The result has one more dimension than each input.
        out_shape = (len(arrays),) + F.shape(arrays[0])
        axis = axis % len(out_shape)
        if _is_shape_empty(out_shape):
            return _empty(mstype.float32, out_shape[1:axis + 1] + (out_shape[0],) + out_shape[axis + 1:])
        # Insert the new axis into each input, then concatenate along it.
        expanded = ()
        for arr in arrays:
            expanded += (F.expand_dims(arr, axis),)
        return concatenate(expanded, axis)
    return _raise_value_error('input arrays must be Tensor, tuple, or list')
948
949
class UniqueNet(Cell):
    """Wraps the `Unique` primitive inside a Cell (model). """

    def __init__(self):
        super(UniqueNet, self).__init__()
        # Instantiate the primitive once so the same operator instance is
        # reused on every call of `construct`.
        self.unique = P.Unique()

    def construct(self, x):
        # Returns the primitive's output tuple: (unique values, inverse
        # indices) — see the caller `unique` below for how it is unpacked.
        return self.unique(x)
959
960
def unique(x, return_inverse=False):
    """
    Finds the unique elements of a tensor. Inputs with more than one
    dimension are flattened before the computation.

    Note:
        Numpy arguments `axis`, `return_index` and `return_counts` are not supported.
        On CPU, this operator must be executed in graph mode.

    Args:
        x (Tensor): The input tensor to be processed.
        return_inverse (bool): If `True`, also return the indices of the unique tensor.
            Default: ``False`` .

    Returns:
        Tensor or tuple of Tensors.
        If `return_inverse` is `False`, return the unique tensor, otherwise return tuple of tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `x` is not tensor.

    Examples:
        >>> import mindspore.numpy as np
        >>> import mindspore as ms
        >>> ms.set_context(mode=ms.GRAPH_MODE)
        >>> input_x = np.asarray([1, 2, 2, 2, 3, 4, 5]).astype('int32')
        >>> output_x = np.unique(input_x)
        >>> print(output_x)
        [1 2 3 4 5]
        >>> output_x = np.unique(input_x, return_inverse=True)
        >>> print(output_x)
        (Tensor(shape=[5], dtype=Int32, value= [ 1, 2, 3, 4, 5]), Tensor(shape=[7], dtype=Int32,
            value= [0, 1, 1, 1, 2, 3, 4]))
    """
    _check_input_tensor(x)
    # The wrapped primitive works on 1-D input; flatten anything larger.
    if F.tuple_len(F.shape(x)) > 1:
        x = ravel(x)
    res = UniqueNet()(x)
    if return_inverse:
        return res
    return res[0]
1006
1007
def roll_along_axis(a, shift, axis):
    """
    Rolls a tensor along a given axis. This is a helper function of np.roll.

    Args:
        a (Tensor): Input tensor.
        shift (int): The number of places the tensor is shifted.
        axis (int): The designated axis for shifting.

    Returns:
        Shifted tensor.
    """
    _check_axis_in_range(axis, a.ndim)
    _check_element_int((shift, axis))
    if axis < 0:
        axis += a.ndim
    # Normalize shift into (-dim, 0]; the tensor is then cut at `shift`
    # (counted from the end) and the two pieces are swapped.
    shift = -(shift % a.shape[axis])
    if shift == 0:
        # A full (or zero) rotation leaves the tensor unchanged.
        return a
    front_begin = ()
    front_end = ()
    back_begin = ()
    back_end = ()
    steps = _list_comprehensions(a.ndim, 1, True)
    for i in F.make_range(a.ndim):
        if i == axis:
            front_begin += (shift,)
            front_end += (a.shape[i],)
            back_begin += (0,)
            back_end += (shift,)
        else:
            front_begin += (0,)
            front_end += (a.shape[i],)
            back_begin += (0,)
            back_end += (a.shape[i],)
    return append(F.strided_slice(a, front_begin, front_end, steps),
                  F.strided_slice(a, back_begin, back_end, steps), axis=axis)
1046
1047
def roll(a, shift, axis=None):
    """
    Rolls a tensor along given axes.

    Elements that roll beyond the last position are re-introduced at the first.

    Args:
        a (Tensor): Input tensor.
        shift (Union[int, tuple(int)]): The number of places by which elements are
            shifted. If a tuple, then axis must be a tuple of the same size, and
            each of the given axes is shifted by the corresponding number. If shift
            is an int while axis is a tuple of integers, then the same value is used
            for all given axes.
        axis (Union[int, tuple(int)], optional): Axis or axes along which elements
            are shifted. By default, the array is flattened before shifting, after
            which the original shape is restored. Default: ``None`` .

    Returns:
        Tensor, with the same shape as a.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If axis exceeds `a.ndim`, or `shift` and `axis` cannot broadcast.

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.reshape(np.arange(12), (3, 4))
        >>> print(np.roll(a, [2,-3], [0,-1]))
            [[ 7  4  5  6]
             [11  8  9 10]
             [ 3  0  1  2]]
    """
    _check_input_tensor(a)
    original_shape = a.shape
    original_dtype = a.dtype
    restore_shape = False
    # F.strided_slice only supports float on cpu, this will change once more
    # supports are added. Complex inputs are left in their own dtype; every
    # other non-float dtype is rolled as float32 and cast back at the end.
    need_cast = (not _check_is_float(original_dtype) and
                 original_dtype not in (mstype.complex64, mstype.complex128))
    if need_cast:
        a = a.astype(mstype.float32)
    if axis is None:
        # Flatten, roll along the single remaining axis, restore shape later.
        restore_shape = True
        axis = 0
        a = a.ravel()
    # Broadcast shift and axis to the same length
    shift, axis = _broadcast_tuples(shift, axis)
    for shift_each, axis_each in zip(shift, axis):
        a = roll_along_axis(a, shift_each, axis_each)
    if restore_shape:
        a = a.reshape(original_shape)
    if need_cast:
        a = a.astype(original_dtype)
    return a
1106
1107
@_primexpr
def _get_moved_perm(ndim, source, destination):
    """
    Helper function for moveaxis, returns permutation after moving axes
    from source to destination.

    `source` and `destination` are equal-length sequences of canonical
    (non-negative) axes; the result is a tuple of length `ndim` suitable
    for F.transpose.
    """
    # Indices that order `destination` ascending, so moved axes are inserted
    # from the smallest target position to the largest.
    dest_sorted_idx = [i for i, _ in sorted(enumerate(destination), key=operator.itemgetter(1))]
    # Axes that are not moved, kept in their original relative order.
    axes_orig = mutable([], True)
    for i in range(ndim):
        if i not in source:
            axes_orig = axes_orig + [i]

    k = 0  # next unconsumed index into axes_orig
    m = 0  # next position to be filled in perm
    perm = []
    for i in dest_sorted_idx:
        # inserts an axis that has been moved, denoted by n, and axes that remain
        # in their original position, indexed from k to k + n - m, into index m in
        # the list of permuted axes
        n = destination[i]
        j = k + n - m
        perm += axes_orig[k:j]
        perm.append(source[i])
        k += n - m
        m = n + 1
    # Any unmoved axes left over go after the last destination position.
    perm += axes_orig[k:]
    return tuple(perm)
1135
1136
@_primexpr
def _get_moved_shape(shape, perm):
    """
    Helper function for moveaxis: applies the permutation `perm` to `shape`
    and returns the permuted shape tuple.
    """
    permuted = [shape[axis] for axis in perm]
    return tuple(permuted)
1144
1145
def moveaxis(a, source, destination):
    """
    Moves axes of an array to new positions.

    All other axes keep their original relative order.

    Args:
        a (Tensor): The array whose axes should be reordered.
        source (int or sequence of ints): Original positions of the
            axes to move. These must be unique.
        destination (int or sequence of ints): Destination positions
            for each of the original axes. These must also be unique.

    Returns:
        Tensor, array with moved axes.

    Raises:
        ValueError: If axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.zeros((3, 4, 5))
        >>> output = np.moveaxis(x, 0, -1)
        >>> print(output.shape)
        (4, 5, 3)
        >>> output = np.moveaxis(x, -1, 0)
        >>> print(output.shape)
        (5, 3, 4)
        >>> output = np.moveaxis(x, [0, 1, 2], [-1, -2, -3])
        >>> print(output.shape)
        (5, 4, 3)
    """
    rank = F.rank(a)
    # Canonicalize both axis sequences to non-negative tuples.
    source = _check_axis_valid(source, rank)
    destination = _check_axis_valid(destination, rank)
    if len(source) != len(destination):
        _raise_value_error('`source` and `destination` arguments must have the same number of elements')
    return F.transpose(a, _get_moved_perm(rank, source, destination))
1190
1191
def tile(a, reps):
    """
    Constructs an array by repeating `a` the number of times given by `reps`.

    If `reps` has length `d`, the result will have dimension of ``max(d, a.ndim)``.
    If ``a.ndim < d``, `a` is promoted to be d-dimensional by prepending new axes.
    So a shape (3,) array is promoted to (1, 3) for 2-D replication, or
    shape (1, 1, 3) for 3-D replication. If this is not the desired behavior,
    promote `a` to d-dimensions manually before calling this function.
    If ``a.ndim > d``, `reps` is promoted to ``a.ndim`` by pre-pending 1's to it. Thus
    for an `a` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as (1, 1, 2, 2).

    Args:
        a (Tensor): The input array.
        reps (int or sequence of ints): The number of repetitions of `a` along
            each axis.

    Returns:
        Tensor, the tiled output array.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([0, 1, 2])
        >>> output = np.tile(a, 2)
        >>> print(output)
        [0 1 2 0 1 2]
        >>> output = np.tile(a, (2, 2))
        >>> print(output)
        [[0 1 2 0 1 2]
        [0 1 2 0 1 2]]
        >>> output = np.tile(a, (2, 1, 2))
        >>> print(output)
        [[[0 1 2 0 1 2]]
        [[0 1 2 0 1 2]]]
    """
    _check_input_tensor(a)
    a_ndim = F.rank(a)
    a_shape = F.shape(a)
    # Pad `reps` with leading 1's up to the tensor's rank.
    reps = _add_unit_axes(reps, a_ndim)
    if _is_shape_empty(a_shape) or _is_shape_empty(reps):
        # Empty input (or zero repetitions): result is an empty tensor whose
        # shape is the element-wise product of the padded shape and reps.
        padded_shape = _add_unit_axes(a_shape, len(reps))
        return _empty(F.dtype(a), _seq_prod(padded_shape, reps))
    return F.tile(a, reps)
1241
1242
@_primexpr
def _check_can_broadcast_to(shape, target_shape):
    """Determines if shape can be broadcast to target_shape."""
    if len(shape) > len(target_shape):
        # Broadcasting never reduces the number of dimensions.
        return False
    # Compare trailing dimensions: each must match or be 1.
    for dim, target_dim in zip(reversed(shape), reversed(target_shape)):
        if dim != 1 and dim != target_dim:
            return False
    return True
1254
1255
def broadcast_to(array, shape):
    """
    Broadcasts an array to a new shape.

    Args:
        array (Tensor): The array to broadcast.
        shape (tuple): The shape of the desired array.

    Returns:
        Tensor, original array broadcast to the given shape.

    Raises:
        ValueError: If array cannot be broadcast to shape.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Example:
        >>> import mindspore.numpy as np
        >>> x = np.array([1, 2, 3])
        >>> output = np.broadcast_to(x, (3, 3))
        >>> print(output)
        [[1 2 3]
        [1 2 3]
        [1 2 3]]
    """
    def _ensure_broadcastable(shape_a, shape):
        # Fail early with a clear error rather than inside the kernel.
        if not _check_can_broadcast_to(shape_a, shape):
            _raise_value_error('cannot broadcast with ', shape)
    _ensure_broadcastable(F.shape(array), shape)
    return _broadcast_to_shape(array, shape)
1288
1289
def broadcast_arrays(*args):
    """
    Broadcasts any number of arrays against each other.

    Note:
        Numpy argument `subok` is not supported.
        In graph mode, returns a tuple of Tensor instead of a list
        of Tensor.

    Args:
        *args (Tensor): The arrays to broadcast.

    Returns:
        List of Tensor.

    Raises:
        ValueError: If arrays cannot be broadcast.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Example:
        >>> import mindspore.numpy as np
        >>> x = np.array([[1,2,3]])
        >>> y = np.array([[4],[5]])
        >>> output = np.broadcast_arrays(x, y)
        >>> print(output)
        [Tensor(shape=[2, 3], dtype=Int32, value=
        [[1, 2, 3],
        [1, 2, 3]]), Tensor(shape=[2, 3], dtype=Int32, value=
        [[4, 4, 4],
        [5, 5, 5]])]
    """
    # The common shape is inferred from all input shapes at once.
    out_shape = _infer_out_shape(*map(F.shape, args))
    broadcasted = []
    for arr in args:
        broadcasted.append(broadcast_to(arr, out_shape))
    return broadcasted
1329
1330
def array_split(x, indices_or_sections, axis=0):
    """
    Splits a tensor into multiple sub-tensors.

    Note:
        Currently, array_split only supports :class:`mindspore.float32` on ``CPU``.

    The only difference between ``np.split`` and ``np.array_split`` is that
    ``np.array_split`` allows indices_or_sections to be an integer that does not
    equally divide the axis. For a tensor of length l that should be split into
    n sections, it returns :math:`l % n` sub-arrays of size :math:`l//n + 1` and
    the rest of size :math:`l//n`.

    Args:
        x (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):
            If integer, :math:`N`, the tensor will be divided into
            :math:`N` tensors along axis.
            If tuple(int), list(int) or of sorted integers,
            the entries indicate where along axis the array is split.
            For example, :math:`[2, 3]` would, for :math:`axis=0`, result in
            three sub-tensors :math:`x[:2]`, :math:`x[2:3]`and :math:`x[3:]`.
            If an index exceeds the dimension of the array along axis,
            an empty sub-array is returned correspondingly.
        axis (int): The axis along which to split. Default: ``0`` .

    Returns:
        A list of sub-tensors.

    Raises:
        TypeError: If argument `indices_or_sections` is not integer,
            tuple(int) or list(int) or argument `axis` is not integer.
        ValueError: If argument `axis` is out of range of :math:`[-x.ndim, x.ndim)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.arange(9).astype("float32")
        >>> output = np.array_split(input_x, 4)
        >>> print(output)
        (Tensor(shape=[3], dtype=Float32,
            value= [ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]),
        Tensor(shape=[2], dtype=Float32,
            value= [ 3.00000000e+00,  4.00000000e+00]),
        Tensor(shape=[2], dtype=Float32,
            value= [ 5.00000000e+00,  6.00000000e+00]),
        Tensor(shape=[2], dtype=Float32,
            value= [ 7.00000000e+00,  8.00000000e+00]))
    """
    # Delegate to the shared splitter; "array_split" permits uneven division.
    return _split(x, indices_or_sections, "array_split", axis)
1383
1384
def split(x, indices_or_sections, axis=0):
    """
    Splits a tensor into multiple sub-tensors along the given axis.

    Args:
        x (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):
            If integer, :math:`N`, the tensor will be divided into
            :math:`N` equal tensors along axis.
            If tuple(int), list(int) or of sorted integers,
            the entries indicate where along axis the array is split.
            For example, :math:`[2, 3]` would, for :math:`axis=0`, result in
            three sub-tensors :math:`x[:2]`, :math:`x[2:3]`and :math:`x[3:]`.
            If an index exceeds the dimension of the array along axis,
            an empty sub-array is returned correspondingly.
        axis (int): The axis along which to split. Default: ``0`` .

    Returns:
        A tuple of sub-tensors.

    Raises:
        TypeError: If argument `indices_or_sections` is not integer,
            tuple(int) or list(int) or argument `axis` is not integer.
        ValueError: If argument `axis` is out of range of :math:`[-x.ndim, x.ndim)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.arange(9).astype("float32")
        >>> output = np.split(input_x, 3)
        >>> print(output)
        (Tensor(shape=[3], dtype=Float32,
          value= [ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]),
         Tensor(shape=[3], dtype=Float32,
          value= [ 3.00000000e+00,  4.00000000e+00,  5.00000000e+00]),
         Tensor(shape=[3], dtype=Float32,
          value= [ 6.00000000e+00,  7.00000000e+00,  8.00000000e+00]))
    """
    # Delegate to the shared splitter; "split" requires equal division.
    return _split(x, indices_or_sections, "split", axis)
1426
1427
def _split(x, indices_or_sections, opname, axis=0):
    """
    Splits a tensor based on ``np.split`` or ``np.array_split``.

    Args:
        x (Tensor): The tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]): Number of
            sections, or the indices along `axis` at which `x` is cut.
        opname (str): Either ``"split"`` (requires equal division) or
            ``"array_split"`` (allows a remainder).
        axis (int): The axis along which to split. Default: 0.

    Returns:
        Tuple or list of sub-tensors.

    Raises:
        TypeError: If `indices_or_sections` or `axis` has a wrong type.
        ValueError: If the split would produce an empty sub-tensor.
    """
    _check_input_tensor(x)
    _ = _check_axis_type(axis, True, False, False)
    axis_new = _canonicalize_axis(axis, x.ndim)
    res = None
    arr_shape = x.shape
    length_along_dim = arr_shape[axis_new]
    if isinstance(indices_or_sections, int):
        if indices_or_sections > length_along_dim:
            _raise_value_error("empty tensor encountered.")
        if opname == "split" or length_along_dim % indices_or_sections == 0:
            res = P.Split(axis_new, indices_or_sections)(x)
        else:
            # array_split with remainder r: the first r sub-tensors receive one
            # extra element. Slice x into a "long" part and a "short" part,
            # then split each part evenly.
            num_long_tensor = length_along_dim % indices_or_sections
            num_short_tensor = indices_or_sections - num_long_tensor
            length1 = num_long_tensor * (length_along_dim // indices_or_sections + 1)
            length2 = length_along_dim - length1
            start1 = _list_comprehensions(F.rank(x), 0, True)
            size1 = _tuple_setitem(arr_shape, axis_new, length1)
            start2 = _tuple_setitem(start1, axis_new, length1)
            size2 = _tuple_setitem(arr_shape, axis_new, length2)
            res = P.Split(axis_new, num_long_tensor)(F.tensor_slice(x, start1, size1)) + \
                P.Split(axis_new, num_short_tensor)(F.tensor_slice(x, start2, size2))

    elif isinstance(indices_or_sections, (list, tuple)) and _check_element_int(indices_or_sections):
        res = _split_sub_tensors(x, indices_or_sections, axis_new)
    else:
        # Implicit string concatenation here: the previous version continued
        # the literal with a backslash, which embedded a run of indentation
        # spaces in the emitted error message.
        _raise_type_error("Argument `indices_or_sections` in `mindspore.numpy.split` "
                          "should be integer, tuple(int) or list(int), but got", indices_or_sections)
    return res
1459
1460
@constexpr
def convert_neg_indices(indices, ndim):
    """Converts negative entries of tuple/list indices to their positive equivalents."""
    converted = []
    for axis in indices:
        converted.append(axis + ndim if axis < 0 else axis)
    return tuple(converted)
1468
1469
def _split_sub_tensors(x, indices, axis):
    """
    Splits the input tensor `x` into multiple sub-tensors
    along the axis according to the given indices.
    """
    dim_size = x.shape[axis]
    indices = convert_neg_indices(indices, dim_size)
    # Append the axis length so the final piece runs to the end.
    indices += (dim_size,)

    sub_tensors = []
    steps = _list_comprehensions(x.ndim, 1, True)
    slice_begin = _list_comprehensions(x.ndim, 0)
    slice_end = _list_comprehensions(x.shape)
    for pos, cut in enumerate(indices):
        # Each piece spans [previous cut, current cut) along `axis`.
        slice_begin[axis] = 0 if pos == 0 else indices[pos - 1]
        slice_end[axis] = cut
        if slice_end[axis] <= slice_begin[axis]:
            _raise_value_error("empty sub-tensor encountered.")
        piece = F.strided_slice(x, _type_convert(tuple, slice_begin), _type_convert(tuple, slice_end), steps)
        sub_tensors.append(piece)
    return sub_tensors
1491
1492
def vsplit(x, indices_or_sections):
    """
    Splits a tensor into multiple sub-tensors vertically (row-wise).
    It is equivalent to split with :math:`axis=0` (default), the array is always
    split along the first axis regardless of the array dimension.

    Args:
        x (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):
            If integer, :math:`N`, the tensor will be divided into
            :math:`N` equal tensors along axis.
            If tuple(int), list(int) or of sorted integers,
            the entries indicate where along axis the array is split.
            For example, :math:`[2, 3]` would, for :math:`axis=0`, result in
            three sub-tensors :math:`x[:2]`, :math:`x[2:3]`and :math:`x[3:]`.
            If an index exceeds the dimension of the array along axis,
            an empty sub-array is returned correspondingly.

    Returns:
        A list of sub-tensors.

    Raises:
        TypeError: If argument `indices_or_sections` is not integer.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.arange(9).reshape((3, 3)).astype('float32')
        >>> output = np.vsplit(input_x, 3)
        >>> print(output)
        (Tensor(shape=[1, 3], dtype=Float32,
          value=[[ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]]),
         Tensor(shape=[1, 3], dtype=Float32,
          value=[[ 3.00000000e+00,  4.00000000e+00,  5.00000000e+00]]),
         Tensor(shape=[1, 3], dtype=Float32,
          value=[[ 6.00000000e+00,  7.00000000e+00,  8.00000000e+00]]))
    """
    # Thin wrapper: always split along the first axis.
    return split(x, indices_or_sections, axis=0)
1533
1534
def hsplit(x, indices_or_sections):
    """
    Splits a tensor into multiple sub-tensors horizontally (column-wise).
    It is equivalent to split with :math:`axis=1` (default), the array is always
    split along the second axis regardless of the array dimension.

    Args:
        x (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):
            If integer, :math:`N`, the tensor will be divided into
            :math:`N` equal tensors along axis.
            If tuple(int), list(int) or of sorted integers,
            the entries indicate where along axis the array is split.
            For example, :math:`[2, 3]` would, for :math:`axis=0`, result in
            three sub-tensors :math:`x[:2]`, :math:`x[2:3]`and :math:`x[3:]`.
            If an index exceeds the dimension of the array along axis,
            an empty sub-array is returned correspondingly.

    Returns:
        A list of sub-tensors.

    Raises:
        TypeError: If argument `indices_or_sections` is not integer.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.arange(6).reshape((2, 3)).astype('float32')
        >>> output = np.hsplit(input_x, 3)
        >>> print(output)
        (Tensor(shape=[2, 1], dtype=Float32,
        value=[[ 0.00000000e+00],
               [ 3.00000000e+00]]),
        Tensor(shape=[2, 1], dtype=Float32,
        value=[[ 1.00000000e+00],
               [ 4.00000000e+00]]),
        Tensor(shape=[2, 1], dtype=Float32,
        value=[[ 2.00000000e+00],
               [ 5.00000000e+00]]))
    """
    # Thin wrapper: always split along the second axis.
    return split(x, indices_or_sections, axis=1)
1578
1579
def dsplit(x, indices_or_sections):
    """
    Splits a tensor into multiple sub-tensors along the 3rd axis (depth).
    It is equivalent to split with :math:`axis=2` (default), the array is always
    split along the third axis regardless of the array dimension.

    Args:
        x (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):
            If integer, :math:`N`, the tensor will be divided into
            :math:`N` equal tensors along axis.
            If tuple(int), list(int) or of sorted integers,
            the entries indicate where along axis the array is split.
            For example, :math:`[2, 3]` would, for :math:`axis=0`, result in
            three sub-tensors :math:`x[:2]`, :math:`x[2:3]`and :math:`x[3:]`.
            If an index exceeds the dimension of the array along axis,
            an empty sub-array is returned correspondingly.

    Returns:
        A list of sub-tensors.

    Raises:
        TypeError: If argument `indices_or_sections` is not integer.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.arange(6).reshape((1, 2, 3)).astype('float32')
        >>> output = np.dsplit(input_x, 3)
        >>> print(output)
        (Tensor(shape=[1, 2, 1], dtype=Float32,
        value=[[[ 0.00000000e+00],
                [ 3.00000000e+00]]]),
        Tensor(shape=[1, 2, 1], dtype=Float32,
        value=[[[ 1.00000000e+00],
                [ 4.00000000e+00]]]),
        Tensor(shape=[1, 2, 1], dtype=Float32,
        value=[[[ 2.00000000e+00],
                [ 5.00000000e+00]]]))
    """
    # Thin wrapper: always split along the third axis.
    return split(x, indices_or_sections, axis=2)
1623
1624
@_primexpr
def _get_flip_start(ndim, shape, axes):
    """Begin indices of the reversed strided slice used by flip."""
    start = []
    for i in range(ndim):
        # Flipped axes start from their last element; others from 0.
        start.append(shape[i] - 1 if i in axes else 0)
    return tuple(start)
1628
1629
@_primexpr
def _get_flip_end(ndim, shape, axes):
    """End indices of the reversed strided slice used by flip."""
    end = []
    for i in range(ndim):
        # One-past-the-end in the walking direction of each axis.
        end.append(-shape[i] - 1 if i in axes else shape[i] + 1)
    return tuple(end)
1633
1634
@_primexpr
def _get_flip_strides(ndim, axes):
    """Strides of the reversed strided slice used by flip (-1 on flipped axes)."""
    strides = []
    for i in range(ndim):
        strides.append(-1 if i in axes else 1)
    return tuple(strides)
1638
1639
def flip(m, axis=None):
    """
    Reverses the order of elements in an array along the given axis.

    The shape of the array is preserved, but the elements are reordered.

    Args:
        m (Tensor): Input array.
        axis (None or int or tuple of integers, optional): Axis or axes along which
            to flip over. The default, ``axis=None``, will flip over all of the axes
            of the input array. If `axis` is negative it counts from the last to
            the first axis. If `axis` is a tuple of integers, flipping is performed on
            all of the axes specified in the tuple.

    Returns:
        Tensor, with the entries of `axis` reversed.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``GPU`` ``CPU``

    Example:
        >>> import mindspore.numpy as np
        >>> A = np.arange(8.0).reshape((2,2,2))
        >>> output = np.flip(A)
        >>> print(output)
        [[[7. 6.]
        [5. 4.]]
        [[3. 2.]
        [1. 0.]]]
        >>> output = np.flip(A, (0, 2))
        >>> print(output)
        [[[5. 4.]
        [7. 6.]]
        [[1. 0.]
        [3. 2.]]]
    """
    _check_input_tensor(m)
    ndim = F.rank(m)
    axes = _check_axis_valid(axis, ndim)
    shape = F.shape(m)
    dtype = F.dtype(m)
    if _is_shape_empty(shape):
        # Nothing to reverse in an empty tensor.
        return m
    # Non-float inputs are processed as float32 and cast back afterwards
    # (mirrors the strided-slice dtype handling used by `roll` above).
    if not _check_is_float(dtype):
        m = m.astype(mstype.float32)
    res = F.strided_slice(m,
                          _get_flip_start(ndim, shape, axes),
                          _get_flip_end(ndim, shape, axes),
                          _get_flip_strides(ndim, axes))
    if not _check_same_type(F.dtype(res), dtype):
        res = F.cast(res, dtype)
    return res
1695
1696
def flipud(m):
    """
    Flips the entries in each column in the up/down direction.
    Rows are preserved, but appear in a different order than before.

    Args:
        m (Tensor): Input array.

    Returns:
        Tensor.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``GPU`` ``CPU``

    Example:
        >>> import mindspore.numpy as np
        >>> A = np.arange(8.0).reshape((2,2,2))
        >>> output = np.flipud(A)
        >>> print(output)
        [[[4. 5.]
        [6. 7.]]
        [[0. 1.]
        [2. 3.]]]
    """
    # Thin wrapper: flip over the first axis only.
    return flip(m, axis=0)
1725
1726
def fliplr(m):
    """
    Flips the entries in each row in the left/right direction.
    Columns are preserved, but appear in a different order than before.

    Args:
        m (Tensor): Input array.

    Returns:
        Tensor.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``GPU`` ``CPU``

    Example:
        >>> import mindspore.numpy as np
        >>> A = np.arange(8.0).reshape((2,2,2))
        >>> output = np.fliplr(A)
        >>> print(output)
        [[[2. 3.]
        [0. 1.]]
        [[6. 7.]
        [4. 5.]]]
    """
    # Thin wrapper: flip over the second axis only.
    return flip(m, axis=1)
1755
1756
def take_along_axis(arr, indices, axis):
    """
    Takes values from the input array by matching 1d index and data slices.

    This iterates over matching 1d slices oriented along the specified axis in the
    index and data arrays, and uses the former to look up values in the latter.
    These slices can be different lengths.

    Args:
        arr (Tensor): Source array with shape `(Ni…, M, Nk…)`.
        indices (Tensor): Indices with shape `(Ni…, J, Nk…)` to take along each 1d
            slice of `arr`. This must match the dimension of `arr`, but dimensions `Ni`
            and `Nj` only need to broadcast against `arr`.
        axis (int): The axis to take 1d slices along. If `axis` is None, the input
            array is treated as if it had first been flattened to 1d.

    Returns:
        Tensor, the indexed result, with shape `(Ni…, J, Nk…)`.

    Raises:
        ValueError: If input array and indices have different number of dimensions.
        TypeError: If the input is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Example:
        >>> import mindspore.numpy as np
        >>> x = np.arange(12).reshape(3, 4)
        >>> indices = np.arange(3).reshape(1, 3)
        >>> output = np.take_along_axis(x, indices, 1)
        >>> print(output)
        [[ 0  1  2]
        [ 4  5  6]
        [ 8  9 10]]
    """
    _check_input_tensor(arr, indices)
    # numpy semantics: axis=None operates on the flattened array
    if axis is None:
        arr = ravel(arr)
        axis = 0
    ndim = F.rank(arr)
    if ndim != F.rank(indices):
        _raise_value_error('`indices` and `arr` must have the same number of dimensions')
    # normalizes a possibly-negative axis into [0, ndim)
    axis = _check_axis_in_range(axis, ndim)

    shape_arr = F.shape(arr)
    shape_indices = F.shape(indices)
    # broadcasts indices against the shape of arr except at axis
    # (first call aligns the dims before `axis`, second call aligns the dims
    # after it; the length at `axis` itself is kept from `indices`)
    indices = _broadcast_to(indices, _tuple_slice(shape_indices, None, axis),
                            _tuple_slice(shape_arr, None, axis), ndim)
    indices = _broadcast_to(indices, _tuple_slice(shape_arr, None, axis + 1) +
                            _tuple_slice(shape_indices, axis + 1, None), shape_arr, ndim)
    # arr may also need broadcasting when indices had larger dims off-axis
    arr = _broadcast_to(arr, shape_arr, indices.shape, ndim)
    return F.gather_d(arr, axis, indices)
1811
1812
def _mod(x, y):
    """Computes x mod y as ``x - y * floor(x / y)`` using tensor primitives."""
    return F.tensor_sub(x, F.tensor_mul(y, F.tensor_floordiv(x, y)))
1818
1819
def _check_indices(dims, indices, mode, allow_negative_index=True):
    """
    Checks whether indices are out of bounds and maps them into range.

    Args:
        dims (int): Size of the dimension being indexed.
        indices (Tensor): Integer indices to validate.
        mode ('raise', 'wrap', 'clip'): Out-of-bounds policy. 'raise' is not
            implemented; 'wrap' uses modular arithmetic; 'clip' clamps indices
            into bounds.
        allow_negative_index (bool): If False, negative indices are treated as
            out of bounds (clipped to 0) instead of counting from the end.

    Returns:
        Tensor, indices mapped into the valid range according to `mode`.
    """
    shape = F.shape(indices)
    dtype = F.dtype(indices)
    # Dispatch on mode first so the bound tensors below are only materialized
    # for the 'clip' path that actually uses them ('raise' and 'wrap' never do).
    if mode == 'raise':
        _raise_unimplemented_error('"raise" mode is not implemented')
    if mode == 'wrap':
        # the modulo is computed in float32 and cast back afterwards,
        # presumably because integer floordiv is not supported on all
        # backends -- TODO confirm
        return _mod(indices, F.fill(mstype.float32, shape, dims)).astype(dtype)
    if mode != 'clip':
        _raise_value_error('invalid mode. Expected "raise", "wrap", or "clip"')
    # 'clip' path: indices below the lower bound become 0, indices above the
    # upper bound become dims - 1
    if not allow_negative_index:
        lowerbounds = F.fill(dtype, shape, 0)
    else:
        lowerbounds = F.fill(dtype, shape, -dims)
    upperbounds = F.fill(dtype, shape, dims - 1)
    out_of_lowerbounds = F.tensor_lt(indices, lowerbounds)
    out_of_upperbounds = F.tensor_gt(indices, upperbounds)
    zeros = F.fill(dtype, shape, 0)
    clipped = F.select(out_of_lowerbounds, zeros, indices)
    clipped = F.select(out_of_upperbounds, upperbounds, clipped)
    return clipped
1841
1842
def take(a, indices, axis=None, mode='clip'):
    """
    Takes elements from an array along an axis.

    When axis is not None, this function does the same thing as "fancy" indexing
    (indexing arrays using arrays); however, it can be easier to use if you need
    elements along a given axis. A call such as ``np.take(arr, indices, axis=3)`` is
    equivalent to ``arr[:,:,:,indices,...]``.

    Note:
        Numpy argument out is not supported.
        ``mode = 'raise'`` is not supported, and the default mode is 'clip' instead.

    Args:
        a (Tensor): Source array with shape `(Ni…, M, Nk…)`.
        indices (Tensor): The indices with shape `(Nj...)` of the values to extract.
        axis (int, optional): The axis over which to select values. By default,
            the flattened input array is used. Default: ``None`` .
        mode ('raise', 'wrap', 'clip', optional): Specifies how out-of-bounds
            indices will behave. Default: ``'clip'`` .

            'raise' – raise an error;

            'wrap' – wrap around;

            'clip' – clip to the range. 'clip' mode means that all indices that are
            too large are replaced by the index that addresses the last element
            along that axis. Note that this disables indexing with negative numbers.

    Returns:
        Tensor, the indexed result.

    Raises:
        ValueError: If axis is out of range.
        TypeError: If the input is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([4, 3, 5, 7, 6, 8])
        >>> indices = np.array([0, 1, 4])
        >>> output = np.take(a, indices)
        >>> print(output)
        [4 3 6]
        >>> indices = np.array([[0, 1], [2, 3]])
        >>> output = np.take(a, indices)
        >>> print(output)
        [[4 3]
        [5 7]]
    """
    _check_input_tensor(a, indices)
    # delegate to the Tensor method, which implements the numpy semantics
    return a.take(indices, axis=axis, mode=mode)
1897
1898
def repeat(a, repeats, axis=None):
    """
    Repeats elements of an array.

    Args:
        a (Tensor): Input array.
        repeats (int or sequence of ints): The number of repetitions for each element.
            `repeats` is broadcasted to fit the shape of the given axis.
        axis (int, optional): The axis along which to repeat values. By default,
            use the flattened input array, and return a flat output array. Default: ``None`` .

    Returns:
        Tensor, output array which has the same shape as `a`, except along the given
        axis.

    Raises:
        ValueError: If axis is out of range.
        TypeError: If input `a` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.repeat(np.array(3), 4)
        >>> print(output)
        [3 3 3 3]
        >>> x = np.array([[1,2],[3,4]])
        >>> output = np.repeat(x, 2)
        >>> print(output)
        [1 1 2 2 3 3 4 4]
        >>> output = np.repeat(x, 3, axis=1)
        >>> print(output)
        [[1 1 1 2 2 2]
        [3 3 3 4 4 4]]
        >>> output = np.repeat(x, [1, 2], axis=0)
        >>> print(output)
        [[1 2]
        [3 4]
        [3 4]]
    """
    # coerce array-likes to Tensor, then delegate to the Tensor method
    return _to_tensor(a).repeat(repeats, axis)
1942
1943
def rot90(a, k=1, axes=(0, 1)):
    """
    Rotates a tensor by 90 degrees in the plane specified by axes.
    Rotation direction is from the first towards the second axis.

    Args:
        a (Tensor): Input tensor of two or more dimensions.
        k (int): Number of times the tensor is rotated by 90 degrees. Default: ``1`` .
        axes (Union[tuple(int), list(int)]): The tensor is rotated in the plane
            defined by the axes. Default: ``(0, 1)`` .
            Axes must be different and with the shape of `(2,)`.

    Returns:
        Tensor.

    Raises:
        TypeError: If input `a` is not a Tensor or
            the argument `k` is not integer or
            the argument `axes` is not tuple of integers or list of ints.
        ValueError: If any axis is out of range or
            the length of `axes` is not `2`.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(24).reshape((2, 3, 4))
        >>> output = np.rot90(a)
        >>> print(output)
        [[[ 8  9 10 11]
          [20 21 22 23]]
         [[ 4  5  6  7]
          [16 17 18 19]]
         [[ 0  1  2  3]
          [12 13 14 15]]]
        >>> output = np.rot90(a, 3, (1, 2))
        >>> print(output)
        [[[ 8  4  0]
          [ 9  5  1]
          [10  6  2]
          [11  7  3]]
         [[20 16 12]
          [21 17 13]
          [22 18 14]
          [23 19 15]]]
    """
    _check_input_tensor(a)

    if not isinstance(k, int):
        _raise_type_error("integer argument expected, but got ", k)
    # Normalize k into [0, 4). Python's % already maps negative operands into
    # that range; the previous expression `4 - (-k % 4)` returned 4 (not 0)
    # when k was a negative multiple of 4 (e.g. k=-4), which rotated the input
    # by 270 degrees instead of leaving it unchanged.
    k = k % 4

    if not isinstance(axes, (tuple, list)):
        _raise_type_error("tuple(ints) or list(ints) expected, but got ", axes)
    if len(axes) != 2:
        _raise_value_error("len(axes) must be 2.")
    axis1_tmp, axis2_tmp = axes[0], axes[1]
    axis1 = _canonicalize_axis(axis1_tmp, a.ndim)
    axis2 = _canonicalize_axis(axis2_tmp, a.ndim)
    if axis1 == axis2:
        _raise_value_error('Axes must be different.')

    if k == 0:
        return a
    # a half turn is a flip along both axes of the plane
    if k == 2:
        return flip(flip(a, axis1), axis2)
    # a quarter turn is a transpose of the plane followed by a flip:
    # along axis1 for k == 1, along axis2 for k == 3
    perm = _list_comprehensions(a.ndim)
    perm[axis1], perm[axis2] = perm[axis2], perm[axis1]
    if k == 1:
        return flip(transpose(a, perm), axis1)
    return flip(transpose(a, perm), axis2)
2016
2017
def select(condlist, choicelist, default=0):
    """
    Returns an array drawn from elements in `choicelist`, depending on conditions.

    Args:
        condlist (Union[int, float, bool, list, tuple, Tensor]): The list of conditions
            which determine from which array in `choicelist` the output elements are
            taken. When multiple conditions are satisfied, the first one encountered in
            `condlist` is used.
        choicelist (Union[int, float, bool, list, tuple, Tensor]): The list of arrays
            from which the output elements are taken. It has to be of the same length as
            `condlist`.
        default (scalar, optional): The element inserted in output when all conditions
            evaluate to `False`. Default: ``0`` .

    Returns:
        Tensor, the output at position `m` is the `m-th` element of the array in
        `choicelist` where the `m-th` element of the corresponding array in `condlist`
        is `True`.

    Raises:
        ValueError: If ``len(condlist) != len(choicelist)``.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> condlist = [[True, True, True, False, False], [False, False, True, False, True]]
        >>> choicelist = [[0, 1, 2, 3, 4], [0, 1, 4, 9, 16]]
        >>> output = np.select(condlist, choicelist)
        >>> print(output)
        [ 0  1  2  0 16]
    """
    condlist, choicelist = _to_tensor(condlist, choicelist)
    shape_cond = F.shape(condlist)
    shape_choice = F.shape(choicelist)
    if F.rank(condlist) == 0 or F.rank(choicelist) == 0:
        _raise_value_error('input cannot be scalars')
    # the leading axis enumerates the cases; both stacks must agree on it
    case_num = shape_cond[0]
    if shape_choice[0] != case_num:
        _raise_value_error('list of cases must be same length as list of conditions')

    case_size_cond = _tuple_slice(shape_cond, 1, None)
    case_size_choice = _tuple_slice(shape_choice, 1, None)
    # performs broadcast over the cases in condlist and choicelist
    case_size = _infer_out_shape(case_size_cond, case_size_choice)
    shape_broadcasted = (case_num,) + case_size
    ndim = len(shape_broadcasted)
    # insert unit axes right after the case axis so each stack can be
    # broadcast to the common per-case shape
    shape_cond_expanded = ((case_num,) + _list_comprehensions(ndim - F.rank(condlist), 1, True) +
                           case_size_cond)
    condlist = _broadcast_to_shape(F.reshape(condlist, shape_cond_expanded), shape_broadcasted)
    shape_choice_expanded = ((case_num,) + _list_comprehensions(ndim - F.rank(choicelist), 1, True) +
                             case_size_choice)
    choicelist = _broadcast_to_shape(F.reshape(choicelist, shape_choice_expanded), shape_broadcasted)

    slice_start = _list_comprehensions(ndim - 1, 0, True)
    slice_size = (1,) + case_size
    dtype = F.dtype(choicelist)
    # the accumulator starts out filled with `default` and is overwritten
    # wherever a condition matches
    if isinstance(default, Tensor):
        default_slice = default.astype(F.dtype(choicelist)).reshape(slice_size)
    else:
        default_slice = F.fill(F.dtype(choicelist), slice_size, default)
    # iterate the cases from last to first so that, when several conditions
    # overlap, the earliest matching condition wins (numpy semantics)
    for i in range(case_num - 1, -1, -1):
        cond_slice = F.tensor_slice(condlist.astype(mstype.float32), (i,) + slice_start, slice_size)
        choice_slice = F.tensor_slice(choicelist, (i,) + slice_start, slice_size)
        default_slice = F.select(cond_slice.astype(mstype.bool_), choice_slice, default_slice)
    # note: (case_size) is just case_size -- the parentheses do not form a tuple
    return F.reshape(default_slice, (case_size)).astype(dtype)
2086
2087
@_primexpr
def _get_grid(shape):
    """Returns a grid representing all the indices for an array with the given shape."""
    ndim = len(shape)
    grids = []
    for axis, dim in enumerate(shape):
        # indices 0..dim-1 along this axis, broadcast across all other axes
        axis_indices = _iota(mstype.int32, dim)
        expanded = _expanded_shape(ndim, dim, axis)
        grids.append(_broadcast_to_shape(axis_indices.reshape(expanded), shape))
    return stack(grids, -1)
2099
2100
def choose(a, choices, mode='clip'):
    """
    Construct an array from an index array and a list of arrays to choose from.
    Given an "index" array `a` of integers and a sequence of n arrays (choices),
    `a` and each choice array are first broadcast, as necessary, to arrays of a
    common shape; calling these `Ba` and `Bchoices[i], i = 0,…,n-1` we have that,
    necessarily, ``Ba.shape == Bchoices[i].shape`` for each `i`. Then, a new array
    with ``shape Ba.shape`` is created as follows:

    - if ``mode='raise'`` (the default), then, first of all, each element of `a`
      (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that `i`
      (in that range) is the value at the `(j0, j1, ..., jm)` position in
      `Ba` - then the value at the same position in the new array is the
      value in ``Bchoices[i]`` at that same position;

    - if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed)
      integer; modular arithmetic is used to map integers outside the
      range ``[0, n-1]`` back into that range; and then the new array is
      constructed as above;

    - if ``mode='clip'``, values in `a` (and thus `Ba`) may be any (signed) integer;
      negative integers are mapped to 0; values greater than `n-1` are mapped to
      `n-1`; and then the new array is constructed as above.

    Note:
        Numpy argument `out` is not supported.
        ``mode = 'raise'`` is not supported, and the default mode is 'clip' instead.

    Args:
        a (int array): This array must contain integers in ``[0, n-1]``, where `n` is
            the number of choices, unless ``mode=wrap`` or ``mode=clip``, in which
            cases any integers are permissible.
        choices (sequence of arrays): Choice arrays. `a` and all of the `choices` must
            be broadcastable to the same shape. If `choices` is itself an array, then
            its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``)
            is taken as defining the "sequence".
        mode ('raise', 'wrap', 'clip', optional): Specifies how indices outside
            ``[0, n-1]`` will be treated:

            'raise' – raise an error;

            'wrap' – wrap around;

            'clip' – clip to the range. 'clip' mode means that all indices that are
            too large are replaced by the index that addresses the last element
            along that axis. Note that this disables indexing with negative numbers.

    Returns:
        Tensor, the merged result.

    Raises:
        ValueError: If `a` and any of the `choices` cannot be broadcast.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]]
        >>> print(np.choose([2, 3, 1, 0], choices))
        [20 31 12  3]
        >>> print(np.choose([2, 4, 1, 0], choices, mode='clip'))
        [20 31 12  3]
        >>> print(np.choose([2, 4, 1, 0], choices, mode='wrap'))
        [20  1 12  3]
        >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
        >>> choices = [-10, 10]
        >>> print(np.choose(a, choices))
        [[ 10 -10  10]
         [-10  10 -10]
         [ 10 -10  10]]
    """
    a = _to_tensor(a)
    if not _check_is_int(F.dtype(a)):
        _raise_value_error('`a` should be an int array')
    if isinstance(choices, (tuple, list)):
        # broadcasts choices to the same shape if choices is a sequence
        choices = _to_tensor(*choices)
        shapes = ()
        for choice in choices:
            shapes += (F.shape(choice),)
        # common shape of `a` and every choice array
        shape_choice = _infer_out_shape(F.shape(a), *shapes)
        tmp = []
        for choice in choices:
            tmp.append(broadcast_to(choice, shape_choice))
        # stack into one tensor whose leading axis enumerates the choices
        choices = stack(tmp)
    else:
        # a single array: its outermost dim defines the sequence of choices
        choices = _to_tensor(choices)
        shape_choice = _infer_out_shape(F.shape(a), F.shape(choices)[1:])
        # keep the choice axis, pad the per-choice shape with unit axes so it
        # can broadcast to shape_choice
        choices = F.reshape(choices, choices.shape[:1] + _add_unit_axes(choices.shape[1:], len(shape_choice)))
        choices = broadcast_to(choices, (F.shape(choices)[0],) + shape_choice)

    if F.rank(a) == 0 or F.rank(choices) == 0:
        _raise_value_error('input cannot be scalars')
    a = broadcast_to(a, shape_choice)
    # map out-of-range choice indices according to `mode`; negative indices are
    # disallowed here because the gather below indexes the choice axis directly
    a = _check_indices(F.shape(choices)[0], a, mode, allow_negative_index=False)
    # build (choice_index, coord_0, ..., coord_m) tuples and gather them
    grid = _get_grid(F.shape(a))
    indices = concatenate((a.reshape(F.shape(a) + (1,)), grid), -1)
    return F.gather_nd(choices, indices)
2200
2201
def size(a, axis=None):
    """
    Returns the number of elements along a given axis.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Input data.
        axis (int): Axis along which the elements are counted. Default: ``None``.
            If None, give the total number of elements.

    Returns:
        Number of elements along the specified axis.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If input is not array_like or `axis` is not int.
        ValueError: If any axis is out of range or duplicate axes exist.

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(10).reshape(2, 5).astype('float32')
        >>> print(np.size(x))
        10
        >>> print(np.size(x, axis=1))
        5
    """
    tensor_input = _to_tensor(a)
    if axis is None:
        # total number of elements
        return tensor_input.size
    if not isinstance(axis, int):
        _raise_type_error("axis argument should be integer.")
    # normalize a possibly-negative axis, then read the dim length
    return tensor_input.shape[_canonicalize_axis(axis, tensor_input.ndim)]
2236
2237
def array_str(a):
    """
    Returns a string representation of the data in an array.

    The data in the array is returned as a single string.
    This function is similar to array_repr, the difference being that array_repr also
    returns information on the kind of array and its data type.

    Note:
        Numpy argument `max_line_width`, `precision` and `suppress_small` are not supported.
        Graph mode does not support the function.

    Args:
        a (Tensor): Input data.

    Returns:
        String.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If input is not tensor.

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(5)
        >>> np.array_str(x)
        '[0 1 2 3 4]'
    """
    if not isinstance(a, Tensor):
        _raise_type_error("Expect input to be tensor.")
    # str() dispatches to the tensor's __str__, producing the data-only form
    return str(a)
2271
2272
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    Applies a function to 1-D slices along the given axis.
    Executes ``func1d(a, *args, **kwargs)`` where `func1d` operates on 1-D arrays and `a` is a
    1-D slice of arr along axis.

    Args:
        func1d (function): Maps `(M,) -> (Nj…)`. This function should accept 1-D arrays. It is
            applied to 1-D slices of arr along the specified axis.
        axis (int): Axis along which arr is sliced.
        arr (Tensor): Input array with shape `(Ni…, M, Nk…)`.
        args (any): Additional arguments to `func1d`.
        kwargs (any): Additional named arguments to `func1d`.

    Returns:
        Tensor with shape `(Ni…, Nj…, Nk…)`, the output array. Its shape is identical to the
        shape of `arr`, except along the `axis` dimension. This axis is removed, and replaced
        with new dimensions equal to the shape of the return value of `func1d`. So if `func1d`
        returns a scalar, the output will have one fewer dimensions than `arr`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        ValueError: If axis is out of the range.

    Examples:
        >>> import mindspore.numpy as np
        >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
        >>> print(np.apply_along_axis(np.diag, -1, b))
        [[[1 0 0]
        [0 2 0]
        [0 0 3]]
        [[4 0 0]
        [0 5 0]
        [0 0 6]]
        [[7 0 0]
        [0 8 0]
        [0 0 9]]]
    """
    ndim = F.rank(arr)
    shape = F.shape(arr)
    axis = _check_axis_in_range(axis, ndim)
    # move the target axis to the end and collapse all other axes, so each row
    # of the resulting 2-d view is one 1-d slice to feed to `func1d`
    arr = moveaxis(arr, axis, -1)
    arr = F.reshape(arr, (-1, F.shape(arr)[-1]))
    slices = []
    for i in range(F.shape(arr)[0]):
        slices.append(func1d(arr[i], *args, **kwargs))
    stacked_slices = stack(slices)
    # original shape with the `axis` dim removed and the shape that `func1d`
    # returned appended at the end
    shape_stacked = (_tuple_slice(shape, None, axis) + _tuple_slice(shape, axis + 1, None) +
                     _tuple_slice(F.shape(stacked_slices), 1, None))
    res = F.reshape(stacked_slices, shape_stacked)

    # moves the dimensions returned by `func1d` back to `axis`
    ndim_func = F.rank(res) - ndim + 1
    if ndim_func >= 1:
        res = moveaxis(res, F.make_range(ndim - 1, F.rank(res)),
                       F.make_range(axis, axis + ndim_func))
    return res
2332
2333
def _stack_arrays(arrs):
    """Stacks a sequence of Tensor"""
    if not isinstance(arrs, (tuple, list)):
        # a single array-like becomes a stack of one 1-d tensor
        return atleast_1d(_to_tensor(arrs))
    return stack([_to_tensor(arr) for arr in arrs])
2342
2343
def piecewise(x, condlist, funclist, *args, **kw):
    """
    Evaluates a piecewise-defined function.
    Given a set of conditions and corresponding functions, evaluate each function on the input
    data wherever its condition is true.

    Args:
        x (Union[int, float, bool, list, tuple, Tensor]): The input domain.
        condlist (Union[bool, list of bool Tensor]): Each boolean array corresponds to a
            function in `funclist`. Wherever `condlist[i]` is True, `funclist[i](x)` is used as
            the output value. Each boolean array in `condlist` selects a piece of `x`, and
            should therefore be of the same shape as `x`. The length of `condlist` must
            correspond to that of `funclist`. If one extra function is given, i.e. if
            ``len(funclist) == len(condlist) + 1``, then that extra function is the default
            value, used wherever all conditions are false.
        funclist (Union[list of callables, list of scalars]): Each function is evaluated over
            `x` wherever its corresponding condition is True. It should take a 1d array as input
            and give an 1d array or a scalar value as output. If, instead of a callable, a scalar
            is provided then a constant function ``(lambda x: scalar)`` is assumed.
        args (any): Any further arguments given to `piecewise` are passed to the functions upon
            execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then each function is
            called as ``f(x, 1, 'a')``.
        kw (any): Keyword arguments used in calling `piecewise` are passed to the functions upon
            execution, i.e., if called ``piecewise(..., ..., alpha=1)``, then each function is
            called as ``f(x, alpha=1)``.

    Returns:
        Tensor, the output is the same shape and type as `x` and is found by calling the
        functions in `funclist` on the appropriate portions of `x`, as defined by the boolean
        arrays in `condlist`. Portions not covered by any condition have a default value of 0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        ValueError: If length of `funclist` is not in ``(len(condlist), len(condlist) + 1)``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.linspace(-2.5, 2.5, 6)
        >>> print(np.piecewise(x, [x < 0, x >= 0], [-1, 1]))
        [-1 -1 -1  1  1  1]
    """
    x = _to_tensor(x)
    # scalars in funclist are used as-is; callables are evaluated on x up front
    choicelist = funclist
    if isinstance(funclist, (tuple, list)) and _callable(x, funclist[0]):
        choicelist = [func(x, *args, **kw) for func in funclist]
    condlist = _stack_arrays(condlist)
    choicelist = _stack_arrays(choicelist)

    default = 0
    n_cond = len(condlist)
    n_func = len(funclist)
    if n_func == n_cond + 1:
        # one extra function supplies the default value
        default = choicelist[-1]
        choicelist = choicelist[:-1]
    elif n_func != n_cond:
        _raise_value_error('the number of choices should be either equal to conditions or ', n_cond + 1)
    return select(condlist, choicelist, default=default)
2406
2407
def unravel_index(indices, shape, order='C'):
    """
    Converts a flat index or array of flat indices into a tuple of coordinate arrays.

    Note:
        Out-of-bound indices are clipped by the boundaries of `shape` instead of raising
        an error.

    Args:
        indices (Union[int, float, bool, list, tuple, Tensor]): An integer array whose elements
            are indices into the flattened version of an array of dimensions shape.
        shape (tuple of integers): The shape of the array to use for unraveling indices.
        order (Union['C', 'F'], optional): Determines whether the indices should be viewed as
            indexing in row-major (C-style) or column-major (Fortran-style) order. Default: ``'C'`` .

    Returns:
        Tensor, each array in the tuple has the same shape as the indices array.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        ValueError: If `order` is not 'C' or 'F'.

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.unravel_index([22, 41, 37], (7,6)))
        (Tensor(shape=[3], dtype=Int32, value= [3, 6, 6]),
        Tensor(shape=[3], dtype=Int32, value= [4, 5, 1]))
        >>> print(np.unravel_index([31, 41, 13], (7,6), order='F'))
        (Tensor(shape=[3], dtype=Int32, value= [3, 6, 6]),
        Tensor(shape=[3], dtype=Int32, value= [4, 5, 1]))
    """
    indices = _to_tensor(indices)
    if order not in ('C', 'F'):
        _raise_value_error('invalid order. Expected "C" or "F"')
    if isinstance(shape, int):
        shape = (shape,)
    ndim = F.rank(indices)
    # cumulative products of the dims give the per-axis strides (in flat
    # elements); 'F' order unravels the first axis fastest
    if order == 'F':
        sizes = _cumprod(shape)
    else:
        sizes = _cumprod(shape[::-1])
    # sizes[0] is the total element count; the trailing 1 is the innermost stride
    sizes = _to_tensor(sizes[::-1] + (1,))
    sizes = F.reshape(sizes, (-1,) + _list_comprehensions(ndim, 1, True))
    total_size = sizes[0]
    # clip out-of-bound indices into [-total_size, total_size - 1] instead of
    # raising (see Note in the docstring)
    indices = where(indices > total_size - 1, total_size - 1, indices)
    if _get_device() == 'GPU':
        # negation is routed through float32 on GPU, presumably to work around
        # missing integer Neg support on that backend -- TODO confirm
        dtype = F.dtype(total_size)
        lowerbounds = (-(total_size.astype(mstype.float32))).astype(dtype)
    else:
        lowerbounds = -total_size
    indices = where(indices < lowerbounds, lowerbounds, indices)
    # coordinate along each axis: (flat index mod outer stride) // inner stride
    res = _mod(indices, sizes[:-1])//sizes[1:]

    num = len(res)
    if ndim == 0 and num == 1:
        return res.ravel()
    # 'F' order reports the coordinates from the last axis back to the first
    if order == 'F':
        r = range(num - 1, -1, -1)
    else:
        r = range(num)
    subs = ()
    for i in r:
        subs += (res[i],)
    return subs
2474
2475
def apply_over_axes(func, a, axes):
    """
    Applies a function repeatedly over multiple axes.

    `func` is called as `res = func(a, axis)`, where `axis` is the first element of `axes`.
    The result `res` of the function call must have either the same dimensions as `a` or
    one less dimension. If `res` has one less dimension than `a`, a dimension is inserted before `axis`.
    The call to `func` is then repeated for each axis in `axes`, with `res` as the first argument.

    Args:
        func (function): This function must take two arguments, `func(a, axis)`.
        a (Union[int, float, bool, list, tuple, Tensor]): Input tensor.
        axes (Union[int, list, tuple]): Axes over which `func` is applied; the elements must be integers.

    Returns:
        Tensor. The number of dimensions is the same as `a`, but the shape can be different.
        This depends on whether `func` changes the shape of its output with respect to its input.

    Raises:
        TypeError: If input `a` is not array_like or `axes` is not int or sequence of ints.
        ValueError: If any axis is out of range or duplicate axes exist.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(10).reshape(2, 5).astype('float32')
        >>> print(x)
        [[0. 1. 2. 3. 4.]
         [5. 6. 7. 8. 9.]]
        >>> print(np.apply_over_axes(np.sum, x, axes=0))
        [[ 5.  7.  9. 11. 13.]]
    """
    arr = _to_tensor(a)
    if isinstance(axes, int):
        axes = (axes,)
    res = arr
    for axis in axes:
        res = func(res, axis=axis)
        # if func dropped one dimension, restore it at `axis`
        if res.ndim != arr.ndim:
            res = F.expand_dims(res, axis)
        # after at most one re-inserted dim the ranks must match
        if res.ndim != arr.ndim:
            _raise_value_error("function is not returning a tensor of the correct shape")
    return res
2520
2521
def argwhere(a):
    """
    Find the indices of Tensor elements that are non-zero, grouped by element.

    Args:
        a (Union[list, tuple, Tensor]): Input tensor.

    Returns:
        Tensor. Indices of elements that are non-zero, grouped by element.
        Its shape is :math:`(N, a.ndim)`, where N is the number of non-zero
        items in `a`.

    Raises:
        TypeError: If input `a` is not array_like.
        ValueError: If dim of `a` equals to 0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([[[1, 0], [-5, 0]]])
        >>> np.argwhere(x)
        Tensor(shape=[2, 3], dtype=Int64, value=[[0, 0, 0], [0, 1, 0]])
    """
    # Convert array_like input, then delegate directly to the functional op.
    return F.argwhere(_to_tensor(a))
2548
2549
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
    """
    Find the intersection of two Tensors.
    Return the sorted, unique values that are in both of the input Tensors.

    Args:
        ar1 (Union[int, float, bool, list, tuple, Tensor]): Input tensor.
        ar2 (Union[int, float, bool, list, tuple, Tensor]): Input tensor.
        assume_unique (bool): If `True`, the input Tensors are assumed to be unique, which can speed up the calculation.
                              If `True` but `ar1` or `ar2` are not unique,
                              incorrect results and out-of-bounds indices could result.
                              Default: ``False``.
        return_indices (bool): If `True`, the indices which correspond to the intersection of two Tensors are returned.
                               The first instance of a value is used if there are multiple.
                               Default: ``False``.

    Returns:
        Tensor or tuple of Tensors.
        If `return_indices` is ``False``, return the intersection tensor, otherwise return tuple of tensors.

    Raises:
        TypeError: If input `ar1` or `ar2` is not array_like.
        TypeError: If `assume_unique` or `return_indices` is not bool.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
        Tensor(shape=[2], dtype=Int32, value=[1, 3])
    """
    def unique_w_ind(arr):
        # Return the sorted unique values of the flattened `arr` together
        # with, for each unique value, an index into the flattened input
        # where that value occurs.
        array, sort_indices = arr.ravel().sort()
        array_type = array.dtype
        # Detect boundaries between runs of equal values by comparing the
        # sorted array against itself shifted by one position; both sides
        # are padded with a dummy 0 so the shapes line up.
        cmp_array1 = F.cat((array, Tensor([0], dtype=array_type)))
        cmp_array2 = F.cat((Tensor([0], dtype=array_type), array))
        mask = cmp_array1 != cmp_array2
        # The first sorted element is always a first occurrence, regardless
        # of whether it happens to equal the dummy 0 padding.
        mask[0] = True
        # mask[:-1] drops the extra comparison introduced by the padding.
        array = F.masked_select(array, mask[:-1])
        ind = F.masked_select(sort_indices, mask[:-1])
        return array, ind

    if not isinstance(assume_unique, bool) or not isinstance(return_indices, bool):
        _raise_type_error("assume_unique or return_indices is not bool type.")
    ar1, ar2 = _to_tensor(ar1, ar2)
    # Placeholder index maps; only overwritten when deduplication below
    # actually produces original-position indices.
    ind1 = F.fill(mstype.int32, (ar1.size,), -1)
    ind2 = F.fill(mstype.int32, (ar2.size,), -1)
    if not assume_unique:
        if return_indices:
            # Need original positions, so deduplicate with index tracking.
            array1, ind1 = unique_w_ind(ar1)
            array2, ind2 = unique_w_ind(ar2)
        else:
            array1 = F.unique(ar1)[0]
            array2 = F.unique(ar2)[0]
    else:
        # Caller guarantees uniqueness; just flatten.
        array1 = ar1.ravel()
        array2 = ar2.ravel()
    # After this point each value appears at most once per source array, so
    # a value appearing twice in the sorted concatenation is in both inputs.
    concat_array = concatenate((array1, array2))
    if return_indices:
        concat_sort_indices = F.argsort(concat_array)
        concat_array = concat_array[concat_sort_indices]
    else:
        concat_array, concat_sort_indices = concat_array.sort()

    # Adjacent equal elements mark the intersection values.
    mask_res = concat_array[1:] == concat_array[:-1]
    res = F.masked_select(concat_array[1:], mask_res)

    if return_indices:
        # NOTE(review): this assumes the sort places the `array1` element of
        # each equal pair before the `array2` element (i.e. a stable sort on
        # the concatenation) — confirm F.argsort/sort guarantee stability.
        ar1_indices = F.masked_select(concat_sort_indices[:-1], mask_res)
        # Positions >= array1.size belong to array2; rebase them.
        ar2_indices = F.masked_select(concat_sort_indices[1:], mask_res) - array1.size
        if not assume_unique:
            # Map indices in the deduplicated arrays back to positions in
            # the original flattened inputs.
            ar1_indices = ind1.index_select(0, ar1_indices)
            ar2_indices = ind2.index_select(0, ar2_indices)
        return res, ar1_indices, ar2_indices
    return res
2626