# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""array operations, the function docs are adapted from Numpy API."""
import operator

from ..common import dtype as mstype
from ..common import Tensor
from ..ops import operations as P
from ..ops import functional as F
from ..ops.primitive import constexpr
from ..nn import Cell

from .utils import _convert_list_tensor_to_tuple_tensor, _expand, _broadcast_to_shape, \
    _check_input_tensor, _broadcast_to, _to_tensor, _callable
from .utils_const import _check_axes_range, _check_start_normalize, \
    _raise_type_error, _raise_value_error, _infer_out_shape, _empty, _promote, \
    _check_same_type, _check_axis_valid, _add_unit_axes, _broadcast_tuples, \
    _check_is_float, _check_axis_in_range, _check_axis_type, _canonicalize_axis, \
    _list_comprehensions, _check_element_int, _is_shape_empty, _type_convert, \
    _tuple_slice, _expanded_shape, _seq_prod, _tuple_setitem, _iota, \
    _raise_unimplemented_error, _cumprod, _get_device, _check_is_int

# According to the official NumPy reference, the number of dimensions of an
# array must not exceed 32.
MAX_NUMPY_DIMS = 32


def expand_dims(a, axis):
    """
    Expands the shape of a tensor.

    Inserts a new axis that will appear at the axis position in the expanded tensor shape.

    Args:
        a (Tensor): Input tensor array.
        axis (Union[int, list(int), tuple(int)]): Position in the expanded axes where
            the new axis is placed.

    Returns:
        Tensor, with the number of dimensions increased at specified axis.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If axis exceeds a.ndim.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((2,2))
        >>> x = np.expand_dims(x,0)
        >>> print(x.shape)
        (1, 2, 2)
    """
    _check_input_tensor(a)
    if not isinstance(axis, (int, tuple, list)):
        _raise_type_error("axis must be tuple, list or int, but got ", axis)
    if isinstance(axis, int):
        return F.expand_dims(a, axis)
    ndim = a.ndim + len(axis)
    axis = _canonicalize_axis(axis, ndim)
    for ax in axis:
        a = F.expand_dims(a, ax)
    return a


def squeeze(a, axis=None):
    """
    Removes single-dimensional entries from the shape of a tensor.

    Args:
        a (Tensor): Input tensor array.
        axis (Union[None, int, list(int), tuple(int)]): The axis(axes) to squeeze,
            default is None.

    Returns:
        Tensor, with all or a subset of the dimensions of length :math:`1` removed.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If specified axis has shape entry :math:`> 1`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((1,2,2,1))
        >>> x = np.squeeze(x)
        >>> print(x.shape)
        (2, 2)
    """
    _check_input_tensor(a)
    return a.squeeze(axis)


def transpose(a, axes=None):
    """
    Reverses or permutes the axes of a tensor; returns the modified tensor.

    Args:
        a (Tensor): a tensor to be transposed
        axes (Union[None, tuple, list]): the axes order, if `axes` is `None`, transpose
            the entire tensor. Default is `None`.

    Returns:
        Tensor, the transposed tensor array.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If the number of `axes` is not equal to a.ndim.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((1,2,3))
        >>> x = np.transpose(x)
        >>> print(x.shape)
        (3, 2, 1)
    """
    _check_input_tensor(a)
    return a.transpose(axes)


def rollaxis(x, axis, start=0):
    """
    Rolls the specified axis backwards, until it lies in the given position.
    The positions of the other axes do not change relative to one another.

    Args:
        x (Tensor): A Tensor to be transposed.
        axis (int): The axis to be rolled.
        start (int): Default: 0.
            If :math:`start <= axis`, the axis is rolled back until it lies in this position (`start`).
            If :math:`start > axis`: the axis is rolled until it lies before this position (`start`).
            If :math:`start < 0`, the start will be normalized as a non-negative number (more details
            can be seen in the source code).

            .. table
                +-----------+-----------------+
                |start      |Normalized start |
                +===========+=================+
                |-(x.ndim+1)|raise ValueError |
                +-----------+-----------------+
                |-x.ndim    |0                |
                +-----------+-----------------+
                |...        |...              |
                +-----------+-----------------+
                |-1         |x.ndim-1         |
                +-----------+-----------------+
                |...        |...              |
                +-----------+-----------------+
                |x.ndim     |x.ndim           |
                +-----------+-----------------+
                |x.ndim+1   |raise ValueError |
                +-----------+-----------------+
            ..

    Returns:
        Transposed Tensor. Has the same data type as the original tensor `x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `axis` or `start` is not integer, or `x` is not tensor.
        ValueError: If `axis` is not in the range of :math:`[-ndim, ndim-1]` or
            `start` is not in the range of :math:`[-ndim, ndim]`.

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((2,3,4))
        >>> output = np.rollaxis(x, 0, 2)
        >>> print(output.shape)
        (3, 2, 4)
    """
    _check_input_tensor(x)
    if not isinstance(axis, int):
        _raise_type_error("integer argument expected, but got ", axis)
    if not isinstance(start, int):
        _raise_type_error("integer argument expected, but got ", start)

    shape = F.shape(x)
    ndim = F.tuple_len(shape)

    axis = _check_axes_range(axis, ndim)
    start = _check_start_normalize(start, ndim)
    if 0 <= start - axis <= 1:
        return x
    perm = F.make_range(0, ndim)
    new_perm = None
    if start < axis:
        if axis + 1 < ndim:
            new_perm = perm[0:start] + perm[axis:axis+1] + \
                perm[start:axis] + perm[axis+1:]
        else:
            new_perm = perm[0:start] + perm[axis:axis+1] + perm[start:axis]
    if start > axis:
        if start < ndim:
            new_perm = perm[0:axis] + perm[axis+1:start] + \
                perm[axis:axis+1] + perm[start:]
        else:
            new_perm = perm[0:axis] + perm[axis+1:start] + \
                perm[axis:axis+1]

    return F.transpose(x, new_perm)
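
# A comment-only trace of the permutation built above, matching the docstring
# example (values are illustrative): for x.ndim = 3, axis = 0, start = 2, the
# branch start > axis with start < ndim gives
#
#     perm     = (0, 1, 2)
#     new_perm = perm[0:0] + perm[1:2] + perm[0:1] + perm[2:]  # (1, 0, 2)
#
# so a (2, 3, 4) tensor is transposed to shape (3, 2, 4).

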
def swapaxes(x, axis1, axis2):
    """
    Interchanges two axes of a tensor.

    Args:
        x (Tensor): A tensor to be transposed.
        axis1 (int): First axis.
        axis2 (int): Second axis.

    Returns:
        Transposed tensor, has the same data type as the original tensor `x`.

    Raises:
        TypeError: If `axis1` or `axis2` is not integer, or `x` is not tensor.
        ValueError: If `axis1` or `axis2` is not in the range of :math:`[-ndim, ndim-1]`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((2,3,4))
        >>> output = np.swapaxes(x, 0, 2)
        >>> print(output.shape)
        (4, 3, 2)
    """
    _check_input_tensor(x)
    return x.swapaxes(axis1, axis2)


def reshape(x, new_shape):
    """
    Reshapes a tensor without changing its data.

    Args:
        x (Tensor): A tensor to be reshaped.
        new_shape (Union[int, list(int), tuple(int)]): The new shape should be
            compatible with the original shape. If the tuple has only one element,
            the result will be a 1-D tensor of that length. One shape dimension
            can be :math:`-1`. In this case, the value is inferred from the length of
            the tensor and remaining dimensions.

    Returns:
        Reshaped Tensor. Has the same data type as the original tensor `x`.

    Raises:
        TypeError: If `new_shape` is not integer, list or tuple, or `x` is not tensor.
        ValueError: If `new_shape` is not compatible with the original shape.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
        >>> output = np.reshape(x, (3, 2))
        >>> print(output)
        [[-0.1  0.3]
         [ 3.6  0.4]
         [ 0.5 -3.2]]
        >>> output = np.reshape(x, (3, -1))
        >>> print(output)
        [[-0.1  0.3]
         [ 3.6  0.4]
         [ 0.5 -3.2]]
        >>> output = np.reshape(x, (6, ))
        >>> print(output)
        [-0.1  0.3  3.6  0.4  0.5 -3.2]
    """
    _check_input_tensor(x)
    return x.reshape(new_shape)


def ravel(x):
    """
    Returns a contiguous flattened tensor.

    A 1-D tensor, containing the elements of the input, is returned.

    Args:
        x (Tensor): A tensor to be flattened.

    Returns:
        Flattened tensor, has the same data type as the original tensor `x`.

    Raises:
        TypeError: If `x` is not tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((2,3,4))
        >>> output = np.ravel(x)
        >>> print(output.shape)
        (24,)
    """
    _check_input_tensor(x)
    return x.ravel()


@constexpr
def _move_axes_for_concatenate(arr_shape, axis):
    """
    Moves axis 0 to the designated position, while keeping the other axes'
    relative positions unchanged; only used if a single tensor is concatenated.
    """

    original_axes = tuple(range(len(arr_shape)))
    new_axes = original_axes[1:axis+1] + (0,) + original_axes[axis+1:]
    new_shape = arr_shape[1:axis+1] + (arr_shape[0] * arr_shape[axis+1],) + \
        arr_shape[axis+2:]
    return new_axes, new_shape
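
# A comment-only worked example of the helper above (shapes illustrative): for
# arr_shape = (3, 4, 5) and axis = 1, concatenating the three (4, 5) sub-tensors
# along axis 1 requires
#
#     original_axes = (0, 1, 2)
#     new_axes  = original_axes[1:2] + (0,) + original_axes[2:]  # (1, 0, 2)
#     new_shape = (4,) + (3 * 5,) + ()                           # (4, 15)
#
# i.e. axis 0 is moved next to the concatenation axis and folded into it.

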
def _promote_type_for_concatenate(tuple_of_tensors):
    """
    Checks dtype for all tensors in the tuple. If dtypes are not the same, promotes
    them to the `highest` dtype in the tuple, so that they are ready for the concat
    operator.

    Args:
        tuple_of_tensors(tuple(tensor)): A tuple of tensors

    Returns:
        tuple of tensors, with each tensor promoted to the same dtype.
    """
    need_cast = False
    final_type = tuple_of_tensors[0].dtype

    for tensor in tuple_of_tensors:
        if not _check_same_type(final_type, tensor.dtype):
            need_cast = True
        final_type = _promote(final_type, tensor.dtype)

    if not need_cast:
        return tuple_of_tensors
    tuple_of_casted_tensors = ()
    for tensor in tuple_of_tensors:
        tuple_of_casted_tensors += (tensor.astype(final_type, copy=False),)
    return tuple_of_casted_tensors


def concatenate(arrays, axis=0):
    """
    Joins a sequence of tensors along an existing axis.

    Note:
        To match Numpy behaviour, :math:`axis >= 32` will not cause a value error, the
        `axis` will be treated as :class:`None` instead.

    Args:
        arrays (Union[Tensor, tuple(Tensor), list(Tensor)]): a tensor or a list
            of tensors to be concatenated.
        axis (Union[None, int], optional): The axis along which the tensors will be joined,
            if `axis` is :class:`None`, tensors are flattened before use. Default is 0.

    Returns:
        A tensor concatenated from a tensor or a list of tensors.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If `axis` is less than 32 but not in the range of :math:`[-ndim, ndim-1]`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.ones((1,2,3))
        >>> x2 = np.ones((1,2,1))
        >>> x = np.concatenate((x1, x2), axis=-1)
        >>> print(x.shape)
        (1, 2, 4)
    """
    if isinstance(arrays, Tensor):
        # if only one tensor is provided, it is treated as a tuple along the
        # first dimension. For example, a tensor of shape (3,4,5) will be treated
        # as: tuple(tensor_1(4,5), tensor_2(4,5), tensor_3(4,5))
        if axis is None or axis >= MAX_NUMPY_DIMS:
            return ravel(arrays)
        arr_shape = F.shape(arrays)
        _check_axes_range((axis,), len(arr_shape))
        # move axis 0 to the designated position, while keeping the other axes'
        # relative positions unchanged
        new_axes, new_shape = _move_axes_for_concatenate(arr_shape, axis)
        arrays = transpose(arrays, new_axes)
        arrays = reshape(arrays, new_shape)
        return arrays

    flattened_arrays = ()
    if axis is None or axis >= MAX_NUMPY_DIMS:
        for arr in arrays:
            flattened_arrays += (ravel(arr),)
        axis = -1
        flattened_arrays = _promote_type_for_concatenate(flattened_arrays)
        return P.Concat(axis)(flattened_arrays)

    # convert a list of tensors to a tuple of tensors
    arrays = _convert_list_tensor_to_tuple_tensor(arrays)

    arr_shape = F.shape(arrays[0])
    _check_axes_range((axis,), len(arr_shape))

    # if there is only one tensor in the tuple/list, return the tensor itself
    if len(arrays) == 1:
        return arrays[0]

    arrays = _promote_type_for_concatenate(arrays)
    return P.Concat(axis)(arrays)


def append(arr, values, axis=None):
    """
    Appends values to the end of a tensor.

    Args:
        arr (Tensor): Values are appended to a copy of this tensor.
        values (Tensor): These values are appended to a copy of `arr`. It must be of
            the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is
            not specified, `values` can be any shape and will be flattened before use.
        axis (None, int, optional): The `axis` along which values are appended. If `axis` is not
            given, both `arr` and `values` are flattened before use, default is :class:`None`.

    Returns:
        Tensor, a copy of tensor with values appended to axis.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If specified axis exceeds `arr.ndim`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((2, 3))
        >>> b = np.ones((2, 1))
        >>> print(np.append(a, b, axis=1).shape)
        (2, 4)
    """
    _check_input_tensor(arr)
    _check_input_tensor(values)
    if axis is None:
        arr = arr.ravel()
        values = values.ravel()
    else:
        _check_axis_in_range(axis, arr.ndim)
    if F.rank(arr) != F.rank(values):
        _raise_value_error("all tensors must have same number of dimensions")
    return concatenate((arr, values), axis)


def column_stack(tup):
    """
    Stacks 1-D tensors as columns into a 2-D tensor. 2-D tensors are stacked as-is,
    like np.hstack.

    Args:
        tup (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. All
            of them must have the same shape except the axis to be concatenated.

    Returns:
        2-D Tensor, formed by stacking the given tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `tup` is not Tensor, list or tuple.
        ValueError: If `tup` is empty.

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([1, 2, 3]).astype('int32')
        >>> x2 = np.array([4, 5, 6]).astype('int32')
        >>> output = np.column_stack((x1, x2))
        >>> print(output)
        [[1 4]
         [2 5]
         [3 6]]
    """
    if isinstance(tup, Tensor):
        return tup
    if not isinstance(tup, (list, tuple)):
        _raise_type_error("Tensor or list/tuple of tensors is required, but got ", tup)

    trans_tup = ()
    for tensor in tup:
        if tensor.ndim < 1:
            tensor = F.expand_dims(tensor, 0)
        if tensor.ndim == 1:
            tensor = F.expand_dims(tensor, 1)
        trans_tup += (tensor,)
    if not trans_tup:
        _raise_value_error("Need at least one tensor to concatenate.")
    return P.Concat(1)(trans_tup)


def vstack(tup):
    """
    Stacks tensors in sequence vertically.
    This is equivalent to concatenation along the first axis. 1-D tensors are first
    reshaped to `(1, N)`, and then concatenated along the first axis.

    Args:
        tup (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. The tensors must have the same shape
            along all but the first axis. 1-D tensors must have the same shape.

    Returns:
        Stacked Tensor, formed by stacking the given tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `tup` is not Tensor, list or tuple.
        ValueError: If `tup` is empty.

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([1, 2, 3]).astype('int32')
        >>> x2 = np.array([4, 5, 6]).astype('int32')
        >>> output = np.vstack((x1, x2))
        >>> print(output)
        [[1 2 3]
         [4 5 6]]
    """
    if isinstance(tup, Tensor):
        return tup
    if not isinstance(tup, (list, tuple)):
        _raise_type_error("Tensor or list/tuple of tensors is required, but got", tup)

    trans_tup = ()
    for tensor in tup:
        if tensor.ndim <= 1:
            tensor = _expand(tensor, 2, 0)
        trans_tup += (tensor,)
    if not trans_tup:
        _raise_value_error("Need at least one tensor to concatenate.")
    return P.Concat(0)(trans_tup)


def hstack(tup):
    """
    Stacks tensors in sequence horizontally.
    This is equivalent to concatenation along the second axis, except for 1-D tensors
    where it concatenates along the first axis.

    Args:
        tup (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. The
            tensors must have the same shape along all but the second axis, except
            1-D tensors which can be any length.

    Returns:
        Stacked Tensor, formed by stacking the given tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `tup` is not Tensor, list or tuple.
        ValueError: If `tup` is empty.

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([1, 2, 3]).astype('float32')
        >>> x2 = np.array([4, 5, 6]).astype('float32')
        >>> output = np.hstack((x1, x2))
        >>> print(output)
        [1. 2. 3. 4. 5. 6.]
    """
    if isinstance(tup, Tensor):
        return tup
    if not isinstance(tup, (list, tuple)):
        _raise_type_error("Tensor or list/tuple of tensors is required, but got", tup)

    tuple_of_tensor = ()
    for tensor in tup:
        if tensor.ndim < 1:
            tensor = F.expand_dims(tensor, 0)
        tuple_of_tensor += (tensor,)
    if not tuple_of_tensor:
        _raise_value_error("Need at least one tensor to concatenate.")
    if tuple_of_tensor[0].ndim <= 1:
        return P.Concat(0)(tuple_of_tensor)
    return P.Concat(1)(tuple_of_tensor)


def dstack(tup):
    """
    Stacks tensors in sequence depth-wise (along the third axis).
    This is equivalent to concatenation along the third axis. 1-D tensors :math:`(N,)` should be
    reshaped to :math:`(1,N,1)`.
    2-D tensors :math:`(M,N)` should be reshaped to :math:`(M,N,1)` before concatenation.

    Args:
        tup (Union[Tensor, tuple, list]): A sequence of tensors. The tensors must have the same shape along all but
            the third axis. 1-D or 2-D tensors must have the same shape.

    Returns:
        Stacked Tensor, formed by stacking the given tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `tup` is not Tensor, list or tuple.
        ValueError: If `tup` is empty.

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([1, 2, 3]).astype('float32')
        >>> x2 = np.array([4, 5, 6]).astype('float32')
        >>> output = np.dstack((x1, x2))
        >>> print(output)
        [[[1. 4.]
          [2. 5.]
          [3. 6.]]]
    """
    if isinstance(tup, Tensor):
        return tup
    if not isinstance(tup, (list, tuple)):
        _raise_type_error("Tensor or list/tuple of tensors is required, but got", tup)

    trans_tup = ()
    for tensor in tup:
        if tensor.ndim <= 1:
            tensor = _expand(tensor, 2, 0)
        if tensor.ndim == 2:
            tensor = F.expand_dims(tensor, 2)
        trans_tup += (tensor,)
    if not trans_tup:
        _raise_value_error("Need at least one tensor to concatenate.")
    return P.Concat(2)(trans_tup)


def where(condition, x=None, y=None):
    """
    Returns elements chosen from `x` or `y` depending on `condition`.

    Note:
        As nonzero is not supported, both `x` and `y` must be provided as
        Tensor input.

    Args:
        condition (Tensor): where True, yield `x`, otherwise yield `y`.
        x (Tensor): Values from which to choose. Defaults to None.
        y (Tensor): Values from which to choose. `x`, `y` and `condition` need
            to be broadcastable to some shape. Defaults to None.

    Returns:
        Tensor or scalar, with elements from `x` where `condition` is True, and
        elements from `y` elsewhere.

    Raises:
        ValueError: if operands cannot be broadcast.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> condition = np.full((1, 1, 2), [False, True])
        >>> x = np.full((1, 3, 2), 5)
        >>> y = np.full((2, 1, 1), 7)
        >>> output = np.where(condition, x, y)
        >>> print(output)
        [[[7 5]
          [7 5]
          [7 5]]
         [[7 5]
          [7 5]
          [7 5]]]
    """
    condition, x, y = _to_tensor(condition, x, y)
    # type promotes input tensors
    dtype1 = F.dtype(x)
    dtype2 = F.dtype(y)
    dtype = _promote(dtype1, dtype2)
    if not _check_same_type(dtype1, dtype):
        x = F.cast(x, dtype)
    if not _check_same_type(dtype2, dtype):
        y = F.cast(y, dtype)
    is_bool = _check_same_type(dtype1, mstype.bool_) and _check_same_type(
        dtype2, mstype.bool_)
    if is_bool:
        # select does not support bool type for x or y
        x = F.cast(x, mstype.float32)
        y = F.cast(y, mstype.float32)

    # broadcasts input tensors
    shape_out = _infer_out_shape(F.shape(condition),
                                 F.shape(x), F.shape(y))
    if not _check_same_type(F.dtype(condition), mstype.float32):
        # tiling with bool is not supported on GPU
        condition = F.cast(condition, mstype.float32)
    condition = _broadcast_to_shape(condition, shape_out)
    x = _broadcast_to_shape(x, shape_out)
    y = _broadcast_to_shape(y, shape_out)

    if not _check_same_type(F.dtype(condition), mstype.bool_):
        condition = F.cast(condition, mstype.bool_)
    res = F.select(condition, x, y)
    if is_bool:
        res = F.cast(res, mstype.bool_)
    return res
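
# A comment-only trace of the broadcasting in where, using the docstring
# example: condition (1, 1, 2), x (1, 3, 2) and y (2, 1, 1) give the common
# output shape
#
#     _infer_out_shape((1, 1, 2), (1, 3, 2), (2, 1, 1))  ->  (2, 3, 2)
#
# after which all three operands are broadcast to (2, 3, 2) and combined
# elementwise by F.select.

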
def _atleast_xd(ndim, arys):
    """Returns arys with at least `ndim` dimensions."""
    _check_input_tensor(*arys)
    res = []
    for arr in arys:
        arr = _expand(arr, ndim)
        res.append(arr)
    if len(res) == 1:
        return res[0]
    return res


def atleast_1d(*arys):
    """
    Converts inputs to arrays with at least one dimension.

    Scalar inputs are converted to 1-dimensional arrays, whilst
    higher-dimensional inputs are preserved.

    Note:
        In graph mode, returns a tuple of tensors instead of a list of
        tensors.

    Args:
        *arys (Tensor): one or more input tensors.

    Returns:
        Tensor, or list of tensors, each with ``a.ndim >= 1``.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((2, 3))
        >>> b = np.ones(())
        >>> c = np.ones(5)
        >>> output = np.atleast_1d(a, b, c)
        >>> print(output)
        [Tensor(shape=[2, 3], dtype=Float32, value=
        [[1.00000000e+00, 1.00000000e+00, 1.00000000e+00],
        [1.00000000e+00, 1.00000000e+00, 1.00000000e+00]]),
        Tensor(shape=[1], dtype=Float32, value= [1.00000000e+00]),
        Tensor(shape=[5], dtype=Float32,
        value= [1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
        1.00000000e+00, 1.00000000e+00])]
    """
    return _atleast_xd(1, arys)


def atleast_2d(*arys):
    """
    Reshapes inputs as arrays with at least two dimensions.

    Note:
        In graph mode, returns a tuple of tensors instead of a list of
        tensors.

    Args:
        *arys (Tensor): one or more input tensors.

    Returns:
        Tensor, or list of tensors, each with ``a.ndim >= 2``.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((2, 3))
        >>> b = np.ones(())
        >>> c = np.ones(5)
        >>> output = np.atleast_2d(a, b, c)
        >>> print(output)
        [Tensor(shape=[2, 3], dtype=Float32, value=
        [[1.00000000e+00, 1.00000000e+00, 1.00000000e+00],
        [1.00000000e+00, 1.00000000e+00, 1.00000000e+00]]),
        Tensor(shape=[1, 1], dtype=Float32, value= [[1.00000000e+00]]),
        Tensor(shape=[1, 5], dtype=Float32,
        value= [[1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
        1.00000000e+00, 1.00000000e+00]])]
    """
    return _atleast_xd(2, arys)


def atleast_3d(*arys):
    """
    Reshapes inputs as arrays with at least three dimensions.

    Note:
        In graph mode, returns a tuple of tensors instead of a list of
        tensors.

    Args:
        *arys (Tensor): one or more input tensors.

    Returns:
        Tensor, or list of tensors, each with ``a.ndim >= 3``. For example,
        a 1-D array of shape `(N,)` becomes a tensor of shape `(1, N, 1)`, and
        a 2-D array of shape `(M, N)` becomes a tensor of shape `(M, N, 1)`.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((2, 3))
        >>> b = np.ones(())
        >>> c = np.ones(5)
        >>> output = np.atleast_3d(a, b, c)
        >>> print(output)
        [Tensor(shape=[2, 3, 1], dtype=Float32, value=
        [[[1.00000000e+00], [1.00000000e+00], [1.00000000e+00]],
        [[1.00000000e+00], [1.00000000e+00], [1.00000000e+00]]]),
        Tensor(shape=[1, 1, 1], dtype=Float32, value= [[[1.00000000e+00]]]),
        Tensor(shape=[1, 5, 1], dtype=Float32,
        value= [[[1.00000000e+00], [1.00000000e+00], [1.00000000e+00],
        [1.00000000e+00], [1.00000000e+00]]])]
    """
    res = []
    for arr in arys:
        ndim = F.rank(arr)
        if ndim == 0:
            arr = F.reshape(arr, (1, 1, 1))
        elif ndim == 1:
            arr = F.reshape(arr, (1, F.size(arr), 1))
        elif ndim == 2:
            arr = F.reshape(arr, F.shape(arr) + (1,))
        res.append(arr)
    if len(res) == 1:
        return res[0]
    return res


def stack(arrays, axis=0):
    """
    Joins a sequence of arrays along a new axis.

    The `axis` parameter specifies the index of the new axis in the
    dimensions of the result. For example, if ``axis=0`` it will be the
    first dimension and if ``axis=-1`` it will be the last dimension.

    Note:
        Numpy argument out is not supported.

    Args:
        arrays (sequence of Tensor): Each array must have the same shape.
        axis (int, optional): The axis in the result array along which the
            input arrays are stacked. Default: 0.

    Returns:
        Tensor, the stacked array has one more dimension than the input
        arrays.

    Raises:
        ValueError: if input is not Tensor, tuple, or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> arrays = [np.ones((3, 4)) for _ in range(10)]
        >>> output = np.stack(arrays, axis=0)
        >>> print(output.shape)
        (10, 3, 4)
        >>> output = np.stack(arrays, axis=1)
        >>> print(output.shape)
        (3, 10, 4)
        >>> output = np.stack(arrays, axis=2)
        >>> print(output.shape)
        (3, 4, 10)
    """

    if isinstance(arrays, Tensor):
        shape = F.shape(arrays)
        ndim = F.rank(arrays)
        axis = axis % ndim
        axes = F.make_range(ndim)
        perm = axes[1:axis+1] + (0,) + axes[axis+1:]
        if _is_shape_empty(shape):
            return _empty(mstype.float32, shape[1:axis+1] + (shape[0],) + shape[axis+1:])
        return transpose(arrays, perm)

    if isinstance(arrays, (list, tuple)):
        shape = (len(arrays),) + F.shape(arrays[0])
        ndim = len(shape)
        axis = axis % ndim
        if _is_shape_empty(shape):
            return _empty(mstype.float32, shape[1:axis+1] + (shape[0],) + shape[axis+1:])
        seq = ()
        for arr in arrays:
            seq += (F.expand_dims(arr, axis),)
        return concatenate(seq, axis)
    return _raise_value_error('input arrays must be Tensor, tuple, or list')


class UniqueNet(Cell):
    """The operation is wrapped inside a model."""

    def __init__(self):
        super(UniqueNet, self).__init__()
        self.unique = P.Unique()

    def construct(self, x):
        return self.unique(x)


def unique(x, return_inverse=False):
    """
    Finds the unique elements of a tensor. The input tensor will be flattened first
    when it has more than one dimension.

    Note:
        Numpy arguments `axis`, `return_index` and `return_counts` are not supported.
        On CPU, this operator must be executed in graph mode.

    Args:
        x (Tensor): The input tensor to be processed.
        return_inverse (bool): If `True`, also return the indices of the unique tensor.
            Default: `False`.

    Returns:
        Tensor or tuple of Tensors.

        - If `return_inverse` is `False`, just return the unique tensor.
        - If `return_inverse` is `True`, return a tuple of tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `x` is not tensor.

    Examples:
        >>> import mindspore.numpy as np
        >>> from mindspore import context
        >>> context.set_context(mode=context.GRAPH_MODE)
        >>> input_x = np.asarray([1, 2, 2, 2, 3, 4, 5]).astype('int32')
        >>> output_x = np.unique(input_x)
        >>> print(output_x)
        [1 2 3 4 5]
        >>> output_x = np.unique(input_x, return_inverse=True)
        >>> print(output_x)
        (Tensor(shape=[5], dtype=Int32, value= [ 1, 2, 3, 4, 5]), Tensor(shape=[7], dtype=Int32,
            value= [0, 1, 1, 1, 2, 3, 4]))
    """
    _check_input_tensor(x)
    if F.tuple_len(F.shape(x)) > 1:
        x = ravel(x)
    uniq = UniqueNet()
    res = uniq(x)
    if not return_inverse:
        return res[0]
    return res


def roll_along_axis(a, shift, axis):
    """
    Rolls a tensor along a given axis. This is a helper function of np.roll.

    Args:
        a (Tensor): Input tensor.
        shift (int): The number of places the tensor is shifted.
        axis (int): The designated axis for shifting.

    Returns:
        Shifted tensor.
    """
    _check_axis_in_range(axis, a.ndim)
    _check_element_int((shift, axis))
    if axis < 0:
        axis += a.ndim
    shift = -(shift % a.shape[axis])
    # if shift is 0, we do not need to roll at all
    if shift == 0:
        return a
    begin1 = ()
    begin2 = ()
    end1 = ()
    end2 = ()
    stride = _list_comprehensions(a.ndim, 1, True)
    for i in F.make_range(a.ndim):
        if i != axis:
            begin1 += (0,)
            end1 += (a.shape[i],)
            begin2 += (0,)
            end2 += (a.shape[i],)
        else:
            begin1 += (shift,)
            end1 += (a.shape[i],)
            begin2 += (0,)
            end2 += (shift,)
    return append(F.strided_slice(a, begin1, end1, stride),
                  F.strided_slice(a, begin2, end2, stride), axis=axis)
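
# A comment-only trace of the slicing above (values illustrative, and assuming
# Python-style negative indices in F.strided_slice): rolling a 1-D tensor of
# shape (4,) by shift = 1 along axis 0 gives
#
#     shift  = -(1 % 4) = -1
#     begin1 = (-1,), end1 = (4,)   ->  a[-1:]  (the last element)
#     begin2 = (0,),  end2 = (-1,)  ->  a[:-1]  (the first three elements)
#
# and appending the two slices along the axis yields a rolled by 1.

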
def roll(a, shift, axis=None):
    """
    Rolls a tensor along given axes.

    Elements that roll beyond the last position are re-introduced at the first.

    Args:
        a (Tensor): Input tensor.
        shift (Union[int, tuple(int)]): The number of places by which elements are
            shifted. If a tuple, then axis must be a tuple of the same size, and
            each of the given axes is shifted by the corresponding number. If shift
            is an int while axis is a tuple of integers, then the same value is used
            for all given axes.
        axis (Union[int, tuple(int)], optional): Axis or axes along which elements
            are shifted. By default, the array is flattened before shifting, after
            which the original shape is restored.

    Returns:
        Tensor, with the same shape as a.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If axis exceeds `a.ndim`, or `shift` and `axis` cannot broadcast.

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.reshape(np.arange(12), (3, 4))
        >>> print(np.roll(a, [2,-3], [0,-1]))
        [[ 7  4  5  6]
         [11  8  9 10]
         [ 3  0  1  2]]
    """
    _check_input_tensor(a)
    original_shape = a.shape
    original_dtype = a.dtype
    restore_shape = False
    # F.strided_slice only supports float on cpu, this will change once more
    # support is added.
    if not _check_is_float(original_dtype):
        a = a.astype(mstype.float32)
    if axis is None:
        restore_shape = True
        axis = 0
        a = a.ravel()
    # Broadcast shift and axis to the same length
    shift, axis = _broadcast_tuples(shift, axis)
    for shift_each, axis_each in zip(shift, axis):
        a = roll_along_axis(a, shift_each, axis_each)
    if restore_shape:
        a = a.reshape(original_shape)
    if not _check_is_float(original_dtype):
        a = a.astype(original_dtype)
    return a


@constexpr
def _get_moved_perm(ndim, source, destination):
    """
    Helper function for moveaxis, returns permutation after moving axes
    from source to destination.
    """
    dest_sorted_idx = [i for i, _ in sorted(enumerate(destination),
                                            key=operator.itemgetter(1))]
    axes_orig = [i for i in range(ndim) if i not in source]

    k = 0
    m = 0
    perm = []
    for i in dest_sorted_idx:
        # inserts an axis that has been moved, denoted by n, and axes that remain
        # in their original position, indexed from k to k + n - m, into index m in
        # the list of permuted axes
        n = destination[i]
        j = k + n - m
        perm += axes_orig[k:j]
        perm.append(source[i])
        k += n - m
        m = n + 1
    perm += axes_orig[k:]
    return tuple(perm)
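
# A comment-only trace of _get_moved_perm, matching the moveaxis docstring
# example: for ndim = 3, source = (0,), destination = (2,),
#
#     dest_sorted_idx = [0], axes_orig = [1, 2]
#     i = 0: n = 2, perm += axes_orig[0:2] -> [1, 2], then append source[0]
#
# so perm = (1, 2, 0), and transposing a (3, 4, 5) tensor yields (4, 5, 3).

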
@constexpr
def _get_moved_shape(shape, perm):
    """
    Helper function for moveaxis, returns the permuted shape after
    applying perm.
    """
    return tuple([shape[i] for i in perm])


def moveaxis(a, source, destination):
    """
    Moves axes of an array to new positions.

    Other axes remain in their original order.

    Args:
        a (Tensor): The array whose axes should be reordered.
        source (int or sequence of ints): Original positions of the
            axes to move. These must be unique.
        destination (int or sequence of ints): Destination positions
            for each of the original axes. These must also be unique.

    Returns:
        Tensor, array with moved axes.

    Raises:
        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.zeros((3, 4, 5))
        >>> output = np.moveaxis(x, 0, -1)
        >>> print(output.shape)
        (4, 5, 3)
        >>> output = np.moveaxis(x, -1, 0)
        >>> print(output.shape)
        (5, 3, 4)
        >>> output = np.moveaxis(x, [0, 1, 2], [-1, -2, -3])
        >>> print(output.shape)
        (5, 4, 3)
    """
    ndim = F.rank(a)
    source = _check_axis_valid(source, ndim)
    destination = _check_axis_valid(destination, ndim)
    if len(source) != len(destination):
        _raise_value_error('`source` and `destination` arguments must have the same number of elements')
    perm = _get_moved_perm(ndim, source, destination)

    shape = F.shape(a)
    if _is_shape_empty(shape):
        return _empty(F.dtype(a), _get_moved_shape(shape, perm))

    return F.transpose(a, perm)


def tile(a, reps):
    """
    Constructs an array by repeating `a` the number of times given by `reps`.

    If `reps` has length `d`, the result will have dimension of ``max(d, a.ndim)``.
    If ``a.ndim < d``, `a` is promoted to be d-dimensional by prepending new axes.
    So a shape (3,) array is promoted to (1, 3) for 2-D replication, or
    shape (1, 1, 3) for 3-D replication. If this is not the desired behavior,
    promote `a` to d-dimensions manually before calling this function.
    If ``a.ndim > d``, `reps` is promoted to ``a.ndim`` by prepending 1s to it. Thus
    for an `a` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as (1, 1, 2, 2).

    Args:
        a (Tensor): The input array.
        reps (int or sequence of ints): The number of repetitions of `a` along
            each axis.

    Returns:
        Tensor, the tiled output array.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([0, 1, 2])
        >>> output = np.tile(a, 2)
        >>> print(output)
        [0 1 2 0 1 2]
        >>> output = np.tile(a, (2, 2))
        >>> print(output)
        [[0 1 2 0 1 2]
         [0 1 2 0 1 2]]
        >>> output = np.tile(a, (2, 1, 2))
        >>> print(output)
        [[[0 1 2 0 1 2]]
         [[0 1 2 0 1 2]]]
    """
    _check_input_tensor(a)
    ndim = F.rank(a)
    shape = F.shape(a)
    reps = _add_unit_axes(reps, ndim)
    if _is_shape_empty(shape) or _is_shape_empty(reps):
        shape = _add_unit_axes(shape, len(reps))
        return _empty(F.dtype(a), _seq_prod(shape, reps))
    return F.tile(a, reps)


@constexpr
def _check_can_broadcast_to(shape, target_shape):
    """Determines if shape can be broadcast to target_shape."""
    ndim = len(shape)
    ndim_target = len(target_shape)
    if ndim > ndim_target:
        return False
    for i, j in zip(reversed(shape), reversed(target_shape)):
        if i not in (1, j):
            return False
    return True
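
# Comment-only examples of the right-aligned broadcasting rule above (shapes
# illustrative): trailing dimensions must match, or be 1 in the source shape.
#
#     _check_can_broadcast_to((3,), (3, 3))    # True: 3 matches 3
#     _check_can_broadcast_to((1, 3), (2, 3))  # True: 1 broadcasts to 2
#     _check_can_broadcast_to((2,), (3, 3))    # False: 2 is neither 1 nor 3

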
def broadcast_to(array, shape):
    """
    Broadcasts an array to a new shape.

    Args:
        array (Tensor): The array to broadcast.
        shape (tuple): The shape of the desired array.

    Returns:
        Tensor, original array broadcast to the given shape.

    Raises:
        ValueError: if array cannot be broadcast to shape.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([1, 2, 3])
        >>> output = np.broadcast_to(x, (3, 3))
        >>> print(output)
        [[1 2 3]
         [1 2 3]
         [1 2 3]]
    """
    shape_a = F.shape(array)
    if not _check_can_broadcast_to(shape_a, shape):
        return _raise_value_error('cannot broadcast with ', shape)
    return _broadcast_to_shape(array, shape)


def broadcast_arrays(*args):
    """
    Broadcasts any number of arrays against each other.

    Note:
        Numpy argument `subok` is not supported.
        In graph mode, returns a tuple of Tensor instead of a list
        of Tensor.

    Args:
        *args (Tensor): The arrays to broadcast.

    Returns:
        List of Tensor.

    Raises:
        ValueError: if arrays cannot be broadcast.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([[1,2,3]])
        >>> y = np.array([[4],[5]])
        >>> output = np.broadcast_arrays(x, y)
        >>> print(output)
        [Tensor(shape=[2, 3], dtype=Int32, value=
        [[1, 2, 3],
        [1, 2, 3]]), Tensor(shape=[2, 3], dtype=Int32, value=
        [[4, 4, 4],
        [5, 5, 5]])]
    """
    shapes = map(F.shape, args)
    out_shape = _infer_out_shape(*shapes)
    res = []
    for arr in args:
        res.append(broadcast_to(arr, out_shape))
    return res


def array_split(x, indices_or_sections, axis=0):
    """
    Splits a tensor into multiple sub-tensors.

    Note:
        Currently, array_split only supports :class:`mindspore.float32` on ``CPU``.

    The only difference between ``np.split`` and ``np.array_split`` is that
    ``np.array_split`` allows `indices_or_sections` to be an integer that does not
    equally divide the axis. For a tensor of length :math:`l` that should be split
    into :math:`n` sections, it returns :math:`l % n` sub-arrays of size :math:`l//n + 1`
    and the rest of size :math:`l//n`.

    Args:
        x (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):
            If integer, :math:`N`, the tensor will be divided into
            :math:`N` tensors along axis.
            If tuple(int) or list(int) of sorted integers,
            the entries indicate where along axis the array is split.
            For example, :math:`[2, 3]` would, for :math:`axis=0`, result in
            three sub-tensors :math:`x[:2]`, :math:`x[2:3]` and :math:`x[3:]`.
            If an index exceeds the dimension of the array along axis,
            an empty sub-array is returned correspondingly.
        axis (int): The axis along which to split. Default: 0.

    Returns:
        A list of sub-tensors.

    Raises:
        TypeError: If argument `indices_or_sections` is not integer,
            tuple(int) or list(int), or argument `axis` is not integer.
        ValueError: If argument `axis` is out of range of :math:`[-x.ndim, x.ndim)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.arange(9).astype("float32")
        >>> output = np.array_split(input_x, 4)
        >>> print(output)
        (Tensor(shape=[3], dtype=Float32,
            value= [ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]),
        Tensor(shape=[2], dtype=Float32,
            value= [ 3.00000000e+00,  4.00000000e+00]),
        Tensor(shape=[2], dtype=Float32,
            value= [ 5.00000000e+00,  6.00000000e+00]),
        Tensor(shape=[2], dtype=Float32,
            value= [ 7.00000000e+00,  8.00000000e+00]))
    """
    return _split(x, indices_or_sections, opname="array_split", axis=axis)


def split(x, indices_or_sections, axis=0):
    """
    Splits a tensor into multiple sub-tensors along the given axis.

    Args:
        x (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):
            If integer, :math:`N`, the tensor will be divided into
            :math:`N` equal tensors along axis.
            If tuple(int) or list(int) of sorted integers,
            the entries indicate where along axis the array is split.
            For example, :math:`[2, 3]` would, for :math:`axis=0`, result in
            three sub-tensors :math:`x[:2]`, :math:`x[2:3]` and :math:`x[3:]`.
            If an index exceeds the dimension of the array along axis,
            an empty sub-array is returned correspondingly.
        axis (int): The axis along which to split. Default: 0.

    Returns:
        A list of sub-tensors.

    Raises:
        TypeError: If argument `indices_or_sections` is not integer,
            tuple(int) or list(int), or argument `axis` is not integer.
        ValueError: If argument `axis` is out of range of :math:`[-x.ndim, x.ndim)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.arange(9).astype("float32")
        >>> output = np.split(input_x, 3)
        >>> print(output)
        (Tensor(shape=[3], dtype=Float32,
          value= [ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]),
         Tensor(shape=[3], dtype=Float32,
          value= [ 3.00000000e+00,  4.00000000e+00,  5.00000000e+00]),
         Tensor(shape=[3], dtype=Float32,
          value= [ 6.00000000e+00,  7.00000000e+00,  8.00000000e+00]))
    """
    return _split(x, indices_or_sections, opname="split", axis=axis)


def _split(x, indices_or_sections, opname, axis=0):
    """Splits a tensor based on ``np.split`` or ``np.array_split``."""
    _check_input_tensor(x)
    _ = _check_axis_type(axis, True, False, False)
    axis = _canonicalize_axis(axis, x.ndim)
    res = None
    arr_shape = x.shape
    length_along_dim = arr_shape[axis]
    if isinstance(indices_or_sections, int):
        if indices_or_sections > length_along_dim:
            _raise_value_error("empty tensor encountered.")
        if opname == "split" or length_along_dim % indices_or_sections == 0:
            res = P.Split(axis, indices_or_sections)(x)
        else:
            num_long_tensor = length_along_dim % indices_or_sections
            num_short_tensor = indices_or_sections - num_long_tensor
            length1 = num_long_tensor * (length_along_dim // indices_or_sections + 1)
            length2 = length_along_dim - length1
            start1 = _list_comprehensions(F.rank(x), 0, True)
            size1 = _tuple_setitem(arr_shape, axis, length1)
            start2 = _tuple_setitem(start1, axis, length1)
            size2 = _tuple_setitem(arr_shape, axis, length2)
            res = P.Split(axis, num_long_tensor)(F.tensor_slice(x, start1, size1)) + \
                P.Split(axis, num_short_tensor)(F.tensor_slice(x, start2, size2))

    elif isinstance(indices_or_sections, (list, tuple)) and _check_element_int(indices_or_sections):
        res = _split_sub_tensors(x, indices_or_sections, axis)
    else:
        _raise_type_error("Argument `indices_or_sections` in `mindspore.numpy.split` "
                          "should be integer, tuple(int) or list(int), but got", indices_or_sections)
    return res
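
# A comment-only trace of the unequal array_split branch above, matching the
# array_split docstring example: length_along_dim = 9, indices_or_sections = 4
#
#     num_long_tensor  = 9 % 4 = 1     # sub-tensors of size 9 // 4 + 1 = 3
#     num_short_tensor = 4 - 1 = 3     # sub-tensors of size 2
#     length1 = 1 * 3 = 3, length2 = 6
#
# so x[:3] is split into one tensor of size 3 and x[3:] into three tensors of
# size 2, giving sizes (3, 2, 2, 2).

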
@constexpr
def convert_neg_indices(indices, ndim):
    """Converts negative values in tuple/list indices."""
    def canonicalizer(ax):
        return ax + ndim if ax < 0 else ax
    indices = tuple([canonicalizer(axis) for axis in indices])
    return indices


def _split_sub_tensors(x, indices, axis):
    """
    Splits the input tensor `x` into multiple sub-tensors
    along the axis according to the given indices.
    """
    length_along_dim = x.shape[axis]
    indices = convert_neg_indices(indices, length_along_dim)
    indices += (length_along_dim,)

    sub_tensors = []
    strides = _list_comprehensions(x.ndim, 1, True)
    begin = _list_comprehensions(x.ndim, 0)
    end = _list_comprehensions(x.shape)
    for i, idx in enumerate(indices):
        begin[axis] = 0 if i == 0 else indices[i-1]
        end[axis] = idx
        if end[axis] <= begin[axis]:
            _raise_value_error("empty sub-tensor encountered.")
        sliced_tensor = F.strided_slice(x, _type_convert(tuple, begin), _type_convert(tuple, end), strides)
        sub_tensors.append(sliced_tensor)
    return sub_tensors
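
# A comment-only example of the index bookkeeping above (values illustrative):
# for a 1-D tensor of length 9 and indices = (2, 3), the loop appends the
# sentinel and slices
#
#     indices = (2, 3, 9)  ->  x[0:2], x[2:3], x[3:9]
#
# raising a ValueError if any slice would be empty.

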
def vsplit(x, indices_or_sections):
    """
    Splits a tensor into multiple sub-tensors vertically (row-wise).
    It is equivalent to split with :math:`axis=0` (default); the array is always
    split along the first axis regardless of the array dimension.

    Args:
        x (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):
            If integer, :math:`N`, the tensor will be divided into
            :math:`N` equal tensors along axis.
            If tuple(int) or list(int) of sorted integers,
            the entries indicate where along axis the array is split.
            For example, :math:`[2, 3]` would, for :math:`axis=0`, result in
            three sub-tensors :math:`x[:2]`, :math:`x[2:3]` and :math:`x[3:]`.
            If an index exceeds the dimension of the array along axis,
            an empty sub-array is returned correspondingly.

    Returns:
        A list of sub-tensors.

    Raises:
        TypeError: If argument `indices_or_sections` is not integer.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.arange(9).reshape((3, 3)).astype('float32')
        >>> output = np.vsplit(input_x, 3)
        >>> print(output)
        (Tensor(shape=[1, 3], dtype=Float32,
          value=[[ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]]),
         Tensor(shape=[1, 3], dtype=Float32,
          value=[[ 3.00000000e+00,  4.00000000e+00,  5.00000000e+00]]),
         Tensor(shape=[1, 3], dtype=Float32,
          value=[[ 6.00000000e+00,  7.00000000e+00,  8.00000000e+00]]))
    """
    return split(x, indices_or_sections, 0)


1523def hsplit(x, indices_or_sections):
1524    """
1525    Splits a tensor into multiple sub-tensors horizontally (column-wise).
1526    It is equivalent to split with :math:`axis=1` (default), the array is always
1527    split along the second axis regardless of the array dimension.
1528
1529    Args:
1530        x (Tensor): A Tensor to be divided.
1531        indices_or_sections (Union[int, tuple(int), list(int)]):
1532            If integer, :math:`N`, the tensor will be divided into
1533            :math:`N` equal tensors along axis.
1534            If tuple(int), list(int) or of sorted integers,
1535            the entries indicate where along axis the array is split.
1536            For example, :math:`[2, 3]` would, for :math:`axis=0`, result in
1537            three sub-tensors :math:`x[:2]`, :math:`x[2:3]`and :math:`x[3:]`.
1538            If an index exceeds the dimension of the array along axis,
1539            an empty sub-array is returned correspondingly.
1540
1541    Returns:
1542        A list of sub-tensors.
1543
1544    Raises:
1545        TypeError: If argument `indices_or_sections` is not integer.
1546
1547    Supported Platforms:
1548        ``Ascend`` ``GPU`` ``CPU``
1549
1550    Examples:
1551        >>> import mindspore.numpy as np
1552        >>> input_x = np.arange(6).reshape((2, 3)).astype('float32')
1553        >>> output = np.hsplit(input_x, 3)
1554        >>> print(output)
1555        (Tensor(shape=[2, 1], dtype=Float32,
1556        value=[[ 0.00000000e+00],
1557               [ 3.00000000e+00]]),
1558        Tensor(shape=[2, 1], dtype=Float32,
1559        value=[[ 1.00000000e+00],
1560               [ 4.00000000e+00]]),
1561        Tensor(shape=[2, 1], dtype=Float32,
1562        value=[[ 2.00000000e+00],
1563               [ 5.00000000e+00]]))
1564    """
1565    return split(x, indices_or_sections, 1)


def dsplit(x, indices_or_sections):
    """
    Splits a tensor into multiple sub-tensors along the 3rd axis (depth).
    It is equivalent to split with :math:`axis=2`; the array is always
    split along the third axis regardless of the array dimension.

    Args:
        x (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):
            If integer, :math:`N`, the tensor will be divided into
            :math:`N` equal tensors along axis.
            If tuple(int) or list(int) of sorted integers,
            the entries indicate where along axis the array is split.
            For example, :math:`[2, 3]` would, for :math:`axis=2`, result in
            three sub-tensors :math:`x[:, :, :2]`, :math:`x[:, :, 2:3]` and :math:`x[:, :, 3:]`.
            If an index exceeds the dimension of the array along axis,
            an empty sub-array is returned correspondingly.

    Returns:
        A list of sub-tensors.

    Raises:
        TypeError: If argument `indices_or_sections` is not an integer,
            tuple(int) or list(int).

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.arange(6).reshape((1, 2, 3)).astype('float32')
        >>> output = np.dsplit(input_x, 3)
        >>> print(output)
        (Tensor(shape=[1, 2, 1], dtype=Float32,
        value=[[[ 0.00000000e+00],
                [ 3.00000000e+00]]]),
        Tensor(shape=[1, 2, 1], dtype=Float32,
        value=[[[ 1.00000000e+00],
                [ 4.00000000e+00]]]),
        Tensor(shape=[1, 2, 1], dtype=Float32,
        value=[[[ 2.00000000e+00],
                [ 5.00000000e+00]]]))
    """
    return split(x, indices_or_sections, 2)


@constexpr
def _get_flip_start(ndim, shape, axes):
    return tuple([shape[i] - 1 if i in axes else 0 for i in range(ndim)])


@constexpr
def _get_flip_end(ndim, shape, axes):
    return tuple([-shape[i] - 1 if i in axes else shape[i] + 1 for i in range(ndim)])


@constexpr
def _get_flip_strides(ndim, axes):
    return tuple([-1 if i in axes else 1 for i in range(ndim)])

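# Illustrative note: flipping is implemented as a backwards strided slice.
# For shape = (2, 3) and axes = (1,), the helpers above produce
#   start   = (0, 2)     begin at the last column
#   end     = (3, -4)    i.e. -shape[1] - 1, one step past the first column
#   strides = (1, -1)    traverse axis 1 in reverse
# so that F.strided_slice(m, start, end, strides) reverses axis 1 only.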

def flip(m, axis=None):
    """
    Reverses the order of elements in an array along the given axis.

    The shape of the array is preserved, but the elements are reordered.

    Args:
        m (Tensor): Input array.
        axis (None or int or tuple of integers, optional): Axis or axes along which
            to flip over. The default, ``axis=None``, will flip over all of the axes
            of the input array. If `axis` is negative it counts from the last to
            the first axis. If `axis` is a tuple of integers, flipping is performed on
            all of the axes specified in the tuple.

    Returns:
        Tensor, with the entries of `axis` reversed.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> A = np.arange(8.0).reshape((2,2,2))
        >>> output = np.flip(A)
        >>> print(output)
        [[[7. 6.]
          [5. 4.]]
         [[3. 2.]
          [1. 0.]]]
        >>> output = np.flip(A, (0, 2))
        >>> print(output)
        [[[5. 4.]
          [7. 6.]]
         [[1. 0.]
          [3. 2.]]]
    """
    _check_input_tensor(m)
    ndim = F.rank(m)
    axes = _check_axis_valid(axis, ndim)
    shape = F.shape(m)
    dtype = F.dtype(m)
    if _is_shape_empty(shape):
        return m
    if not _check_is_float(dtype):
        m = m.astype(mstype.float32)
    start = _get_flip_start(ndim, shape, axes)
    end = _get_flip_end(ndim, shape, axes)
    strides = _get_flip_strides(ndim, axes)
    res = F.strided_slice(m, start, end, strides)
    if not _check_same_type(F.dtype(res), dtype):
        res = F.cast(res, dtype)
    return res


def flipud(m):
    """
    Flips the entries in each column in the up/down direction.
    Rows are preserved, but appear in a different order than before.

    Args:
        m (Tensor): Input array.

    Returns:
        Tensor.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> A = np.arange(8.0).reshape((2,2,2))
        >>> output = np.flipud(A)
        >>> print(output)
        [[[4. 5.]
          [6. 7.]]
         [[0. 1.]
          [2. 3.]]]
    """
    return flip(m, 0)


def fliplr(m):
    """
    Flips the entries in each row in the left/right direction.
    Columns are preserved, but appear in a different order than before.

    Args:
        m (Tensor): Input array.

    Returns:
        Tensor.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> A = np.arange(8.0).reshape((2,2,2))
        >>> output = np.fliplr(A)
        >>> print(output)
        [[[2. 3.]
          [0. 1.]]
         [[6. 7.]
          [4. 5.]]]
    """
    return flip(m, 1)


def take_along_axis(arr, indices, axis):
    """
    Takes values from the input array by matching 1d index and data slices.

    This iterates over matching 1d slices oriented along the specified axis in the
    index and data arrays, and uses the former to look up values in the latter.
    These slices can be different lengths.

    Args:
        arr (Tensor): Source array with shape `(Ni…, M, Nk…)`.
        indices (Tensor): Indices with shape `(Ni…, J, Nk…)` to take along each 1d
            slice of `arr`. This must match the dimension of `arr`, but dimensions `Ni`
            and `Nk` only need to broadcast against `arr`.
        axis (int): The axis to take 1d slices along. If `axis` is None, the input
            array is treated as if it had first been flattened to 1d.

    Returns:
        Tensor, the indexed result, with shape `(Ni…, J, Nk…)`.

    Raises:
        ValueError: if input array and indices have different number of dimensions.
        TypeError: if the input is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(12).reshape(3, 4)
        >>> indices = np.arange(3).reshape(1, 3)
        >>> output = np.take_along_axis(x, indices, 1)
        >>> print(output)
        [[ 0  1  2]
         [ 4  5  6]
         [ 8  9 10]]
    """
    _check_input_tensor(arr, indices)
    if axis is None:
        arr = ravel(arr)
        axis = 0
    ndim = F.rank(arr)
    if ndim != F.rank(indices):
        _raise_value_error('`indices` and `arr` must have the same number of dimensions')
    axis = _check_axis_in_range(axis, ndim)

    shape_arr = F.shape(arr)
    shape_indices = F.shape(indices)
    # broadcasts indices against the shape of arr except at axis
    indices = _broadcast_to(indices, _tuple_slice(shape_indices, None, axis),
                            _tuple_slice(shape_arr, None, axis), ndim)
    indices = _broadcast_to(indices, _tuple_slice(shape_arr, None, axis + 1) +
                            _tuple_slice(shape_indices, axis + 1, None), shape_arr, ndim)
    return F.gather_d(arr, axis, indices)

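# Rough shape walk-through (illustrative, mirroring the docstring example):
# with arr of shape (3, 4), indices of shape (1, 3) and axis=1, the two
# _broadcast_to calls expand indices to (3, 3) -- matching arr on every axis
# except `axis`, where the index length J=3 is kept -- and F.gather_d then
# picks arr[i, indices[i, j]] for each position.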

def _mod(x, y):
    """Computes x mod y."""
    quotient = F.tensor_floordiv(x, y)
    prod = F.tensor_mul(y, quotient)
    return F.tensor_sub(x, prod)

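# Semantics sketch (illustrative, assuming F.tensor_floordiv floors toward
# negative infinity): x mod y = x - y * floor(x / y), matching Python's `%`,
# e.g. 5 mod 3 -> 2 and -5 mod 3 -> 1.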

def _check_indices(dims, indices, mode, allow_negative_index=True):
    """Checks whether indices are out of bounds."""
    shape = F.shape(indices)
    dtype = F.dtype(indices)
    if not allow_negative_index:
        lowerbounds = F.fill(dtype, shape, 0)
    else:
        lowerbounds = F.fill(dtype, shape, -dims)
    upperbounds = F.fill(dtype, shape, dims - 1)
    out_of_lowerbounds = F.tensor_lt(indices, lowerbounds)
    out_of_upperbounds = F.tensor_gt(indices, upperbounds)
    if mode == 'raise':
        _raise_unimplemented_error('"raise" mode is not implemented')
    if mode == 'wrap':
        return _mod(indices, F.fill(mstype.float32, shape, dims)).astype(dtype)
    if mode != 'clip':
        _raise_value_error('invalid mode. Expected "raise", "wrap", or "clip"')
    zeros = F.fill(dtype, shape, 0)
    clipped = F.select(out_of_lowerbounds, zeros, indices)
    clipped = F.select(out_of_upperbounds, upperbounds, clipped)
    return clipped

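# Behaviour sketch (illustrative), for dims=6 and indices [-7, -1, 5, 9]:
#   'wrap' maps each index modulo 6 -> [5, 5, 5, 3]
#   'clip' (negatives allowed) maps indices below -dims to 0 and indices
#   above dims - 1 to dims - 1 -> [0, -1, 5, 5]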

def take(a, indices, axis=None, mode='clip'):
    """
    Takes elements from an array along an axis.

    When axis is not None, this function does the same thing as “fancy” indexing
    (indexing arrays using arrays); however, it can be easier to use if you need
    elements along a given axis. A call such as ``np.take(arr, indices, axis=3)`` is
    equivalent to ``arr[:,:,:,indices,...]``.

    Note:
        Numpy argument `out` is not supported.
        ``mode = 'raise'`` is not supported, and the default mode is 'clip' instead.

    Args:
        a (Tensor): Source array with shape `(Ni…, M, Nk…)`.
        indices (Tensor): The indices with shape `(Nj...)` of the values to extract.
        axis (int, optional): The axis over which to select values. By default,
            the flattened input array is used.
        mode (‘raise’, ‘wrap’, ‘clip’, optional): Specifies how out-of-bounds
            indices will behave.

            ‘raise’ – raise an error;

            ‘wrap’ – wrap around;

            ‘clip’ – clip to the range. ‘clip’ mode means that all indices that are
            too large are replaced by the index that addresses the last element
            along that axis. Note that this disables indexing with negative numbers.

    Returns:
        Tensor, the indexed result.

    Raises:
        ValueError: if axis is out of range.
        TypeError: if the input is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([4, 3, 5, 7, 6, 8])
        >>> indices = np.array([0, 1, 4])
        >>> output = np.take(a, indices)
        >>> print(output)
        [4 3 6]
        >>> indices = np.array([[0, 1], [2, 3]])
        >>> output = np.take(a, indices)
        >>> print(output)
        [[4 3]
         [5 7]]
    """
    _check_input_tensor(a, indices)
    return a.take(indices, axis=axis, mode=mode)


def repeat(a, repeats, axis=None):
    """
    Repeats elements of an array.

    Args:
        a (Tensor): Input array.
        repeats (int or sequence of ints): The number of repetitions for each element.
            `repeats` is broadcasted to fit the shape of the given axis.
        axis (int, optional): The axis along which to repeat values. By default,
            use the flattened input array, and return a flat output array.

    Returns:
        Tensor, output array which has the same shape as `a`, except along the given
        axis.

    Raises:
        ValueError: if axis is out of range.
        TypeError: if input `a` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.repeat(np.array(3), 4)
        >>> print(output)
        [3 3 3 3]
        >>> x = np.array([[1,2],[3,4]])
        >>> output = np.repeat(x, 2)
        >>> print(output)
        [1 1 2 2 3 3 4 4]
        >>> output = np.repeat(x, 3, axis=1)
        >>> print(output)
        [[1 1 1 2 2 2]
         [3 3 3 4 4 4]]
        >>> output = np.repeat(x, [1, 2], axis=0)
        >>> print(output)
        [[1 2]
         [3 4]
         [3 4]]
    """
    a = _to_tensor(a)
    return a.repeat(repeats, axis)


def rot90(a, k=1, axes=(0, 1)):
    """
    Rotates a tensor by 90 degrees in the plane specified by axes.
    Rotation direction is from the first towards the second axis.

    Args:
        a (Tensor): Input tensor of two or more dimensions.
        k (int): Number of times the tensor is rotated by 90 degrees. Default: 1.
        axes (Union[tuple(int), list(int)]): The tensor is rotated in the plane
            defined by the axes. Default: `(0, 1)`.
            Axes must be different and `axes` must be of length 2.

    Returns:
        Tensor.

    Raises:
        TypeError: if input `a` is not a Tensor, the argument `k` is not an
            integer, or the argument `axes` is not a tuple or list of integers.
        ValueError: if any axis is out of range or
            the length of `axes` is not `2`.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(24).reshape((2, 3, 4))
        >>> output = np.rot90(a)
        >>> print(output)
        [[[ 8  9 10 11]
          [20 21 22 23]]
         [[ 4  5  6  7]
          [16 17 18 19]]
         [[ 0  1  2  3]
          [12 13 14 15]]]
        >>> output = np.rot90(a, 3, (1, 2))
        >>> print(output)
        [[[ 8  4  0]
          [ 9  5  1]
          [10  6  2]
          [11  7  3]]
         [[20 16 12]
          [21 17 13]
          [22 18 14]
          [23 19 15]]]
    """
    _check_input_tensor(a)

    if not isinstance(k, int):
        _raise_type_error("integer argument expected, but got ", k)
    # Python's modulo already maps any integer k, positive or negative, into [0, 4)
    k = k % 4

    if not isinstance(axes, (tuple, list)):
        _raise_type_error("tuple(ints) or list(ints) expected, but got ", axes)
    if len(axes) != 2:
        _raise_value_error("len(axes) must be 2.")
    axis1, axis2 = axes[0], axes[1]
    axis1 = _canonicalize_axis(axis1, a.ndim)
    axis2 = _canonicalize_axis(axis2, a.ndim)
    if axis1 == axis2:
        _raise_value_error('Axes must be different.')

    if k == 0:
        return a
    if k == 2:
        return flip(flip(a, axis1), axis2)
    perm = _list_comprehensions(a.ndim)
    perm[axis1], perm[axis2] = perm[axis2], perm[axis1]
    if k == 1:
        return flip(transpose(a, perm), axis1)
    return flip(transpose(a, perm), axis2)

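# Composition sketch (illustrative): a 90-degree rotation in the (axis1, axis2)
# plane is a transpose of the two axes followed by a flip:
#   k=1: swap axis1/axis2, then flip(axis1)
#   k=2: flip both axes (no transpose needed)
#   k=3: swap axis1/axis2, then flip(axis2)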

def select(condlist, choicelist, default=0):
    """
    Returns an array drawn from elements in `choicelist`, depending on conditions.

    Args:
        condlist (Union[int, float, bool, list, tuple, Tensor]): The list of conditions
            which determine from which array in `choicelist` the output elements are
            taken. When multiple conditions are satisfied, the first one encountered in
            `condlist` is used.
        choicelist (Union[int, float, bool, list, tuple, Tensor]): The list of arrays
            from which the output elements are taken. It has to be of the same length as
            `condlist`.
        default (scalar, optional): The element inserted in output when all conditions
            evaluate to `False`. Defaults to 0.

    Returns:
        Tensor, the output at position `m` is the `m-th` element of the array in
        `choicelist` where the `m-th` element of the corresponding array in `condlist`
        is `True`.

    Raises:
        ValueError: if ``len(condlist) != len(choicelist)``.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> condlist = [[True, True, True, False, False], [False, False, True, False, True]]
        >>> choicelist = [[0, 1, 2, 3, 4], [0, 1, 4, 9, 16]]
        >>> output = np.select(condlist, choicelist)
        >>> print(output)
        [ 0  1  2  0 16]
    """
    condlist, choicelist = _to_tensor(condlist, choicelist)
    shape_cond = F.shape(condlist)
    shape_choice = F.shape(choicelist)
    if F.rank(condlist) == 0 or F.rank(choicelist) == 0:
        _raise_value_error('input cannot be scalars')
    case_num = shape_cond[0]
    if shape_choice[0] != case_num:
        _raise_value_error('list of cases must be same length as list of conditions')

    case_size_cond = _tuple_slice(shape_cond, 1, None)
    case_size_choice = _tuple_slice(shape_choice, 1, None)
    # performs broadcast over the cases in condlist and choicelist
    case_size = _infer_out_shape(case_size_cond, case_size_choice)
    shape_broadcasted = (case_num,) + case_size
    ndim = len(shape_broadcasted)
    shape_cond_expanded = ((case_num,) + _list_comprehensions(ndim - F.rank(condlist), 1, True) +
                           case_size_cond)
    condlist = _broadcast_to_shape(F.reshape(condlist, shape_cond_expanded), shape_broadcasted)
    shape_choice_expanded = ((case_num,) + _list_comprehensions(ndim - F.rank(choicelist), 1, True) +
                             case_size_choice)
    choicelist = _broadcast_to_shape(F.reshape(choicelist, shape_choice_expanded), shape_broadcasted)

    slice_start = _list_comprehensions(ndim - 1, 0, True)
    slice_size = (1,) + case_size
    dtype = F.dtype(choicelist)
    if isinstance(default, Tensor):
        default_slice = default.astype(F.dtype(choicelist)).reshape(slice_size)
    else:
        default_slice = F.fill(F.dtype(choicelist), slice_size, default)
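    # iterates the conditions in reverse so that, after the last F.select,
    # the first matching condition in `condlist` takes precedence elementwise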
    for i in range(case_num - 1, -1, -1):
        cond_slice = F.tensor_slice(condlist.astype(mstype.float32), (i,) + slice_start, slice_size)
        choice_slice = F.tensor_slice(choicelist, (i,) + slice_start, slice_size)
        default_slice = F.select(cond_slice.astype(mstype.bool_), choice_slice, default_slice)
    return F.reshape(default_slice, case_size).astype(dtype)


@constexpr
def _get_grid(shape):
    """Returns a grid representing all the indices for an array with the given shape."""
    grids = []
    ndim = len(shape)
    for i in range(ndim):
        dim_grid = _iota(mstype.int32, shape[i])
        dim_shape = _expanded_shape(ndim, shape[i], i)
        dim_grid = _broadcast_to_shape(dim_grid.reshape(dim_shape), shape)
        grids.append(dim_grid)
    return stack(grids, -1)

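# Illustrative: for shape (2, 2), _get_grid returns a (2, 2, 2) tensor
# [[[0, 0], [0, 1]], [[1, 0], [1, 1]]], i.e. grid[i, j] == (i, j), which is
# the coordinate layout that F.gather_nd expects.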

def choose(a, choices, mode='clip'):
    """
    Constructs an array from an index array and a list of arrays to choose from.
    Given an “index” array `a` of integers and a sequence of n arrays (choices),
    `a` and each choice array are first broadcast, as necessary, to arrays of a
    common shape; calling these `Ba` and `Bchoices[i], i = 0,…,n-1` we have that,
    necessarily, ``Ba.shape == Bchoices[i].shape`` for each `i`. Then, a new array
    with shape ``Ba.shape`` is created as follows:

    - if ``mode='raise'`` (the default), then, first of all, each element of `a`
      (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that `i`
      (in that range) is the value at the `(j0, j1, ..., jm)` position in
      `Ba` - then the value at the same position in the new array is the
      value in ``Bchoices[i]`` at that same position;

    - if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed)
      integer; modular arithmetic is used to map integers outside the
      range ``[0, n-1]`` back into that range; and then the new array is
      constructed as above;

    - if ``mode='clip'``, values in `a` (and thus `Ba`) may be any (signed) integer;
      negative integers are mapped to 0; values greater than `n-1` are mapped to
      `n-1`; and then the new array is constructed as above.

    Note:
        Numpy argument `out` is not supported.
        ``mode = 'raise'`` is not supported, and the default mode is 'clip' instead.

    Args:
        a (int array): This array must contain integers in ``[0, n-1]``, where `n` is
            the number of choices, unless ``mode=wrap`` or ``mode=clip``, in which
            cases any integers are permissible.
        choices (sequence of arrays): Choice arrays. `a` and all of the `choices` must
            be broadcastable to the same shape. If `choices` is itself an array, then
            its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``)
            is taken as defining the “sequence”.
        mode (‘raise’, ‘wrap’, ‘clip’, optional): Specifies how indices outside
            ``[0, n-1]`` will be treated:

            ‘raise’ – raise an error;

            ‘wrap’ – wrap around;

            ‘clip’ – clip to the range. ‘clip’ mode means that all indices that are
            too large are replaced by the index that addresses the last element
            along that axis. Note that this disables indexing with negative numbers.

    Returns:
        Tensor, the merged result.

    Raises:
        ValueError: if `a` and any of the `choices` cannot be broadcast.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]]
        >>> print(np.choose([2, 3, 1, 0], choices))
        [20 31 12  3]
        >>> print(np.choose([2, 4, 1, 0], choices, mode='clip'))
        [20 31 12  3]
        >>> print(np.choose([2, 4, 1, 0], choices, mode='wrap'))
        [20  1 12  3]
        >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
        >>> choices = [-10, 10]
        >>> print(np.choose(a, choices))
        [[ 10 -10  10]
         [-10  10 -10]
         [ 10 -10  10]]
    """
    a = _to_tensor(a)
    if not _check_is_int(F.dtype(a)):
        _raise_value_error('`a` should be an int array')
    if isinstance(choices, (tuple, list)):
        # broadcasts choices to the same shape if choices is a sequence
        choices = _to_tensor(*choices)
        shapes = ()
        for choice in choices:
            shapes += (F.shape(choice),)
        shape_choice = _infer_out_shape(F.shape(a), *shapes)
        tmp = []
        for choice in choices:
            tmp.append(broadcast_to(choice, shape_choice))
        choices = stack(tmp)
    else:
        choices = _to_tensor(choices)
        shape_choice = _infer_out_shape(F.shape(a), F.shape(choices)[1:])
        choices = F.reshape(choices, choices.shape[:1] + _add_unit_axes(choices.shape[1:], len(shape_choice)))
        choices = broadcast_to(choices, (F.shape(choices)[0],) + shape_choice)

    if F.rank(a) == 0 or F.rank(choices) == 0:
        _raise_value_error('input cannot be scalars')
    a = broadcast_to(a, shape_choice)
    a = _check_indices(F.shape(choices)[0], a, mode, allow_negative_index=False)
    grid = _get_grid(F.shape(a))
    indices = concatenate((a.reshape(F.shape(a) + (1,)), grid), -1)
    return F.gather_nd(choices, indices)

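# Gather sketch (illustrative, mirroring the docstring example): with
# a = [2, 3, 1, 0] and the four choice rows stacked into shape (4, 4), the
# grid yields indices [[2, 0], [3, 1], [1, 2], [0, 3]], so F.gather_nd reads
# choices[2][0]=20, choices[3][1]=31, choices[1][2]=12, choices[0][3]=3.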

def size(a, axis=None):
    """
    Returns the number of elements along a given axis.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Input data.
        axis (int, optional): Axis along which the elements are counted. Default: None.
            If None, give the total number of elements.

    Returns:
        Number of elements along the specified axis.

    Raises:
        TypeError: If input is not array_like or `axis` is not int.
        ValueError: If any axis is out of range or duplicate axes exist.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(10).reshape(2, 5).astype('float32')
        >>> print(np.size(x))
        10
        >>> print(np.size(x, axis=1))
        5
    """
    a = _to_tensor(a)
    if axis is None:
        return a.size
    if not isinstance(axis, int):
        _raise_type_error("axis argument should be integer.")
    axis = _canonicalize_axis(axis, a.ndim)
    return a.shape[axis]


def array_str(a):
    """
    Returns a string representation of the data in an array.

    The data in the array is returned as a single string.
    This function is similar to `array_repr`, the difference being that `array_repr`
    also returns information on the kind of array and its data type.

    Note:
        Numpy arguments `max_line_width`, `precision` and `suppress_small` are not supported.
        This function is not supported in graph mode.

    Args:
        a (Tensor): Input data.

    Returns:
        String.

    Raises:
        TypeError: If input is not tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(5)
        >>> np.array_str(x)
        '[0 1 2 3 4]'
    """
    if not isinstance(a, Tensor):
        _raise_type_error("Expect input to be tensor.")
    return str(a)


def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    Applies a function to 1-D slices along the given axis.
    Executes ``func1d(a, *args, **kwargs)`` where `func1d` operates on 1-D arrays and `a` is a
    1-D slice of `arr` along `axis`.

    Args:
        func1d (function): Maps `(M,) -> (Nj…)`. This function should accept 1-D arrays. It is
            applied to 1-D slices of `arr` along the specified axis.
        axis (int): Axis along which `arr` is sliced.
        arr (Tensor): Input array with shape `(Ni…, M, Nk…)`.
        args (any): Additional arguments to `func1d`.
        kwargs (any): Additional named arguments to `func1d`.

    Returns:
        Tensor with shape `(Ni…, Nj…, Nk…)`, the output array. Its shape is identical to the
        shape of `arr`, except along the `axis` dimension. This axis is removed, and replaced
        with new dimensions equal to the shape of the return value of `func1d`. So if `func1d`
        returns a scalar, the output will have one fewer dimension than `arr`.

    Raises:
        ValueError: if `axis` is out of range.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
        >>> print(np.apply_along_axis(np.diag, -1, b))
        [[[1 0 0]
          [0 2 0]
          [0 0 3]]
         [[4 0 0]
          [0 5 0]
          [0 0 6]]
         [[7 0 0]
          [0 8 0]
          [0 0 9]]]
    """
    ndim = F.rank(arr)
    shape = F.shape(arr)
    axis = _check_axis_in_range(axis, ndim)
    arr = moveaxis(arr, axis, -1)
    arr = F.reshape(arr, (-1, F.shape(arr)[-1]))
    slices = []
    for i in range(F.shape(arr)[0]):
        slices.append(func1d(arr[i], *args, **kwargs))
    stacked_slices = stack(slices)
    shape_stacked = (_tuple_slice(shape, None, axis) + _tuple_slice(shape, axis + 1, None) +
                     _tuple_slice(F.shape(stacked_slices), 1, None))
    res = F.reshape(stacked_slices, shape_stacked)

    # moves the dimensions returned by `func1d` back to `axis`
    ndim_func = F.rank(res) - ndim + 1
    if ndim_func >= 1:
        res = moveaxis(res, F.make_range(ndim - 1, F.rank(res)),
                       F.make_range(axis, axis + ndim_func))
    return res

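# Shape walk-through (illustrative) for the docstring example: b is (3, 3) and
# axis=-1 maps to 1; rows are flattened to a (3, 3) batch, np.diag turns each
# row into a (3, 3) matrix, the stack is reshaped to (3, 3, 3), and moveaxis
# returns the dimensions produced by func1d to the position of `axis`.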

def _stack_arrays(arrs):
    """Stacks a sequence of Tensor"""
    if isinstance(arrs, (tuple, list)):
        tensor_list = []
        for arr in arrs:
            tensor_list.append(_to_tensor(arr))
        return stack(tensor_list)
    return atleast_1d(_to_tensor(arrs))


def piecewise(x, condlist, funclist, *args, **kw):
    """
    Evaluates a piecewise-defined function.
    Given a set of conditions and corresponding functions, evaluates each function on the
    input data wherever its condition is true.

    Args:
        x (Union[int, float, bool, list, tuple, Tensor]): The input domain.
        condlist (Union[bool, list of bool Tensor]): Each boolean array corresponds to a
            function in `funclist`. Wherever `condlist[i]` is True, `funclist[i](x)` is used as
            the output value. Each boolean array in `condlist` selects a piece of `x`, and
            should therefore be of the same shape as `x`. The length of `condlist` must
            correspond to that of `funclist`. If one extra function is given, i.e. if
            ``len(funclist) == len(condlist) + 1``, then that extra function is the default
            value, used wherever all conditions are false.
        funclist (Union[list of callables, list of scalars]): Each function is evaluated over
            `x` wherever its corresponding condition is True. It should take a 1d array as input
            and give a 1d array or a scalar value as output. If, instead of a callable, a scalar
            is provided then a constant function ``(lambda x: scalar)`` is assumed.
        args (any): Any further arguments given to `piecewise` are passed to the functions upon
            execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then each function is
            called as ``f(x, 1, 'a')``.
        kw (any): Keyword arguments used in calling `piecewise` are passed to the functions upon
            execution, i.e., if called ``piecewise(..., ..., alpha=1)``, then each function is
            called as ``f(x, alpha=1)``.

    Returns:
        Tensor, the output is the same shape and type as `x` and is found by calling the
        functions in `funclist` on the appropriate portions of `x`, as defined by the boolean
        arrays in `condlist`. Portions not covered by any condition have a default value of 0.

    Raises:
        ValueError: if the length of `funclist` is not in
            ``(len(condlist), len(condlist) + 1)``.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.linspace(-2.5, 2.5, 6)
        >>> print(np.piecewise(x, [x < 0, x >= 0], [-1, 1]))
        [-1 -1 -1  1  1  1]
    """
    x = _to_tensor(x)
    choicelist = funclist
    if isinstance(funclist, (tuple, list)):
        if _callable(x, funclist[0]):
            choicelist = []
            for func in funclist:
                choicelist.append(func(x, *args, **kw))
    condlist = _stack_arrays(condlist)
    choicelist = _stack_arrays(choicelist)

    default = 0
    n1 = len(condlist)
    n2 = len(funclist)
    if n1 + 1 == n2:
        default = choicelist[-1]
        choicelist = choicelist[:-1]
    elif n1 != n2:
        _raise_value_error('the number of choices should be equal to the number of conditions, or ', n1 + 1)
    return select(condlist, choicelist, default=default)

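# Dispatch sketch (illustrative, mirroring the docstring example): the scalars
# -1 and 1 are kept as constant choices, the stacked boolean masks are fed to
# select(), and the first true condition wins per element; when funclist has
# one extra entry, that entry becomes the `default` for positions no
# condition covers.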

def unravel_index(indices, shape, order='C'):
    """
    Converts a flat index or array of flat indices into a tuple of coordinate arrays.

    Note:
        Out-of-bound indices are clipped by the boundaries of `shape` instead of raising
        an error.

    Args:
        indices (Union[int, float, bool, list, tuple, Tensor]): An integer array whose elements
            are indices into the flattened version of an array of dimensions `shape`.
        shape (tuple of integers): The shape of the array to use for unraveling indices.
        order (Union['C', 'F'], optional): Determines whether the indices should be viewed as
            indexing in row-major (C-style) or column-major (Fortran-style) order.

    Returns:
        Tuple of Tensors, where each Tensor has the same shape as the `indices` array.

    Raises:
        ValueError: if `order` is not 'C' or 'F'.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.unravel_index([22, 41, 37], (7,6)))
        (Tensor(shape=[3], dtype=Int32, value= [3, 6, 6]),
        Tensor(shape=[3], dtype=Int32, value= [4, 5, 1]))
        >>> print(np.unravel_index([31, 41, 13], (7,6), order='F'))
        (Tensor(shape=[3], dtype=Int32, value= [3, 6, 6]),
        Tensor(shape=[3], dtype=Int32, value= [4, 5, 1]))
    """
    indices = _to_tensor(indices)
    if order not in ('C', 'F'):
        _raise_value_error('invalid order. Expected "C" or "F"')
    if isinstance(shape, int):
        shape = (shape,)
    ndim = F.rank(indices)
    if order == 'F':
        sizes = _cumprod(shape)
    else:
        sizes = _cumprod(shape[::-1])
    sizes = _to_tensor(sizes[::-1] + (1,))
    sizes = F.reshape(sizes, (-1,) + _list_comprehensions(ndim, 1, True))
    total_size = sizes[0]
    indices = where(indices > total_size - 1, total_size - 1, indices)
    if _get_device() == 'GPU':
        dtype = F.dtype(total_size)
        lowerbounds = (-(total_size.astype(mstype.float32))).astype(dtype)
    else:
        lowerbounds = -total_size
    indices = where(indices < lowerbounds, lowerbounds, indices)
    res = _mod(indices, sizes[:-1]) // sizes[1:]

    num = len(res)
    if ndim == 0 and num == 1:
        return res.ravel()
    if order == 'F':
        r = range(num - 1, -1, -1)
    else:
        r = range(num)
    subs = ()
    for i in r:
        subs += (res[i],)
    return subs

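# Arithmetic sketch (illustrative) for C order and shape (7, 6): the stride
# sizes are (42, 6, 1), so a flat index of 22 unravels to row
# (22 mod 42) // 6 = 3 and column (22 mod 6) // 1 = 4, matching the first
# docstring example.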

def apply_over_axes(func, a, axes):
    """
    Applies a function repeatedly over multiple axes.

    `func` is called as `res = func(a, axis)`, where `axis` is the first element of `axes`.
    The result `res` of the function call must have either the same dimensions as `a` or
    one less dimension. If `res` has one less dimension than `a`, a dimension is inserted before `axis`.
    The call to `func` is then repeated for each axis in `axes`, with `res` as the first argument.

    Args:
        func (function): This function must take two arguments, `func(a, axis)`.
        a (Union[int, float, bool, list, tuple, Tensor]): Input tensor.
        axes (Union[int, list, tuple]): Axes over which `func` is applied; the elements must be integers.

    Returns:
        Tensor. The number of dimensions is the same as `a`, but the shape can be different.
        This depends on whether `func` changes the shape of its output with respect to its input.

    Raises:
        TypeError: If input `a` is not array_like or `axes` is not int or sequence of ints.
        ValueError: If any axis is out of range or duplicate axes exist.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(10).reshape(2, 5).astype('float32')
        >>> print(x)
        [[0. 1. 2. 3. 4.]
         [5. 6. 7. 8. 9.]]
        >>> print(np.apply_over_axes(np.sum, x, axes=0))
        [[ 5.  7.  9. 11. 13.]]
    """
    a = _to_tensor(a)
    if isinstance(axes, int):
        axes = (axes,)
    res = a
    for axis in axes:
        res = func(res, axis=axis)
        res = F.expand_dims(res, axis) if res.ndim != a.ndim else res
        if res.ndim != a.ndim:
            _raise_value_error("function is not returning a tensor of the correct shape")
    return res

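# Iteration sketch (illustrative): apply_over_axes(np.sum, x, axes=(0, 1)) on a
# (2, 5) input computes np.sum(x, axis=0) -> (5,), re-expands to (1, 5), then
# np.sum(..., axis=1) -> (1,), re-expands to (1, 1); the rank of `a` is
# preserved after every step, as the check above enforces.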