# Copyright 2022-2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

16"""Operators for function."""
17from __future__ import absolute_import
18
19import builtins
20import operator
21import numbers
22import numpy as np
23
24import mindspore as ms
25import mindspore.common.dtype as mstype
26from mindspore.ops import operations as P
27from mindspore.ops import functional as F
28from mindspore.ops.primitive import constexpr
29from mindspore.ops.primitive import _primexpr
30import mindspore.ops as ops
31from mindspore.ops.operations._inner_ops import DynamicBroadcastTo
32from mindspore.ops.operations._sequence_ops import TupleToTensor
33from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
34from mindspore.ops.operations._sequence_ops import TensorToList
35from mindspore.ops.auto_generate import OnesLikeExt, ZerosLikeExt, FillScalar, FillTensor, Arange, Chunk, UniqueDim,\
36    Unique2, SortExt, NonZero, NonZeroExt
37from mindspore.ops.auto_generate.gen_ops_prim import SplitTensor
38from mindspore.ops.auto_generate.gen_ops_prim import SplitWithSize, RepeatInterleaveInt, RepeatInterleaveTensor
39
40from mindspore.ops.operations.array_ops import (
41    UniqueConsecutive,
42    SearchSorted,
43    MatrixDiagV3,
44    MatrixDiagPartV3,
45    MatrixSetDiagV3,
46    Fills,
47    Col2Im,
48    ScatterNdMax,
49    ScatterNdMul,
50    IndexFill,
51    AffineGrid,
52    Im2Col,
53    Expand,
54    Lstsq,
55    Mvlgamma,
56    Tril,
57    Argmax,
58    ArgMaxWithValue,
59    ArgMinWithValue
60)
61from mindspore.ops.operations.array_ops import TensorScatterElements
62from mindspore.common import Tensor
63from mindspore.ops._primitive_cache import _get_cache_prim
64from mindspore import _checkparam as validator
65from mindspore._c_expression import Tensor as Tensor_
66from mindspore.ops._utils.utils import ms_arrange
67
68from mindspore.ops.auto_generate import cat, range, scatter_nd, deepcopy, masked_fill, diagonal, expand_dims, \
69    flip, transpose, triu, unsorted_segment_sum, diag, gather, gather_d, gather_nd, reshape, \
70    broadcast_to, strided_slice, ones, zeros, max_, min_, select
71from mindspore.ops.auto_generate.gen_ops_prim import scatter_add_ext_op, slice_ext_op
72from mindspore.ops.operations.manually_defined import tile, rank, scalar_cast
73
74arg_max_with_value_ = ArgMaxWithValue()
75arg_min_with_value_ = ArgMinWithValue()
76batch_to_space_nd_v2_ = P.BatchToSpaceNDV2()
77cast_ = P.Cast()
78diag_ = P.Diag()
79dynamic_broadcast_to_ = DynamicBroadcastTo()
80eye_ = P.Eye()
81fills_ = Fills()
82fillv2_ = P.FillV2()
83flatten_ = P.Flatten()
84gather_ = P.Gather()
85gather_d_ = P.GatherD()
86gather_nd_ = P.GatherNd()
87ger_ = P.Ger()
88index_fill_ = IndexFill()
89lstsq_ = Lstsq()
90masked_select_ = P.MaskedSelect()
91matrix_band_part_ = P.array_ops.MatrixBandPart()
92ones_ = P.Ones()
93population_count_ = P.PopulationCount()
94range_ = P.Range()
95rank_ = P.Rank()
96reduce_max_ = P.ReduceMax()
97reduce_min_ = P.ReduceMin()
98reshape_ = P.Reshape()
99scalar_to_tensor_ = P.ScalarToTensor()
100scatter_add_ = P.ScatterAdd()
101scatter_div_ = P.ScatterDiv()
102scatter_max_ = P.ScatterMax()
103scatter_min_ = P.ScatterMin()
104scatter_mul_ = P.ScatterMul()
105scatter_nd_ = P.ScatterNd()
106scatter_update_ = P.ScatterUpdate()
107shape_ = P.Shape()
108split_tensor = SplitTensor()
109split_with_size = SplitWithSize()
110size_ = P.Size()
111tensor_scatter_add_ = P.TensorScatterAdd()
112tensor_scatter_div_ = P.TensorScatterDiv()
113tensor_scatter_max_ = P.TensorScatterMax()
114tensor_scatter_min_ = P.TensorScatterMin()
115tensor_scatter_mul_ = P.TensorScatterMul()
116tensor_scatter_sub_ = P.TensorScatterSub()
117tensor_select_ = P.Select()
118tensor_shape_ = P.TensorShape()
119tensor_slice = P.Slice()
120tile_ = P.Tile()
121transpose_ = P.Transpose()
122tuple_to_array_ = P.TupleToArray()
123tuple_to_tensor_ = TupleToTensor()
124unique_ = P.Unique()
125unique_with_pad_ = P.UniqueWithPad()
126unsorted_segment_max_ = P.UnsortedSegmentMax()
127unsorted_segment_min_ = P.UnsortedSegmentMin()
128unsorted_segment_prod_ = P.UnsortedSegmentProd()
129unsorted_segment_sum_ = P.UnsortedSegmentSum()
130ones_like_ = P.OnesLike()
131zeros_like_ = P.ZerosLike()
132ones_like_ext_ = OnesLikeExt()
133zeros_like_ext_ = ZerosLikeExt()
134fill_scalar_ = FillScalar()
135fill_tensor_ = FillTensor()
136sort_ext_ = SortExt()
137arange_ = Arange()
138chunk_ = Chunk()
139repeat_interleave_int_ = RepeatInterleaveInt()
140repeat_interleave_tensor_ = RepeatInterleaveTensor()
141unique_dim_ = UniqueDim()
142unique2_ = Unique2()
143non_zero_ = NonZero()
144non_zero_ext_ = NonZeroExt()
145

@_primexpr
def get_x_shape(x_shape):
    """Fold `x_shape` into a one-element tuple: ``(-2,)`` if the rank is unknown,
    ``(-1,)`` if any dimension value is unknown, otherwise the total element count."""
    if ops.is_sequence_shape_unknown(x_shape):
        return (-2,)
    if ops.is_sequence_value_unknown(x_shape):
        return (-1,)
    s = 1
    for i in x_shape:
        s = s * i
    return (s,)
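
# Hedged illustration of `get_x_shape` for a fully known shape (the calls below
# are hypothetical, not library doctests); shapes with unknown rank or unknown
# dimension values instead map to the sentinels (-2,) and (-1,):
#     get_x_shape((2, 3, 4))  # -> (24,), the flattened element count
#     get_x_shape((5,))       # -> (5,)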


@constexpr
def _check_attr_dtype(param_name, input_dtype, allow_dtypes, cls_name):
    validator.check_value_type(param_name, input_dtype, allow_dtypes, cls_name)


check_flatten_order_const = constexpr(validator.check_flatten_order)


##############################
# Tensor Creation Functions.
##############################


def _cast_type(x, to_type):
    """cast input to the specified type or cast input to tensor"""
    if isinstance(x, Tensor):
        x = cast_(x, to_type)
    else:
        x = scalar_to_tensor_(x, to_type)
    return x


def _get_type(x):
    """get the dtype of input"""
    if isinstance(x, Tensor):
        return x.dtype
    return ops.typeof(x)


def _get_max_type(start, end, step):
    """Return the widest dtype among `start`, `end` and `step` by promotion level."""
    valid_dtypes = [mstype.int32, mstype.float32, mstype.int64, mstype.float64]
    arg_map = [start, end, step]
    arg_type_map = [str(_get_type(i)) for i in arg_map]
    for arg_value in arg_map:
        if not (isinstance(arg_value, (float, int))
                or (isinstance(arg_value, Tensor) and arg_value.dtype in valid_dtypes)):
            raise TypeError(
                f"For arange, the input type must be int, float or a TensorScalar in {valid_dtypes},"
                f" but got {_get_type(arg_value)}")

    # Map each argument's dtype to a promotion level and return the highest one.
    type_map = {'Float64': '3', 'Float32': '2', "<class 'float'>": '2', 'Int64': '1', "<class 'int'>": '1',
                'Int32': '0'}
    type_map_reverse = {'3': mstype.float64, '2': mstype.float32, '1': mstype.int64, '0': mstype.int32}
    type_level = [type_map.get(i) for i in arg_type_map]
    max_level = builtins.max(type_level)
    return type_map_reverse.get(max_level)
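
# Hedged sketch of the promotion ladder above (int32 < int64 < float32 <
# float64; a Python int promotes like int64 and a Python float like float32).
# Illustrative calls, not library doctests:
#     _get_max_type(0, 5, 1.0)                      # -> mstype.float32
#     _get_max_type(Tensor(0, mstype.int32), 5, 1)  # -> mstype.int64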


def arange(start=0, end=None, step=1, *, dtype=None):
    r"""
    Creates a sequence of numbers that begins at `start` and extends by increments of
    `step` up to but not including `end`.

    Args:
        start (Union[float, int, Tensor], optional): The start of the interval.
            If Tensor, the shape must be :math:`()` . Default: ``0`` .
        end (Union[float, int, Tensor], optional): The end of the interval, exclusive.
            If Tensor, the shape must be :math:`()`.
            Default: ``None`` . If ``None`` , it defaults to the value of `start`, and 0 is used as the starting value.
        step (Union[float, int, Tensor], optional): Number by which `start` is incremented.
            If Tensor, the shape must be :math:`()`. Default: ``1`` .

    Keyword Args:
        dtype (mindspore.dtype, optional): The required data type of the returned Tensor. Default: ``None`` .
            When `dtype` is not specified or ``None``:

            If `start`, `end`, and `step` are all integers, the dtype of the output is int64.

            If `start`, `end`, and `step` contain at least one floating-point number, the dtype of the output is
            float32.

    Returns:
        A 1-D Tensor, with the same type as the inputs.

    Raises:
        TypeError: If `start`, `end` or `step` is not an int, a float, or a TensorScalar (a 0-D Tensor with
            shape :math:`()`) of a valid dtype.
        ValueError: If `step` = 0.
        ValueError: If `start` >= `end` when `step` > 0.
        ValueError: If `start` <= `end` when `step` < 0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore as ms
        >>> from mindspore import Tensor, ops
        >>> output = ops.arange(1, 6)
        >>> print(output)
        [1 2 3 4 5]
        >>> print(output.dtype)
        Int64
        >>> output = ops.arange(0, 3, 1.2)
        >>> print(output)
        [0.  1.2 2.4]
        >>> print(output.dtype)
        Float32
        >>> output = ops.arange(7, 1, -2)
        >>> print(output)
        [7 5 3]
        >>> print(output.dtype)
        Int64
        >>> output = ops.arange(ms.Tensor(12.0, dtype=ms.float64), 2, ms.Tensor(-1.0, dtype=ms.float32))
        >>> print(output)
        [12. 11. 10.  9.  8.  7.  6.  5.  4.  3.]
        >>> print(output.dtype)
        Float32
    """
    if end is None:
        start, end = 0, start
    max_type = _get_max_type(start, end, step)
    start = _cast_type(start, max_type)
    end = _cast_type(end, max_type)
    step = _cast_type(step, max_type)

    if start.shape != () or end.shape != () or step.shape != ():
        raise ValueError(f"For arange, the input args must be a TensorScalar,"
                         f" but got start shape:{start.shape}, end shape:{end.shape}, step shape:{step.shape}")
    data = range_(start, end, step)
    if dtype is not None:
        data = cast_(data, dtype)
    return data


def arange_ext(start=0, end=None, step=1, *, dtype=None):
    r"""
    Creates a sequence of numbers that begins at `start` and extends by increments of
    `step` up to but not including `end`.

    Args:
        start (Union[float, int], optional): The start of the interval. Default: ``0`` .
        end (Union[float, int], optional): The end of the interval, exclusive.
            Default: ``None`` . If ``None`` , it defaults to the value of `start`, and 0 is used as the starting value.
        step (Union[float, int], optional): The step size with which the array element increments. Default: ``1`` .

    Keyword Args:
        dtype (mindspore.dtype, optional): The required data type of the returned Tensor. Default: ``None`` .
            When `dtype` is not specified or ``None``:

            If `start`, `end`, and `step` are all integers, the dtype of the output is int64.

            If `start`, `end`, and `step` contain at least one floating-point number, the dtype of the output is
            float32.

    Returns:
        A 1-D Tensor, cast to `dtype` if provided; the cast may lose precision.

    Raises:
        TypeError: If `start`, `end` or `step` is not of type int or float.
        ValueError: If `step` = 0.
        ValueError: If `start` >= `end` when `step` > 0.
        ValueError: If `start` <= `end` when `step` < 0.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore as ms
        >>> from mindspore import Tensor, ops
        >>> output = ops.arange_ext(1, 6)
        >>> print(output)
        [1 2 3 4 5]
        >>> print(output.dtype)
        Int64
        >>> output = ops.arange_ext(0, 3, 1.2)
        >>> print(output)
        [0.  1.2 2.4]
        >>> print(output.dtype)
        Float32
        >>> output = ops.arange_ext(7, 1, -2)
        >>> print(output)
        [7 5 3]
        >>> print(output.dtype)
        Int64
        >>> output = ops.arange_ext(12, 2, -1, dtype=ms.bfloat16)
        >>> print(output)
        [12. 11. 10.  9.  8.  7.  6.  5.  4.  3.]
        >>> print(output.dtype)
        BFloat16
    """
    if end is None:
        start, end = 0, start
    return arange_(start, end, step, dtype)


def concat(tensors, axis=0):
    """
    Alias for :func:`mindspore.ops.cat()`.

    Tutorial Examples:
        - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/master/beginner/tensor.html#tensor-operation>`_
        - `Vision Transformer Image Classification - Building ViT as a whole
          <https://mindspore.cn/tutorials/application/en/master/cv/vit.html#building-vit-as-a-whole>`_
        - `Sentiment Classification Implemented by RNN - Dense
          <https://mindspore.cn/tutorials/application/en/master/nlp/sentiment_analysis.html#dense>`_
    """
    return cat(tensors, axis)
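
# Hedged usage sketch for `concat` (it simply forwards to `cat`); the tensors
# below are hypothetical 2x2 inputs, shown only to illustrate the axis argument:
#     x1 = Tensor(np.array([[0, 1], [2, 3]], np.float32))
#     x2 = Tensor(np.array([[4, 5], [6, 7]], np.float32))
#     ops.concat((x1, x2))          # along axis 0 -> shape (4, 2)
#     ops.concat((x1, x2), axis=1)  # along axis 1 -> shape (2, 4)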


def eye(n, m=None, dtype=None):
    """
    Creates a tensor with ones on the diagonal and zeros elsewhere.

    Note:
        On Ascend platforms, the data type of the returned tensor can be float16, float32, int8, int16, int32,
        int64, uint8 or bool.

    Args:
        n (int): The number of rows of the returned tensor. Constant value only.
        m (int, optional): The number of columns of the returned tensor. Constant value only.
            Default: ``None`` , the number of columns is the same as `n`.
        dtype (mindspore.dtype, optional): MindSpore's dtype, the data type of the returned tensor.
            The data type can be bool or Number.
            Default: ``None`` , the data type of the returned tensor is mindspore.float32.

    Returns:
        Tensor, a tensor with ones on the diagonal and zeros elsewhere. Its shape is determined by `n` and `m`,
        and its data type by `dtype`.

    Raises:
        TypeError: If `m` or `n` is not an int.
        ValueError: If `m` or `n` is less than 0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import ops
        >>> output = ops.eye(2, 2, mindspore.int32)
        >>> print(output)
        [[1 0]
         [0 1]]
        >>> print(output.dtype)
        Int32
        >>> output = ops.eye(1, 2, mindspore.float32)
        >>> print(output)
        [[1. 0.]]
        >>> print(output.dtype)
        Float32
        >>> output = ops.eye(2, dtype=mindspore.int32)
        >>> print(output)
        [[1 0]
         [0 1]]
        >>> print(output.dtype)
        Int32
        >>> output = ops.eye(2)
        >>> print(output)
        [[1. 0.]
         [0. 1.]]
        >>> print(output.dtype)
        Float32
    """
    if m is None:
        m = n
    if dtype is None:
        dtype = ms.float32
    return eye_(n, m, dtype)


def hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None):
    r"""
    Returns the Hamming window.

    .. math::

        w[n] = \alpha - \beta \cos \left( \frac{2 \pi n}{N - 1} \right),

    where :math:`N` is the full window size.

    Args:
        window_length (int): The size of the returned window. Must be a non-negative integer.
        periodic (bool, optional): If True, returns a periodic window. If False, returns a symmetric window.
            Default: ``True`` .
        alpha (float, optional): The coefficient :math:`\alpha`. Default: ``0.54`` .
        beta (float, optional): The coefficient :math:`\beta`. Default: ``0.46`` .

    Keyword Args:
        dtype (mindspore.dtype, optional): The output window data type. Must be a floating-point type.
            Default: ``None`` .

    Returns:
        Tensor, a 1-D tensor of size (window_length) containing the window.

    Raises:
        TypeError: If `window_length` is not an int.
        ValueError: If `window_length` is negative.
        TypeError: If `periodic` is not bool.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import ops
        >>> print(ops.hamming_window(6, False))
        [0.08 0.39785218 0.91214782 0.91214782 0.39785218 0.08]
    """
    if not isinstance(window_length, int):
        raise TypeError(f"For array function 'hamming_window', 'window_length' must be int, but got"
                        f" {type(window_length)}.")
    if window_length < 0:
        raise ValueError(f"For array function 'hamming_window', 'window_length' must be a non-negative integer,"
                         f" but got {window_length}.")
    if not isinstance(periodic, bool):
        raise TypeError(f"For array function 'hamming_window', 'periodic' must be bool, but got {type(periodic)}.")
    if not isinstance(alpha, float):
        raise TypeError(f"For array function 'hamming_window', 'alpha' must be float, but got {type(alpha)}.")
    if not isinstance(beta, float):
        raise TypeError(f"For array function 'hamming_window', 'beta' must be float, but got {type(beta)}.")
    if window_length <= 1:
        return Tensor(np.ones(window_length))
    if dtype is not None and dtype not in mstype.float_type:
        raise TypeError(f"For array function 'hamming_window', 'dtype' must be a floating point dtype, but got"
                        f" {dtype}.")

    dtype = mstype.float32 if dtype is None else dtype
    op = _get_cache_prim(P.HammingWindow)(periodic, alpha, beta, dtype)
    length = Tensor(np.array([window_length]).astype(np.int32))
    out = op(length)
    return out
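
# Hedged numeric check of the symmetric example above (values recomputed from
# the docstring formula, not from the kernel): with window_length=6, alpha=0.54
# and beta=0.46, entry n of the window is
#     w[n] = 0.54 - 0.46 * cos(2 * pi * n / 5)
# so w[0] = 0.54 - 0.46 = 0.08 and w[2] = 0.54 - 0.46 * cos(4*pi/5) ~= 0.9121,
# matching the printed output. A periodic window (periodic=True) divides by
# window_length instead of window_length - 1, which suits spectral analysis.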


def where(condition, input, other):
    r"""
    Selects elements from `input` or `other` based on `condition` and returns a tensor.

    .. math::
        output_i = \begin{cases} input_i,\quad &if\ condition_i \\ other_i,\quad &otherwise \end{cases}

    Args:
        condition (Tensor[bool]): If True, yield `input`, otherwise yield `other`.
        input (Union[Tensor, Scalar]): When `condition` is True, values to select from.
        other (Union[Tensor, Scalar]): When `condition` is False, values to select from.

    Returns:
        Tensor, elements are selected from `input` and `other`.

    Raises:
        TypeError: If `condition` is not a Tensor.
        TypeError: If both `input` and `other` are scalars.
        ValueError: If `condition`, `input` and `other` can not broadcast to each other.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> a = Tensor(np.arange(4).reshape((2, 2)), mstype.float32)
        >>> b = Tensor(np.ones((2, 2)), mstype.float32)
        >>> condition = a < 3
        >>> output = ops.where(condition, a, b)
        >>> print(output)
        [[0. 1.]
         [2. 1.]]
    """
    return tensor_select_(condition, input, other)


def reverse(x, axis):
    """
    :func:`mindspore.ops.reverse` will be deprecated in the future.
    Please use :func:`mindspore.ops.flip` instead.
    """
    return flip(x, axis)
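
# Hedged migration sketch (same arguments, same result, since `reverse` simply
# forwards to `flip`); the tensor below is a hypothetical 1-D input:
#     x = Tensor(np.array([1, 2, 3, 4]), ms.int32)
#     ops.reverse(x, (0,))  # deprecated spelling
#     ops.flip(x, (0,))     # preferred spelling, same output [4 3 2 1]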


def ravel(input):
    """
    Flattens a multidimensional Tensor into a 1-D Tensor.

    Args:
        input (Tensor): A tensor to be flattened.

    Returns:
        Tensor, a 1-D tensor, containing the same elements as the input.

    Raises:
        TypeError: If argument `input` is not Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
        >>> output = ops.ravel(x)
        >>> print(output)
        [0. 1. 2. 1.]
        >>> print(output.shape)
        (4,)
    """
    return ops.reshape(input, (-1,))


def matrix_band_part(x, lower, upper):
    r"""
    Copy a tensor setting everything outside a central band in each innermost matrix to zero.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Args:
        x (Tensor): Input tensor. :math:`(*, m, n)` where :math:`*` means, any number of additional dimensions.
        lower (Union[int, Tensor]): Number of subdiagonals to keep. The data type must be int32 or int64.
            If negative, keep entire lower triangle.
        upper (Union[int, Tensor]): Number of superdiagonals to keep. The data type must be int32 or int64.
            If negative, keep entire upper triangle.

    Returns:
        Tensor, has the same type and shape as `x`.

    Raises:
        TypeError: If `x` is not a Tensor.
        TypeError: If dtype of `x` is not valid.
        TypeError: If `lower` is neither a number nor a Tensor.
        TypeError: If `upper` is neither a number nor a Tensor.
        TypeError: If dtype of `lower` is neither int32 nor int64.
        TypeError: If dtype of `upper` is neither int32 nor int64.
        ValueError: If the shape of `x` is not greater than or equal to 2D.
        ValueError: If the shape of `lower` is not equal to 0D.
        ValueError: If the shape of `upper` is not equal to 0D.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.ones([2, 4, 4]).astype(np.float32))
        >>> output = ops.matrix_band_part(x, 2, 1)
        >>> print(output)
        [[[1. 1. 0. 0.]
          [1. 1. 1. 0.]
          [1. 1. 1. 1.]
          [0. 1. 1. 1.]]
         [[1. 1. 0. 0.]
          [1. 1. 1. 0.]
          [1. 1. 1. 1.]
          [0. 1. 1. 1.]]]
    """
    return matrix_band_part_(x, lower, upper)


def padding(x, pad_dim_size=8):
    r"""
    Extends the last dimension of the input tensor from 1 to `pad_dim_size` by filling with 0.

    Args:
        x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The rank of `x` must be at least 2.
            The last dimension of `x` must be 1. The data type is Number.
        pad_dim_size (int): The value of the last dimension of `x` to be extended, which must be positive.
            Default: ``8`` .

    Returns:
        Tensor, has the same type as `x`; its last dimension is extended from 1 to `pad_dim_size`.

    Raises:
        TypeError: If `pad_dim_size` is not an int.
        ValueError: If `pad_dim_size` is less than 1.
        ValueError: If last dim of `x` is not equal to 1.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[8], [10]]), mindspore.float32)
        >>> pad_dim_size = 4
        >>> output = ops.padding(x, pad_dim_size)
        >>> print(output)
        [[ 8.  0.  0.  0.]
         [10.  0.  0.  0.]]
    """
    padding_ = _get_cache_prim(P.array_ops.Padding)(pad_dim_size)
    return padding_(x)


@constexpr
def _check_axis_type(axis, type_int=True, type_tuple=True, type_list=True, ops_name="ops"):
    """Check axis argument type."""
    if type_int and isinstance(axis, int):
        return True
    if (type_tuple and isinstance(axis, tuple)) or (type_list and isinstance(axis, list)):
        for ax in axis:
            if not isinstance(ax, int):
                raise TypeError(f"For {ops_name}, each axis must be an integer, but got {type(ax)} in {axis}.")
        return True

    type_str = ""
    if type_int:
        type_str += "int, "
    if type_tuple:
        type_str += "tuple, "
    if type_list:
        type_str += "list, "
    raise TypeError(f"For {ops_name}, the axis should be {type_str.rstrip(', ')}, but got {type(axis)}.")
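
# Hedged illustration of the axis validation above (hypothetical calls, not
# library doctests):
#     _check_axis_type(1, ops_name="ops.chunk")         # -> True
#     _check_axis_type((0, 2), ops_name="ops.squeeze")  # -> True
#     _check_axis_type(1.5, ops_name="ops.chunk")       # raises TypeError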


def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
    r"""
    Computes a one-hot tensor.

    The locations represented by indices in `indices` take value `on_value`, while all
    other locations take value `off_value`.

    Note:
        If the input `indices` has rank `N`, the output will have rank `N+1`.
        The new axis is created at dimension `axis`. On Ascend, if `on_value` is int64 dtype, `indices` must be
        int64 dtype, and the value for `on_value` and `off_value` can only be 1 and 0.

    Args:
        indices(Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
            Data type must be int32 or int64.
        depth(int): A scalar defining the depth of the one-hot dimension.
        on_value(Union[Tensor, int, float], optional): A value to fill in output when `indices[j] = i`.
            Data type must be int32, int64, float16 or float32. Default: ``1`` .
        off_value(Union[Tensor, int, float], optional): A value to fill in output when `indices[j] != i`.
            Has the same data type as `on_value`. Default: ``0`` .
        axis(int, optional): Position to insert the value. e.g. If shape of `indices` is :math:`(N, C)`, and `axis`
            is -1, the output shape will be :math:`(N, C, depth)`. If `axis` is 0,
            the output shape will be :math:`(depth, N, C)`.
            Default: ``-1`` .

    Returns:
        Tensor, one-hot tensor. Tensor of shape :math:`(X_0, \ldots, X_{axis}, \text{depth} ,X_{axis+1}, \ldots, X_n)`,
        and it has the same data type as `on_value`.

    Raises:
        TypeError: If `axis` or `depth` is not an int.
        TypeError: If dtype of `indices` is not int32 or int64.
        TypeError: If dtype of `on_value` is not int32, int64, float16 or float32.
        TypeError: If `indices`, `on_value` or `off_value` is not a Tensor.
        ValueError: If `axis` is not in range [-1, ndim].
        ValueError: If `depth` is less than 0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32)
        >>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32)
        >>> output = ops.one_hot(indices, depth, on_value, off_value, axis=-1)
        >>> print(output)
        [[1. 0. 0.]
         [0. 1. 0.]
         [0. 0. 1.]]
    """
    if not isinstance(on_value, Tensor):
        on_value = Tensor(on_value)
    if not isinstance(off_value, Tensor):
        off_value = Tensor(off_value)
    onehot = _get_cache_prim(P.OneHot)(axis)
    return onehot(indices, depth, on_value, off_value)


def fill(type, shape, value):  # pylint: disable=redefined-outer-name
    """
    Creates a Tensor of the specified shape, filled with the specified value.

    Args:
        type (mindspore.dtype): The specified type of output tensor. The data type only supports
            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ and
            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ .
        shape (Union(Tensor, tuple[int])): The specified shape of output tensor.
        value (Union(Tensor, number.Number, bool)): Value to fill the returned tensor.

    Returns:
        Tensor.

    Raises:
        TypeError: If `shape` is not a tuple or a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import ops
        >>> output = ops.fill(mindspore.float32, (2, 2), 1)
        >>> print(output)
        [[1. 1.]
         [1. 1.]]
        >>> output = ops.fill(mindspore.float32, (3, 3), 0)
        >>> print(output)
        [[0. 0. 0.]
         [0. 0. 0.]
         [0. 0. 0.]]
    """
    value = cast_(value, type)
    return fillv2_(shape, value)


def full(size, fill_value, *, dtype=None):  # pylint: disable=redefined-outer-name
    """
    Creates a Tensor of the specified shape, filled with the specified value.

    Args:
        size (Union(tuple[int], list[int])): The specified shape of output tensor.
        fill_value (number.Number): Value to fill the returned tensor. Complex numbers are not supported for now.

    Keyword Args:
        dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for details,
            please refer to :class:`mindspore.dtype` . Default: ``None`` .

    Returns:
        Tensor.

    Raises:
        TypeError: If `size` is not a tuple or list.
        ValueError: If an element in `size` is less than 0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import ops
        >>> output = ops.full((2, 2), 1)
        >>> print(output)
        [[1 1]
         [1 1]]
        >>> output = ops.full((3, 3), 0)
        >>> print(output)
        [[0 0 0]
         [0 0 0]
         [0 0 0]]
    """
    if not isinstance(size, (list, tuple)):
        raise TypeError(f"For 'ops.full', 'size' must be a tuple or list of ints, but got {type(size)}.")
    if dtype is None:
        dtype = mstype.int64
    if dtype not in mstype.all_types:
        raise TypeError(f"For 'ops.full', 'dtype' must be a mindspore dtype, but got {dtype}.")
    if isinstance(size, list):
        size = tuple(size)
    return ops.fill(dtype, size, fill_value)


def full_ext(size, fill_value, *, dtype=None):  # pylint: disable=redefined-outer-name
    """
    Creates a Tensor of the specified shape, filled with the specified value.

    Args:
        size (Union(tuple[int], list[int])): The specified shape of output tensor.
        fill_value (number.Number): Value to fill the returned tensor. Complex numbers are not supported for now.

    Keyword Args:
        dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for details,
            please refer to :class:`mindspore.dtype` . Default: ``None`` .

    Returns:
        Tensor.

    Raises:
        TypeError: If `size` is not a tuple or list.
        ValueError: If an element in `size` is less than 0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import ops
        >>> output = ops.function.array_func.full_ext((2, 2), 1)
        >>> print(output)
        [[1 1]
         [1 1]]
        >>> output = ops.function.array_func.full_ext((3, 3), 0)
        >>> print(output)
        [[0 0 0]
         [0 0 0]
         [0 0 0]]
    """
    if isinstance(fill_value, Tensor):
        return fill_tensor_(size, fill_value, dtype)
    return fill_scalar_(size, fill_value, dtype)


def full_like(input, fill_value, *, dtype=None):
    """
    Returns a Tensor of the same shape as `input`, filled with `fill_value`.

    Args:
        input (Tensor): The input Tensor; the output Tensor will have the same shape as `input`.
        fill_value (Number): Value to fill the returned Tensor. Complex numbers are not supported for now.

    Keyword Args:
        dtype (mindspore.dtype, optional): The specified type of output tensor. `bool_` and `number` are supported,
            for details, please refer to :class:`mindspore.dtype` . Default: ``None`` .

    Returns:
        Tensor.

    Raises:
        TypeError: If `input` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, ops
        >>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
        >>> output = ops.full_like(input, 1)
        >>> print(output)
        [[1 1]
         [1 1]]
        >>> input = Tensor([[0, 1, 1], [2, 1, 2], [1, 3, 4]], dtype=mindspore.int32)
        >>> output = ops.full_like(input, 0)
        >>> print(output)
        [[0 0 0]
         [0 0 0]
         [0 0 0]]
    """
    if not isinstance(input, Tensor):
        raise TypeError(f"For ops.full_like, the argument 'input' must be Tensor, but got {type(input)}")
    if dtype is None:
        dtype = input.dtype
    return full(input.shape, fill_value, dtype=dtype)


def chunk(input, chunks, axis=0):
    """
    Cuts the input Tensor into `chunks` sub-tensors along the specified axis.

    Note:
        This function may return fewer than the specified number of chunks!

    Args:
        input (Tensor): A Tensor to be cut.
        chunks (int): Number of sub-tensors to cut.
        axis (int, optional): The axis along which to split. Default: ``0`` .

    Returns:
        A tuple of sub-tensors.

    Raises:
        TypeError: If argument `input` is not Tensor.
        TypeError: If argument `chunks` is not int.
        TypeError: If argument `axis` is not int.
        ValueError: If argument `axis` is out of range of :math:`[-input.ndim, input.ndim)` .
        ValueError: If argument `chunks` is not a positive number.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import ops, Tensor
        >>> input_x = np.arange(9).astype("float32")
        >>> output = ops.chunk(Tensor(input_x), 3)
        >>> print(output)
        (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]),
         Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00,  4.00000000e+00,  5.00000000e+00]),
         Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00,  7.00000000e+00,  8.00000000e+00]))
    """
    if not isinstance(input, Tensor):
        raise TypeError(f'For ops.chunk parameter `input` must be Tensor, but got {type(input)}')
    _check_axis_type(axis, True, False, False, "ops.chunk")
    arr_axis = _canonicalize_axis(axis, input.ndim)

    if not isinstance(chunks, int):
        raise TypeError(f"For ops.chunk type of argument `chunks` should be integer, but got {type(chunks)}")
    if chunks <= 0:
        raise ValueError(f"For ops.chunk parameter 'chunks' must be greater than 0, but got {chunks}")

    arr_shape = input.shape
    length_along_dim = arr_shape[arr_axis]

    if chunks > length_along_dim:
        # More chunks than elements: split into single-element chunks.
        res = _get_cache_prim(P.Split)(arr_axis, length_along_dim)(input)
    elif length_along_dim % chunks == 0:
        # Evenly divisible: split directly into `chunks` equal pieces.
        res = _get_cache_prim(P.Split)(arr_axis, chunks)(input)
    else:
        # Uneven split: each chunk holds ceil(length/chunks) elements, and the
        # remainder forms one final, smaller chunk.
        block_size = int(np.ceil(length_along_dim / chunks))
        true_chunks = int(length_along_dim // block_size)
        length1 = true_chunks * block_size
        length2 = length_along_dim - length1
        # Slice off the evenly divisible head and the remainder tail, then
        # split each slice separately.
        start1 = _list_comprehensions(rank_(input), 0, True)
        size1 = _tuple_setitem(arr_shape, arr_axis, length1)
        start2 = _tuple_setitem(start1, arr_axis, length1)
        size2 = _tuple_setitem(arr_shape, arr_axis, length2)
        res = _get_cache_prim(P.Split)(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
        if length2:
            res += _get_cache_prim(P.Split)(arr_axis, 1)(tensor_slice(input, start2, size2))
    return res
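
# Hedged walk-through of the uneven branch above (hypothetical 1-D input of
# length 7 with chunks=3): block_size = ceil(7/3) = 3 and true_chunks = 7//3 = 2,
# so the head of length 6 splits into two chunks of 3 and the tail of length 1
# becomes the final chunk, giving pieces of sizes (3, 3, 1). With length 4 and
# chunks=3 the remainder vanishes (block_size=2, true_chunks=2), so only two
# chunks of size 2 come back -- the "fewer chunks" case the Note warns about.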


def chunk_ext(input, chunks, dim=0):
    """
    Cuts the input Tensor into `chunks` sub-tensors along the specified axis.

    Note:
        This function may return fewer than the specified number of chunks!

    Args:
        input (Tensor): A Tensor to be cut.
        chunks (int): Number of sub-tensors to cut.
        dim (int, optional): The axis along which to split. Default: ``0`` .

    Returns:
        A tuple of sub-tensors.

    Raises:
        TypeError: If argument `input` is not Tensor.
        TypeError: If argument `chunks` is not int.
        TypeError: If argument `dim` is not int.
        ValueError: If argument `dim` is out of range of :math:`[-input.ndim, input.ndim)` .
        ValueError: If argument `chunks` is not a positive number.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import numpy as np
        >>> import mindspore
        >>> from mindspore import Tensor
        >>> input_x = np.arange(9).astype("float32")
        >>> output = mindspore.mint.chunk(Tensor(input_x), 3)
        >>> print(output)
        (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]),
         Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00,  4.00000000e+00,  5.00000000e+00]),
         Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00,  7.00000000e+00,  8.00000000e+00]))
    """
    return chunk_(input, chunks, dim)


def fills(x, value):
    """
    `fills` is deprecated, please use `ops.fill` instead.
    """
    if isinstance(value, float):
        value_ = value
    elif isinstance(value, int):
        value_ = float(value)
    elif isinstance(value, Tensor):
        if value.ndim != 0:
            raise ValueError(f"For 'ops.fills', if the argument 'value' is a tensor, the number of its dimension"
                             f" should be 0, but got {value.ndim}")
        value_ = value.astype(mstype.float32)
    else:
        raise TypeError(f"For 'ops.fills', the type of argument 'value' should be int, float or Tensor,"
                        f" but got {type(value)}")
    return fills_(x, value_)


def ones_like(input, *, dtype=None):
    """
    Returns a Tensor filled with 1, with the same shape as `input`.

    Args:
        input (Tensor): Tensor of any dimension.

    Keyword Args:
        dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
            the dtype of the input tensor will be used. Default: ``None`` .

    Returns:
        Tensor, has the same shape as `input` but filled with ones.

    Raises:
        TypeError: If `input` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
        >>> output = ops.ones_like(x)
        >>> print(output)
        [[1 1]
         [1 1]]
    """
    output = ones_like_(input)
    _dtype = input.dtype if dtype is None else dtype
    output = cast_(output, _dtype)
    return output


def zeros_like(input, *, dtype=None):
    r"""
    Creates a tensor filled with 0, with the same size as input, and the given dtype.

    If `dtype = None`, the tensor will have the same dtype as input `input`.

    Args:
        input (Tensor): Tensor of any dimension.

    Keyword Args:
        dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
            the dtype of the input tensor will be used. Default: ``None`` .

    Returns:
        Tensor, filled with 0.

    Raises:
        TypeError: If dtype is not a MindSpore dtype.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.arange(4).reshape(2, 2))
        >>> output = ops.zeros_like(x, dtype=mindspore.float32)
        >>> print(output)
        [[0. 0.]
         [0. 0.]]
    """
    _dtype = input.dtype if dtype is None else dtype
    output = zeros_like_(input)
    output = cast_(output, _dtype)
    return output


def ones_like_ext(input, *, dtype=None):
    """
    Creates a tensor filled with 1, with the same shape as input, and its data type is determined by the given dtype.

    If `dtype = None`, the tensor will have the same dtype as input `input`.

    Args:
        input (Tensor): Tensor of any dimension.

    Keyword Args:
        dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
            the dtype of the input tensor will be used. Default: ``None`` .

    Returns:
        Tensor, has the same shape as `input` but filled with ones.

    Raises:
        TypeError: If `input` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
        >>> output = ops.function.array_func.ones_like_ext(x)
        >>> print(output)
        [[1 1]
         [1 1]]
    """
    return ones_like_ext_(input, dtype)


def zeros_like_ext(input, *, dtype=None):
    r"""
    Creates a tensor filled with 0, with the same size as input. Its data type is determined by the given dtype.

    If `dtype = None`, the tensor will have the same dtype as input `input`.

    Args:
        input (Tensor): Tensor of any dimension.

    Keyword Args:
        dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype` is ``None`` ,
            the dtype of the input tensor will be used. Default: ``None`` .

    Returns:
        Tensor, filled with 0.

    Raises:
        TypeError: If dtype is not a MindSpore dtype.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.arange(4).reshape(2, 2))
        >>> output = ops.function.array_func.zeros_like_ext(x, dtype=mindspore.float32)
        >>> print(output)
        [[0. 0.]
         [0. 0.]]
    """
    return zeros_like_ext_(input, dtype)


##############################
# Tensor Operation Functions.
##############################


def unique(input):
    """
    Returns the unique elements of the input tensor, along with a tensor containing, for each value of the input,
    its index in the unique output.

    The output is a tuple (`y`, `idx`). The shapes of `y` and `idx` differ in most cases, because `y` is
    deduplicated while `idx` keeps the shape of the input.

    To get the same shape between `idx` and `y`, please refer to the :class:`mindspore.ops.UniqueWithPad` operator.

    Args:
        input (Tensor): The input tensor.
            The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Returns:
        Tuple, containing Tensor objects (`y`, `idx`). `y` is a tensor with the
        same type as `input`, and contains the unique elements in `input`.
        `idx` is a tensor containing indices of elements in
        the input corresponding to the output tensor, and has the same shape as `input`.

    Raises:
        TypeError: If `input` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, nn
        >>> from mindspore import ops
        >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
        >>> output = ops.unique(x)
        >>> print(output)
        (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
        >>> y = output[0]
        >>> print(y)
        [1 2 5]
        >>> idx = output[1]
        >>> print(idx)
        [0 1 2 1]
    """
    # Flatten the input, deduplicate, then restore the index tensor to the
    # original input shape.
    shape_x = input.shape
    length_x = get_x_shape(shape_x)
    input = reshape_(input, length_x)
    y, idx = unique_(input)
    idx = reshape_(idx, shape_x)
    return y, idx


def unique_ext(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
    """
    Returns the unique elements of the input tensor.

    When `return_inverse=True`, also returns a tensor containing the index of each value of the input
    tensor corresponding to the output unique tensor.
    When `return_counts=True`, also returns a tensor containing the number of occurrences of each
    unique value or tensor.

    Args:
        input (Tensor): The input tensor.
        sorted(bool): Whether to sort the unique elements in ascending order before returning as output.
            Default: ``True`` .
        return_inverse(bool): Whether to also return the indices for where elements in the original input ended up in
            the returned unique list. Default: ``False`` .
        return_counts(bool): Whether to also return the counts for each unique element. Default: ``False`` .
        dim(int): The dimension to operate upon. If ``None``, the unique of the flattened input is returned.
            Otherwise, each of the tensors indexed by the given dimension is treated as one of the elements to apply
            the unique operation upon. Default: ``None`` .

    Returns:
        A tensor or a tuple of tensors containing some of the tensor objects (`output`, `inverse_indices`, `counts`).

        - output(Tensor) - The output tensor including the unique elements of the input tensor; it has the same dtype
          as the input.
        - inverse_indices(Tensor) - Returned when ``return_inverse`` is True. It represents the indices for where
          elements in the original input map to in the output. When ``dim`` is ``None``, it has the same shape as the
          input; otherwise, the shape is input.shape[dim].
        - counts(Tensor) - Returned when ``return_counts`` is True. It represents the number of occurrences for each
          unique value or tensor. When ``dim`` is ``None``, it has the same shape as the output; otherwise, the shape
          is output.shape[dim].

    Raises:
        TypeError: If `input` is not a Tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, nn
        >>> from mindspore import ops
        >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
        >>> output = ops.unique_ext(x, return_inverse=True)
        >>> print(output)
        (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int64, value= [0, 1, 2, 1]))
        >>> y = output[0]
        >>> print(y)
        [1 2 5]
        >>> idx = output[1]
        >>> print(idx)
        [0 1 2 1]
    """
    if not F.isconstant(return_inverse) or not F.isconstant(return_counts):
        raise ValueError(f"For 'unique_ext', 'return_inverse' and 'return_counts' cannot be mutable")
    if dim is None:
        y, inverse, counts = unique2_(input, sorted, return_inverse, return_counts)
    else:
        validator.check_value_type("return_counts", return_counts, [bool], "unique_ext")
        y, inverse, counts = unique_dim_(input, sorted, return_inverse, dim)
    # Assemble the return value according to the requested extras.
    if return_inverse and return_counts:
        return y, inverse, counts
    if return_inverse:
        return y, inverse
    if return_counts:
        return y, counts
    return y


def unique_with_pad(x, pad_num):
    """
    Returns the unique elements and relative indices of a 1-D tensor, padded with `pad_num`.

    The basic function is the same as the Unique operator, but UniqueWithPad adds a padding step.
    After the input Tensor `x` is processed by the unique operation into the tuple (`y`, `idx`), the shapes
    of `y` and `idx` are usually not equal. UniqueWithPad therefore fills the `y` Tensor with the
    user-specified `pad_num` so that it has the same shape as the Tensor `idx`.

    Args:
        x (Tensor): The tensor to be deduplicated. Must be a 1-D vector with type int32 or int64.
        pad_num (int): The padding value, an int.

    Returns:
        tuple(Tensor), tuple of 2 tensors, `y` and `idx`.

        - y (Tensor) - The unique elements padded with `pad_num`, with the same shape and data type as `x`.
        - idx (Tensor) - The index of each value of `x` in the unique output `y`, with the same shape and data type
          as `x`.

    Raises:
        TypeError: If dtype of `x` is neither int32 nor int64.
        ValueError: If length of shape of `x` is not equal to 1.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, nn
        >>> from mindspore import ops
        >>> x = Tensor(np.array([1, 2, 2, 3, 5, 5]), mindspore.int32)
        >>> output = ops.unique_with_pad(x, 0)
        >>> print(output)
        (Tensor(shape=[6], dtype=Int32, value= [1, 2, 3, 5, 0, 0]),
         Tensor(shape=[6], dtype=Int32, value= [0, 1, 1, 2, 3, 3]))
        >>> y = output[0]
        >>> print(y)
        [1 2 3 5 0 0]
        >>> idx = output[1]
        >>> print(idx)
        [0 1 1 2 3 3]
    """
    return unique_with_pad_(x, pad_num)


def unique_consecutive(input, return_idx=False, return_counts=False, axis=None):
    """
    Returns the elements that are unique in each consecutive group of equivalent elements in the input tensor.

    Args:
        input (Tensor): The input tensor.
        return_idx (bool, optional): Whether to return the index of where the element in the original input
            maps to the position in the output. Default: ``False`` .
        return_counts (bool, optional): Whether to return the counts of each unique element. Default: ``False`` .
        axis (int, optional): The dimension to apply unique. If ``None`` , the unique of the flattened input is
            returned. If specified, it must be int32 or int64. Default: ``None`` .

    Returns:
        A tensor or a tuple of tensors containing tensor objects (`output`, `idx`, `counts`). `output` has the
        same type as `input` and is used to represent the output list of unique scalar elements. If `return_idx` is
        True, there will be an additional returned tensor, `idx`, which has the same shape as `input` and represents
        the index of where the element in the original input maps to the position in the output. If `return_counts`
        is True, there will be an additional returned tensor, `counts`, which represents the number of occurrences
        for each unique value or tensor.

    Raises:
        TypeError: If `input` is not a Tensor.
        TypeError: If dtype of `input` is not supported.
        TypeError: If `return_idx` is not a bool.
        TypeError: If `return_counts` is not a bool.
        TypeError: If `axis` is not an int.
        ValueError: If `axis` is not in the range of :math:`[-ndim, ndim-1]`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> x = Tensor(np.array([1, 1, 2, 2, 3, 1, 1, 2]), mstype.int32)
        >>> output, idx, counts = ops.unique_consecutive(x, True, True, None)
        >>> print(output)
        [1 2 3 1 2]
        >>> print(idx)
        [0 0 1 1 2 3 3 4]
        >>> print(counts)
        [2 2 1 2 1]
    """

    if not isinstance(input, (Tensor, Tensor_)):
        raise TypeError("For 'unique_consecutive', 'input' must be Tensor.")
    unique_consecutive_op = _get_cache_prim(UniqueConsecutive)(return_idx, return_counts, axis)
    output, idx, counts = unique_consecutive_op(input)
    if return_idx and return_counts:
        return output, idx, counts
    if return_idx:
        return output, idx
    if return_counts:
        return output, counts
    return output
1381
1382
1383def searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, sorter=None):
1384    """
1385    Return the position indices such that after inserting the values into the `sorted_sequence`, the order of innermost
1386    dimension of the `sorted_sequence` remains unchanged.
1387
1388    Args:
1389        sorted_sequence (Tensor): The input tensor.
1390            It must contain a monotonically increasing sequence on the innermost dimension.
1391        values (Tensor): The value that should be inserted.
1392
1393    Keyword Args:
1394        out_int32 (bool, optional): Output datatype. If ``True`` , the output datatype will be int32;
1395            if ``False`` , the output datatype will be int64. Default: ``False`` .
1396        right (bool, optional): Search Strategy. If ``True`` , return the last suitable index found;
1397            if ``False`` , return the first such index. Default: ``False`` .
1398        side (str, optional): the same as right but preferred. ``"left"`` corresponds to ``False`` for `right`
1399            and ``"right"`` corresponds to ``True`` for `right`. An error will be reported if this parameter is
1400            set to ``"left"`` while `right` is ``True``. Default: ``None`` .
1401        sorter(Tensor, optional): if provided, a tensor matching the shape of the unsorted sorted_sequence
1402            containing a sequence of indices that sort it in the ascending order on the innermost
1403            dimension and type must be int64. Default: ``None`` .
1404
1405    Returns:
1406        Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
1407        if insert the corresponding value in the `values` Tensor, the order of `sorted_sequence` would be preserved,
1408        whose datatype is int32 if out_int32 is ``True`` , otherwise int64, and shape is the same as the shape of
1409        `values`.
1410
1411    Raises:
1412        ValueError: If the dimension of `sorted_sequence` isn't 1 and all dimensions except the last dimension of
1413            `sorted_sequence` and `values` are different.
1414        ValueError: If `sorted_sequence` value is a scalar.
1415        ValueError: If `values` is a scalar when `sorted_sequence` dimension is not 1.
1416
1417    Supported Platforms:
1418        ``Ascend`` ``GPU`` ``CPU``
1419
1420    Examples:
1421        >>> import mindspore
1422        >>> import numpy as np
1423        >>> from mindspore import Tensor, ops
1424        >>> sorted_sequence = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32)
1425        >>> values = Tensor(np.array([[3, 6, 9], [3, 6, 9]]), mindspore.float32)
1426        >>> output = ops.searchsorted(sorted_sequence, values)
1427        >>> print(output)
1428        [[2 4 5]
1429         [1 2 4]]
1430    """
1431
1432    validator.check_value_type("out_int32", out_int32, [bool], "search_sorted")
1433    validator.check_value_type("right", right, [bool], "search_sorted")
1434    dtype = mstype.int32 if bool(out_int32) else mstype.int64
1435    if side == "left" and right:
1436        raise ValueError("For 'searchsorted', 'side' and 'right' cannot be set to opposite values: "
                         "got side='left' while right=True.")
1438    if side == "right":
1439        right = True
1440    search_sorted_ = SearchSorted(dtype, right)
1441    return search_sorted_(sorted_sequence, values, sorter)
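

# Hypothetical sketch (not part of the public API) contrasting the two search
# strategies documented above: for a value already present in the sequence,
# the default returns the first suitable index and side="right" the last.
def _searchsorted_sides_sketch():
    seq = Tensor(np.array([1., 2., 2., 3.]), mstype.float32)
    vals = Tensor(np.array([2.]), mstype.float32)
    left = searchsorted(seq, vals)                 # [1]
    right = searchsorted(seq, vals, side="right")  # [3]
    return left, right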
1442
1443
1444def ger(input, vec2):
1445    r"""
1446    Ger product of `input` and `vec2`. Calculates the outer product of the two arrays. If `input` is a 1D Tensor of
1447    shape :math:`(m,)` and `vec2` is a 1D Tensor of shape :math:`(n,)`, then `output` is a 2D Tensor of shape
1448    :math:`(m, n)`.
1449
1450    Note:
1451        Currently Ascend does not support float64 data input.
1452
1453    Args:
1454        input (Tensor): input Tensor, with dtype of float16, float32 or float64.
1455        vec2 (Tensor): input Tensor, with dtype of float16, float32 or float64, must have the same dtype as `input`.
1456
1457    Returns:
1458        Tensor, output matrix with the same dtype as inputs. With `input` shape :math:`(m,)` and
1459        `vec2` shape of :math:`(n,)`, the `output` has shape :math:`(m, n)`.
1460
1461    Raises:
1462        TypeError: If `input` or `vec2` is not a 1-D Tensor.
1463        TypeError: If the dtype of `input` and `vec2` is not float16, float32 or float64.
1464        TypeError: If the dtype of `input` and `vec2` are not the same.
1465
1466    Supported Platforms:
1467        ``Ascend`` ``GPU`` ``CPU``
1468
1469    Examples:
1470        >>> import mindspore
1471        >>> from mindspore import Tensor, ops
1472        >>> input = Tensor([1., 2., 3., 4.], mindspore.float32)
1473        >>> vec2 = Tensor([1., 2., 3.], mindspore.float32)
1474        >>> output = ops.ger(input, vec2)
1475        >>> print(output)
1476        [[ 1.  2.  3.]
1477         [ 2.  4.  6.]
1478         [ 3.  6.  9.]
1479         [ 4.  8. 12.]]
1480    """
1481    return ger_(input, vec2)
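

# Illustrative sketch (hypothetical helper, not part of the public API):
# ger(a, b) matches the broadcasted product a[:, None] * b[None, :],
# expressed here with Tensor.reshape.
def _ger_as_broadcast_sketch():
    a = Tensor([1., 2., 3.], mstype.float32)
    b = Tensor([4., 5.], mstype.float32)
    out = ger(a, b)                          # shape (3, 2)
    ref = a.reshape(3, 1) * b.reshape(1, 2)  # same (3, 2) values
    return out, ref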
1482
1483
1484def size(input_x):
1485    r"""
1486    Returns a scalar of type int that represents the size of the input Tensor, i.e., the total number of elements
1487    in the Tensor.
1488
1489    Args:
1490        input_x (Tensor): Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
1491            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
1492
1493    Returns:
1494        int. A scalar representing the number of elements in `input_x`,
1495        :math:`size=x_1*x_2*...*x_R`.
1496
1497    Raises:
1498        TypeError: If `input_x` is not a Tensor.
1499
1500    Supported Platforms:
1501        ``Ascend`` ``GPU`` ``CPU``
1502
1503    Examples:
1504        >>> import mindspore
1505        >>> import numpy as np
1506        >>> from mindspore import Tensor, ops
1507        >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
1508        >>> output = ops.size(input_x)
1509        >>> print(output)
1510        4
1511    """
1512    return size_(input_x)
1513
1514
1515def shape(input_x):
1516    """
1517    Returns the shape of the input tensor.
1518
1519    Args:
1520        input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
1521
1522    Returns:
1523        tuple[int], the output tuple is constructed by multiple integers,
1524        :math:`(x_1, x_2, ..., x_R)`.
1525
1526    Raises:
1527        TypeError: If `input_x` is not a Tensor.
1528
1529    Supported Platforms:
1530        ``Ascend`` ``GPU`` ``CPU``
1531
1532    Examples:
1533        >>> import mindspore
1534        >>> import numpy as np
1535        >>> from mindspore import Tensor, ops
1536        >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
1537        >>> output = ops.shape(input_x)
1538        >>> print(output)
1539        (3, 2, 1)
1540    """
1541    return shape_(input_x)
1542
1543
1544def dyn_shape(input_x):
1545    """
1546    Returns the shape of the input tensor.
1547
1548    Args:
1549        input_x (Tensor): The input Tensor.
1550
1551    Returns:
1552        Tensor, the shape of `input_x` .
1553
1554    Raises:
1555        TypeError: If `input_x` is not a Tensor.
1556
1557    Supported Platforms:
1558        ``Ascend`` ``GPU`` ``CPU``
1559
1560    Examples:
1561        >>> import mindspore
1562        >>> import numpy as np
1563        >>> from mindspore import Tensor, ops
1564        >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
1565        >>> output = ops.dyn_shape(input_x)
1566        >>> print(output)
1567        [3 2 1]
1568    """
1569    return tensor_shape_(input_x)
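

# Illustrative sketch (hypothetical helper, not part of the public API):
# ops.size is the product of the entries of ops.shape, and dyn_shape returns
# the same shape as a Tensor rather than a tuple.
def _shape_vs_size_sketch():
    x = Tensor(np.ones((3, 2, 1)), mstype.float32)
    assert shape(x) == (3, 2, 1)
    assert size(x) == 3 * 2 * 1
    return dyn_shape(x)  # Tensor [3 2 1]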
1570
1571
1572def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
1573    r"""
1574    Reverses variable length slices.
1575
1576    Args:
1577        x (Tensor): The input to reverse, supporting all number types including bool.
1578        seq_lengths (Tensor): Specified reversing length, must be a 1-D vector with int32 or int64 types.
1579        seq_dim (int): The dimension where reversal is performed. Required.
1580        batch_dim (int): The input is sliced in this dimension. Default: ``0`` .
1581
1582    Returns:
1583        Tensor, with the same shape and data type as `x`.
1584
1585    Raises:
1586        TypeError: If `seq_dim` or `batch_dim` is not an int.
1587        ValueError: If :math:`len(seq\_lengths) != x.shape[batch\_dim]`.
1588        ValueError: If :math:`batch\_dim == seq\_dim`.
1589        ValueError: If :math:`seq\_dim < 0` or :math:`seq\_dim >= len(x.shape)`.
1590        ValueError: If :math:`batch\_dim < 0` or :math:`batch\_dim >= len(x.shape)`.
1591        RuntimeError: If any value of `seq_lengths` is less than 0.
1592        RuntimeError: If any value of `seq_lengths` is larger than `x.shape[seq_dim]`.
1593
1594    Supported Platforms:
1595        ``Ascend`` ``GPU`` ``CPU``
1596
1597    Examples:
1598        >>> import mindspore
1599        >>> import numpy as np
1600        >>> from mindspore import Tensor, ops
1601        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
1602        >>> seq_lengths = Tensor(np.array([1, 2, 3]))
1603        >>> output = ops.reverse_sequence(x, seq_lengths, seq_dim=1)
1604        >>> print(output)
1605        [[1. 2. 3.]
1606         [5. 4. 6.]
1607         [9. 8. 7.]]
1608        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
1609        >>> seq_lengths = Tensor(np.array([1, 2, 3]))
1610        >>> output = ops.reverse_sequence(x, seq_lengths, seq_dim=0, batch_dim=1)
1611        >>> print(output)
1612        [[1. 5. 9.]
1613         [4. 2. 6.]
1614         [7. 8. 3.]]
1615        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
1616        >>> seq_lengths = Tensor(np.array([2, 2, 3]))
1617        >>> output = ops.reverse_sequence(x, seq_lengths, seq_dim=1)
1618        >>> print(output)
1619        [[2. 1. 3.]
1620         [5. 4. 6.]
1621         [9. 8. 7.]]
1622        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
1623        >>> seq_lengths = Tensor(np.array([3, 2, 3]))
1624        >>> output = ops.reverse_sequence(x, seq_lengths, seq_dim=1)
1625        >>> print(output)
1626        [[3. 2. 1.]
1627         [5. 4. 6.]
1628         [9. 8. 7.]]
1629        >>> x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.float32)
1630        >>> seq_lengths = Tensor(np.array([4, 4]))
1631        >>> output = ops.reverse_sequence(x, seq_lengths, seq_dim=1)
1632        >>> print(output)
1633        [[4. 3. 2. 1.]
1634         [8. 7. 6. 5.]]
1635    """
1636    return _get_cache_prim(P.ReverseSequence)(seq_dim=seq_dim, batch_dim=batch_dim)(x, seq_lengths)
1637
1638
1639def flatten(input, order='C', *, start_dim=1, end_dim=-1):
1640    r"""
1641    Flatten a tensor along dimensions from `start_dim` to `end_dim`.
1642
1643    Args:
1644        input (Tensor): The input Tensor.
1645        order (str, optional): Only ``'C'`` and ``'F'`` are supported.
1646            ``'C'`` means to flatten in row-major (C-style) order.
1647            ``'F'`` means to flatten in column-major (Fortran-style) order. Default: ``'C'`` .
1648
1649    Keyword Args:
1650        start_dim (int, optional): The first dimension to flatten. Default: ``1`` .
1651        end_dim (int, optional): The last dimension to flatten. Default: ``-1`` .
1652
1653    Returns:
1654        Tensor. If no dimensions are flattened, the original `input` is returned; otherwise, the flattened Tensor
1655        is returned. If `input` is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.
1656
1657    Raises:
1658        TypeError: If `input` is not a Tensor.
1659        TypeError: If `order` is not string type.
1660        ValueError: If `order` is string type, but not ``'C'`` or ``'F'``.
1661        TypeError: If `start_dim` or `end_dim` is not int.
1662        ValueError: If `start_dim` is greater than `end_dim` after canonicalization.
1663        ValueError: If `start_dim` or `end_dim` is not in the range of [-input.dim, input.dim-1].
1664
1665    Supported Platforms:
1666        ``Ascend`` ``GPU`` ``CPU``
1667
1668    Examples:
1669        >>> import mindspore
1670        >>> import numpy as np
1671        >>> from mindspore import Tensor, ops
1672        >>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
1673        >>> output = ops.flatten(input_x)
1674        >>> print(output.shape)
1675        (1, 24)
1676    """
1677
1678    def check_axis_valid(axis, ndim):
1679        if axis < -ndim or axis >= ndim:
1680            raise ValueError("'start_dim' or 'end_dim' out of range.")
1681
1682    def check_dim_valid(start_dim, end_dim):
1683        if start_dim > end_dim:
1684            raise ValueError("For 'flatten', 'start_dim' cannot come after 'end_dim'.")
1685
1686    def canonicalize_axis(axis, x_rank):
1687        ndim = x_rank if x_rank != 0 else 1
1688        check_axis_valid(axis, ndim)
1689        return axis if axis >= 0 else axis + ndim
1690
1691    # Check the types of arguments.
1692    if not isinstance(input, Tensor):
1693        raise TypeError("For 'flatten', argument 'input' must be Tensor.")
1694    if not isinstance(start_dim, int) or not isinstance(end_dim, int) or \
1695            isinstance(start_dim, bool) or isinstance(end_dim, bool):
1696        raise TypeError("For 'flatten', both 'start_dim' and 'end_dim' must be int.")
1697    check_flatten_order_const(order)
1698    if order == 'F':
1699        x_rank = rank_(input)
1700        # If input is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.
1701        if x_rank in (0, 1):
1702            return reshape_(input, (-1,))
1703        perm = ops.make_range(0, x_rank)
1704        new_order = ops.tuple_reversed(perm)
1705        input = transpose_(input, new_order)
1706
1707    # Handle the default case.
1708    x_shape = shape_(input)
1709    x_rank = rank_(input)
1710    if start_dim == 1 and end_dim == -1:
1711        if x_rank in (0, 1):
1712            return reshape_(input, (-1,))
1713        return flatten_(input)
1714
1715    # Check axis.
1716    start_dim = canonicalize_axis(start_dim, x_rank)
1717    end_dim = canonicalize_axis(end_dim, x_rank)
1718    check_dim_valid(start_dim, end_dim)
1719    # If input is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.
1720    if x_rank in (0, 1):
1721        return reshape_(input, (-1,))
1722    # If no dimensions to flatten, return the original object.
1723    if start_dim == end_dim:
1724        return input
1725    # Flatten elements along specified dimensions.
1726    dim_length = 1
1727    idx = start_dim
1728    while idx <= end_dim:
1729        dim_length *= x_shape[idx]
1730        idx += 1
1731    new_shape = x_shape[:start_dim] + (dim_length,) + x_shape[end_dim + 1:]
1732    return reshape_(input, new_shape)
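

# Illustrative sketch (hypothetical helper, not part of the public API) of the
# shape arithmetic implemented above: flattening dims 1..2 of a (2, 3, 4, 5)
# tensor merges them into one axis of length 3 * 4 = 12, leaving the outer and
# trailing dims untouched.
def _flatten_range_sketch():
    x = Tensor(np.ones((2, 3, 4, 5)), mstype.float32)
    y = flatten(x, start_dim=1, end_dim=2)
    return y.shape  # (2, 12, 5)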
1733
1734
1735def slice(input_x, begin, size):
1736    r"""
1737    Slices a tensor in the specified shape.
1738
1739    Slice the tensor `input_x` in shape of `size` and starting at the location specified by `begin`.
1740    The slice `begin` represents the offset in each dimension of `input_x`.
1741    The slice `size` represents the size of the output tensor.
1742
1743    Note:
1744        `begin` is zero-based and `size` is one-based.
1745
1746    If `size[i]` is -1, all remaining elements in dimension i are included in the slice.
1747    This is equivalent to setting :math:`size[i] = input\_x.shape[i] - begin[i]`.
1748
1749    Args:
1750        input_x (Tensor): The target tensor.
1751        begin (Union[tuple, list]): The beginning of the slice. Only constant value(>=0) is allowed.
1752        size (Union[tuple, list]): The size of the slice. Only constant value is allowed.
1753
1754    Returns:
1755        Tensor, the shape is input `size`, the data type is the same as `input_x`.
1756
1757    Raises:
1758        TypeError: If `begin` or `size` is neither tuple nor list.
1759
1760    Supported Platforms:
1761        ``Ascend`` ``GPU`` ``CPU``
1762
1763    Examples:
1764        >>> from mindspore import Tensor
1765        >>> from mindspore import ops
1766        >>> import numpy as np
1767        >>> data = Tensor(np.array([[[1, 1, 1], [2, 2, 2]],
1768        ...                         [[3, 3, 3], [4, 4, 4]],
1769        ...                         [[5, 5, 5], [6, 6, 6]]]).astype(np.int32))
1770        >>> output = ops.slice(data, (1, 0, 0), (1, 1, 3))
1771        >>> print(output)
1772        [[[3 3 3]]]
1773        >>> output = ops.slice(data, (1, 0, 0), (1, 1, 2))
1774        >>> print(output)
1775        [[[3 3]]]
1776        >>> output = ops.slice(data, (1, 0, 0), (1, 1, 1))
1777        >>> print(output)
1778        [[[3]]]
1779        >>> output = ops.slice(data, (1, 1, 0), (1, 1, 3))
1780        >>> print(output)
1781        [[[4 4 4]]]
1782        >>> output = ops.slice(data, (1, 0, 1), (1, 1, 2))
1783        >>> print(output)
1784        [[[3 3]]]
1785    """
1786    return tensor_slice(input_x, begin, size)
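

# Illustrative sketch (hypothetical helper, not part of the public API) of the
# size[i] = -1 convention documented above: -1 takes everything from begin[i]
# to the end of dimension i.
def _slice_negative_size_sketch():
    x = Tensor(np.arange(12).reshape(3, 4), mstype.int32)
    # Rows 1..2 and columns 1..3, i.e. output shape (2, 3).
    return slice(x, (1, 1), (-1, -1))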
1787
1788
1789def stack(tensors, axis=0):
1790    r"""
1791    Stacks a list of tensors along the specified axis.
1792
1793    Stacks the list of input tensors, each with the same rank `R`; the output is a tensor of rank `(R+1)`.
1794
1795    Given input tensors of shape :math:`(x_1, x_2, ..., x_R)`. Set the number of input tensors as `N`.
1796    If :math:`axis \ge 0`, the shape of the output tensor is
1797    :math:`(x_1, x_2, ..., x_{axis}, N, x_{axis+1}, ..., x_R)`.
1798
1799    Args:
1800        tensors (Union[tuple, list]): A Tuple or list of Tensor objects with the same shape and type.
1801        axis (int): Dimension to stack. The range is [-(R+1), R+1). Default: ``0`` .
1802
1803    Returns:
1804        Tensor. A stacked Tensor with the same type as `tensors`.
1805
1806    Raises:
1807        TypeError: If the data types of elements in `tensors` are not the same.
1808        ValueError: If the length of `tensors` is not greater than zero;
1809                    or if axis is out of the range [-(R+1), R+1);
1810                    or if the shapes of elements in tensors are not the same.
1811
1812    Supported Platforms:
1813        ``Ascend`` ``GPU`` ``CPU``
1814
1815    Examples:
1816        >>> import numpy as np
1817        >>> from mindspore import Tensor, ops
1818        >>> input_x1 = Tensor(np.array([0, 1]).astype(np.float32))
1819        >>> input_x2 = Tensor(np.array([2, 3]).astype(np.float32))
1820        >>> output = ops.stack((input_x1, input_x2), 0)
1821        >>> print(output)
1822        [[0. 1.]
1823         [2. 3.]]
1824    """
1825    _stack = _get_cache_prim(P.Stack)(axis)
1826    return _stack(tensors)
1827
1828
1829def unstack(input_x, axis=0):
1830    r"""
1831    Unstacks a tensor along the specified axis; this is the opposite of :func:`mindspore.ops.stack`.
1832    Assuming input is a tensor of rank `R`, output tensors will have rank `(R-1)`.
1833
1834    Args:
1835        input_x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`.
1836            A tensor to be unstacked and the rank of the tensor must be greater than 0.
1837        axis (int): Dimension along which to unpack. Default: ``0`` .
1838            Negative values wrap around. The range is [-R, R).
1839
1840    Returns:
1841        A tuple of tensors, the shape of each object is the same.
1842        Given a tensor of shape :math:`(x_1, x_2, ..., x_R)`. If :math:`0 \le axis`,
1843        the shape of tensor in output is :math:`(x_1, x_2, ..., x_{axis}, x_{axis+2}, ..., x_R)`.
1844
1845    Raises:
1846        ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)).
1847
1848    Supported Platforms:
1849        ``Ascend`` ``GPU`` ``CPU``
1850
1851    Examples:
1852        >>> import numpy as np
1853        >>> from mindspore import Tensor, ops
1854        >>> input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
1855        >>> output = ops.unstack(input_x, 0)
1856        >>> print(output)
1857        (Tensor(shape=[4], dtype=Int64, value= [1, 1, 1, 1]), Tensor(shape=[4], dtype=Int64, value= [2, 2, 2, 2]))
1858    """
1859    _unstack = _get_cache_prim(P.Unstack)(axis)
1860    return _unstack(input_x)
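

# Illustrative sketch (hypothetical helper, not part of the public API):
# unstack inverts stack along the same axis, so stacking the unstacked parts
# restores the original tensor.
def _stack_unstack_roundtrip_sketch():
    x = Tensor(np.array([[1., 2.], [3., 4.]]), mstype.float32)
    parts = unstack(x, 0)  # ([1. 2.], [3. 4.])
    return stack(parts, 0)  # same shape and values as x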
1861
1862
1863def unbind(input, dim=0):
1864    r"""
1865    Removes a tensor dimension along the specified axis.
1866
1867    Unstacks a tensor of rank `R` along the `dim` dimension; the output tensors will have rank `(R-1)`.
1868
1869    Given a tensor of shape :math:`(n_1, n_2, ..., n_R)` and a specified `dim`,
1870    shape of the output tensors is :math:`(n_1, n_2, ..., n_{dim}, n_{dim+2}, ..., n_R)`.
1871
1872    Args:
1873        input (Tensor): The shape is :math:`(n_1, n_2, ..., n_R)`.
1874            A tensor to be unstacked and the rank of the tensor must be greater than 0.
1875        dim (int): Dimension along which to unpack. Negative values wrap around. The range is [-R, R). Default: ``0`` .
1876
1877    Returns:
1878        A tuple of tensors, the shape of each object is the same.
1879
1880    Raises:
1881        ValueError: If axis is out of the range [-R, R).
1882
1883    Supported Platforms:
1884        ``Ascend`` ``GPU`` ``CPU``
1885
1886    Examples:
1887        >>> import numpy as np
1888        >>> from mindspore import Tensor, ops
1889        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
1890        >>> output = ops.unbind(x, dim=0)
1891        >>> print(output)
1892        (Tensor(shape=[3], dtype=Int64, value=[1, 2, 3]), Tensor(shape=[3], dtype=Int64, value=[4, 5, 6]),
1893        Tensor(shape=[3], dtype=Int64, value=[7, 8, 9]))
1894    """
1895    _unstack = _get_cache_prim(P.Unstack)(dim)
1896    return _unstack(input)
1897
1898
1899def unsqueeze(input, dim):
1900    """
1901    Adds an additional dimension to `input` at the given dim.
1902
1903    Args:
1904        input (Tensor): The shape of tensor is :math:`(n_1, n_2, ..., n_R)`.
1905        dim (int): Specifies the dimension index at which to expand
1906            the shape of `input`. The value of `dim` must be in the range
1907            `[-input.ndim-1, input.ndim]`. Only constant value is allowed.
1908
1909    Returns:
1910        Tensor, the shape of tensor is :math:`(1, n_1, n_2, ..., n_R)` if the
1911        value of `dim` is 0. It has the same data type as `input`.
1912
1913    Raises:
1914        TypeError: If `dim` is not an int.
1915        ValueError: If `dim` is not in the valid range :math:`[-input.ndim-1, input.ndim]`.
1916
1917    Supported Platforms:
1918        ``Ascend`` ``GPU`` ``CPU``
1919
1920    Examples:
1921        >>> import mindspore
1922        >>> import numpy as np
1923        >>> from mindspore import Tensor, ops
1924        >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
1925        >>> output = ops.unsqueeze(input_tensor, dim=0)
1926        >>> print(output)
1927        [[[2. 2.]
1928          [2. 2.]]]
1929    """
1930    return expand_dims(input, dim)
1931
1932
1933def squeeze(input, axis=None):
1934    """
1935    Return the Tensor after removing the dimensions of size 1 at the specified `axis`.
1936
1937    If :math:`axis=None`, it will remove all the dimensions of size 1.
1938    If `axis` is specified, it will remove the dimensions of size 1 in the given `axis`.
1939    For example, if the dimension is not specified :math:`axis=None`, input shape is (A, 1, B, C, 1, D),
1940    then the shape of the output Tensor is (A, B, C, D). If the dimension is specified, the squeeze operation
1941    is only performed in the specified dimension. If input shape is (A, 1, B), input Tensor will be changed
1942    to (A, B) when :math:`axis=1`, but when :math:`axis=0` or :math:`axis=2`, an error will occur.
1943
1944    Note:
1945        - Squeezing a dimension that is not 1 will raise an error.
1946        - Please note that in dynamic graph mode, the output Tensor will share data with the input Tensor,
1947          and there is no Tensor data copy process.
1948        - The dimension index starts at 0 and must be in the range `[-input.ndim, input.ndim]`.
1949
1950    Args:
1951        input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
1952        axis (Union[int, tuple(int), list(int)]): Specifies the dimension indexes of shape to be removed, which will
1953            remove all the dimensions of size 1 in the given axis parameter. If specified, it must be int32 or int64.
1954            Default: ``None`` , an empty tuple will be used.
1955
1956    Returns:
1957        Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_S)`.
1958
1959    Raises:
1960        TypeError: If `input` is not a tensor.
1961        TypeError: If `axis` is not an int, tuple or list.
1962        TypeError: If `axis` is a tuple or list whose elements are not all int.
1963        ValueError: If the corresponding dimension of the specified axis isn't equal to 1.
1964
1965    Supported Platforms:
1966        ``Ascend`` ``GPU`` ``CPU``
1967
1968    Examples:
1969        >>> import mindspore
1970        >>> import numpy as np
1971        >>> from mindspore import Tensor, ops
1972        >>> input = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
1973        >>> output = ops.squeeze(input)
1974        >>> print(output)
1975        [[1. 1.]
1976         [1. 1.]
1977         [1. 1.]]
1978    """
1979    if axis is None:
1980        axis = ()
1981    if isinstance(axis, list):
1982        axis = tuple(axis)
1983    squeeze_ = _get_cache_prim(P.Squeeze)(axis)
1984    return squeeze_(input)
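

# Illustrative sketch (hypothetical helper, not part of the public API):
# unsqueeze inserts a size-1 axis and squeeze with the same axis removes it
# again.
def _squeeze_unsqueeze_roundtrip_sketch():
    x = Tensor(np.ones((3, 2)), mstype.float32)
    y = unsqueeze(x, 1)  # shape (3, 1, 2)
    z = squeeze(y, 1)    # back to shape (3, 2)
    return z.shape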
1985
1986
1987def scatter_mul(input_x, indices, updates):
1988    r"""
1989    Using given values to update tensor value through the mul operation, along with the input indices.
1990    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
1991
1992    for each :math:`i, ..., j` in `indices.shape`:
1993
1994    .. math::
1995
1996        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{*}= \text{updates}[i, ..., j, :]
1997
1998    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
1999    If they have different data types, the lower priority data type will be converted to
2000    the relatively highest priority data type. A RuntimeError will be reported
2001    when the data types of parameters need to be converted.
2002
2003    Args:
2004        input_x (Parameter): The target tensor to be updated, with data type of Parameter.
2005            The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
2006        indices (Tensor): The index to do mul operation whose data type must be int32 or int64.
2007        updates (Tensor): The tensor doing the mul operation with `input_x`,
2008            the data type is same as `input_x`, the shape is `indices.shape + input_x.shape[1:]`.
2009
2010    Returns:
2011        Tensor, the updated `input_x`, has the same shape and type as `input_x`.
2012
2013    Raises:
2014        TypeError: If `indices` is not an int32 or int64.
2015        ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
2016        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
2017                      is required when data type conversion of Parameter is not supported.
2018
2019    Supported Platforms:
2020        ``Ascend`` ``GPU`` ``CPU``
2021
2022    Examples:
2023        >>> import mindspore
2024        >>> import numpy as np
2025        >>> from mindspore import Tensor, ops, Parameter
2026        >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name="x")
2027        >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
2028        >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)
2029        >>> output = ops.scatter_mul(input_x, indices, updates)
2030        >>> print(output)
2031        [[2. 2. 2.]
2032         [4. 4. 4.]]
2033        >>> # for input_x will be updated after the operation is completed. input_x need to be re-initialized.
2034        >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name="x")
2035        >>> # for indices = [[0, 1], [1, 1]]
2036        >>> # step 1: [0, 1]
2037        >>> # input_x[0] = [1.0, 1.0, 1.0] * [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]
2038        >>> # input_x[1] = [2.0, 2.0, 2.0] * [3.0, 3.0, 3.0] = [6.0, 6.0, 6.0]
2039        >>> # step 2: [1, 1]
2040        >>> # input_x[1] = [6.0, 6.0, 6.0] * [7.0, 7.0, 7.0] = [42.0, 42.0, 42.0]
2041        >>> # input_x[1] = [42.0, 42.0, 42.0] * [9.0, 9.0, 9.0] = [378.0, 378.0, 378.0]
2042        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
2043        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
2044        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)
2045        >>> output = ops.scatter_mul(input_x, indices, updates)
2046        >>> print(output)
2047        [[  1.   1.   1.]
2048         [378. 378. 378.]]
2049        >>> # for input_x will be updated after the operation is completed. input_x need to be re-initialized.
2050        >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name="x")
2051        >>> # for indices = [[1, 0], [1, 1]]
2052        >>> # step 1: [1, 0]
2053        >>> # input_x[0] = [1.0, 1.0, 1.0] * [3.0, 3.0, 3.0] = [3.0, 3.0, 3.0]
2054        >>> # input_x[1] = [2.0, 2.0, 2.0] * [1.0, 1.0, 1.0] = [2.0, 2.0, 2.0]
2055        >>> # step 2: [1, 1]
2056        >>> # input_x[1] = [2.0, 2.0, 2.0] * [7.0, 7.0, 7.0] = [14.0, 14.0, 14.0]
2057        >>> # input_x[1] = [14.0, 14.0, 14.0] * [9.0, 9.0, 9.0] = [126.0, 126.0, 126.0]
2058        >>> indices = Tensor(np.array([[1, 0], [1, 1]]), mindspore.int32)
2059        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
2060        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)
2061        >>> output = ops.scatter_mul(input_x, indices, updates)
2062        >>> print(output)
2063        [[  3.   3.   3.]
2064         [126. 126. 126.]]
2065        >>> # for input_x will be updated after the operation is completed. input_x need to be re-initialized.
2066        >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name="x")
2067        >>> # for indices = [[0, 1], [0, 1]]
2068        >>> # step 1: [0, 1]
2069        >>> # input_x[0] = [1.0, 1.0, 1.0] * [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]
2070        >>> # input_x[1] = [2.0, 2.0, 2.0] * [3.0, 3.0, 3.0] = [6.0, 6.0, 6.0]
2071        >>> # step 2: [0, 1]
2072        >>> # input_x[0] = [1.0, 1.0, 1.0] * [7.0, 7.0, 7.0] = [7.0, 7.0, 7.0]
2073        >>> # input_x[1] = [6.0, 6.0, 6.0] * [9.0, 9.0, 9.0] = [54.0, 54.0, 54.0]
2074        >>> indices = Tensor(np.array([[0, 1], [0, 1]]), mindspore.int32)
2075        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
2076        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)
2077        >>> output = ops.scatter_mul(input_x, indices, updates)
2078        >>> print(output)
2079        [[ 7.  7.  7.]
2080         [54. 54. 54.]]
2081    """
2082    return scatter_mul_(input_x, indices, updates)
2083
2084
2085def scatter_max(input_x, indices, updates):
2086    r"""
2087    Using given values to update tensor value through the max operation, along with the input indices.
2088    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
2089
2090    for each :math:`i, ..., j` in `indices.shape`:
2091
2092    .. math::
2093
2094        \text{input_x}[\text{indices}[i, ..., j], :]
2095        = \max(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])
2096
2097    Inputs of `input_x` and `updates` follow the implicit type conversion rules to keep the data types consistent.
2098    If they have different data types, the lower priority data type will be converted to the relatively highest
2099    priority data type. A RuntimeError will be reported when `updates` does not support conversion to the data type
2100    required by `input_x`.
2101
2102    Args:
2103        input_x (Parameter): The target tensor, with data type of Parameter.
2104            The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
2105        indices (Tensor): The index to do max operation whose data type must be mindspore.int32 or mindspore.int64.
2106        updates (Tensor): The tensor doing the max operation with `input_x`,
2107            the data type is same as `input_x`, the shape is `indices.shape + x.shape[1:]`.
2108
2109    Returns:
2110        Tensor, the updated `input_x`, the type and shape same as `input_x`.
2111
2112    Raises:
2113        TypeError: If `indices` is not an int32 or int64.
2114        ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
2115        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
2116                      is required when data type conversion of Parameter is not supported.
2117        RuntimeError: On the Ascend platform, the input data dimension of `input_x` , `indices`
2118                      and `updates` is greater than 8 dimensions.
2119
2120    Supported Platforms:
2121        ``Ascend`` ``GPU`` ``CPU``
2122
2123    Examples:
2124        >>> import mindspore
2125        >>> import numpy as np
2126        >>> from mindspore import Tensor, ops, Parameter
2127        >>> input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32), name="input_x")
2128        >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
2129        >>> updates = Tensor(np.ones([2, 2, 3]) * 88, mindspore.float32)
2130        >>> output = ops.scatter_max(input_x, indices, updates)
2131        >>> print(output)
2132        [[88. 88. 88.]
2133         [88. 88. 88.]]
2134    """
2135    return scatter_max_(input_x, indices, updates)
2136
2137
2138def scatter_add(input_x, indices, updates):
2139    r"""
2140    Using given values to update tensor value through the add operation, along with the input indices.
2141    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
2142
2143    Args:
2144        input_x (Parameter): The target tensor, with data type of Parameter.
2145        indices (Tensor): The index to do add operation whose data type must be int32 or int64.
2146        updates (Tensor): The tensor doing the add operation with `input_x`,
2147            the data type is same as `input_x`, the shape is `indices.shape + x.shape[1:]`.
2148
2149    Returns:
2150        Tensor, the updated `input_x`, has the same shape and type as `input_x`.
2151
2152    Raises:
2153        TypeError: If `indices` is not an int32 or int64.
2154        ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
2155        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
2156            is required when data type conversion of Parameter is not supported.
2157
2158    Supported Platforms:
2159        ``Ascend`` ``GPU`` ``CPU``
2160
2161    Examples:
2162        >>> import numpy as np
2163        >>> import mindspore
2164        >>> from mindspore import Tensor, Parameter
2165        >>> from mindspore import ops
2166        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="x")
2167        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
2168        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
2169        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)
2170        >>> output = ops.scatter_add(input_x, indices, updates)
2171        >>> print(output)
2172        [[ 1.  1.  1.]
2173         [19. 19. 19.]]
2174    """
2175    return scatter_add_(input_x, indices, updates)
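

# Illustrative sketch (hypothetical helper, not part of the public API;
# assumes mindspore.Parameter via the module-level `ms` import): with
# duplicate indices, scatter_add accumulates every matching update row
# instead of overwriting.
def _scatter_add_duplicates_sketch():
    p = ms.Parameter(Tensor(np.zeros((2, 3)), mstype.float32), name="p")
    idx = Tensor(np.array([1, 1]), mstype.int32)
    upd = Tensor(np.array([[1., 1., 1.], [2., 2., 2.]]), mstype.float32)
    return scatter_add(p, idx, upd)  # row 1 becomes [3. 3. 3.]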
2176
2177
2178def scatter_min(input_x, indices, updates):
2179    r"""
2180    Using given values to update tensor value through the min operation, along with the input indices.
2181    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
2182
2183    for each :math:`i, ..., j` in `indices.shape`:
2184
2185    .. math::
2186
2187        \text{input_x}[\text{indices}[i, ..., j], :]
2188        = \min(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])
2189
2190    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
2191    If they have different data types, the lower priority data type will be converted to
2192    the relatively highest priority data type. A RuntimeError will be reported
2193    when `updates` does not support conversion to the data type required by `input_x`.
2194
2195    Args:
2196        input_x (Parameter): The target tensor, with data type of Parameter.
2197        indices (Tensor): The index to do min operation whose data type must be mindspore.int32 or mindspore.int64.
2198        updates (Tensor): The tensor doing the min operation with `input_x`,
2199            the data type is same as `input_x`, the shape is `indices.shape + input_x.shape[1:]`.
2200
2201    Returns:
2202        Tensor, the updated `input_x`, has the same shape and type as `input_x`.
2203
2204    Raises:
2205        TypeError: If `indices` is not an int32 or an int64.
2206        ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
2207        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
2208                      is required when data type conversion of Parameter is not supported.
2209        RuntimeError: On the Ascend platform, the input data dimension of `input_x` , `indices`
2210                      and `updates` is greater than 8 dimensions.
2211
2212    Supported Platforms:
2213        ``Ascend`` ``GPU`` ``CPU``
2214
2215    Examples:
2216        >>> import numpy as np
2217        >>> import mindspore
2218        >>> from mindspore import Tensor, Parameter
2219        >>> from mindspore import ops
2220        >>> input_x = Parameter(Tensor(np.zeros((2, 3)), mindspore.float32), name="input_x")
2221        >>> indices = Tensor(np.array([1, 0]), mindspore.int32)
2222        >>> update = Tensor(np.arange(6).reshape((2, 3)), mindspore.float32)
2223        >>> output = ops.scatter_min(input_x, indices, update)
2224        >>> print(output)
2225        [[0. 0. 0.]
2226         [0. 0. 0.]]
2227    """
2228    return scatter_min_(input_x, indices, updates)
2229
2230
2231def scatter_div(input_x, indices, updates):
2232    r"""
2233    Using given values to update tensor value through the div operation, along with the input indices.
2234    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
2235
2236    for each :math:`i, ..., j` in `indices.shape`:
2237
2238    .. math::
2239
2240        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{/}= \text{updates}[i, ..., j, :]
2241
2242    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
2243    If they have different data types, the lower priority data type will be converted to
2244    the relatively highest priority data type. A RuntimeError will be reported
2245    when `updates` does not support conversion to the data type required by `input_x`.
2246
2247    Args:
2248        input_x (Parameter): The target tensor, with data type of Parameter.
2249        indices (Tensor): The index to do divide operation whose data type must be mindspore.int32 or
2250          mindspore.int64.
2251        updates (Tensor): The tensor doing the divide operation with `input_x`, the data type is same as `input_x`,
2252          the shape is `indices.shape + input_x.shape[1:]`.
2253
2254    Returns:
2255        Tensor, the updated `input_x`, has the same shape and type as `input_x`.
2256
2257    Raises:
2258        TypeError: If the dtype of `indices` is neither int32 nor int64.
2259        ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
2260        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter is required
2261                      when data type conversion of Parameter is not supported.
2262        RuntimeError: On the Ascend platform, the input data dimension of `input_x` , `indices`
2263                      and `updates` is greater than 8 dimensions.
2264
2265    Supported Platforms:
2266        ``Ascend`` ``GPU`` ``CPU``
2267
2268    Examples:
2269        >>> import mindspore
2270        >>> import numpy as np
2271        >>> from mindspore import Tensor, ops, Parameter
2272        >>> input_x = Parameter(Tensor(np.array([[6.0, 6.0, 6.0], [2.0, 2.0, 2.0]]), mindspore.float32), name="x")
2273        >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
2274        >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)
2275        >>> output = ops.scatter_div(input_x, indices, updates)
2276        >>> print(output)
2277        [[3. 3. 3.]
2278         [1. 1. 1.]]
2279        >>> # for input_x will be updated after the operation is completed. input_x need to be re-initialized.
2280        >>> input_x = Parameter(Tensor(np.array([[105.0, 105.0, 105.0],
2281        ...                                      [315.0, 315.0, 315.0]]), mindspore.float32), name="x")
2282        >>> # for indices = [[0, 1], [1, 1]]
2283        >>> # step 1: [0, 1]
2284        >>> # input_x[0] = [105.0, 105.0, 105.0] / [1.0, 1.0, 1.0] = [105.0, 105.0, 105.0]
2285        >>> # input_x[1] = [315.0, 315.0, 315.0] / [3.0, 3.0, 3.0] = [105.0, 105.0, 105.0]
2286        >>> # step 2: [1, 1]
2287        >>> # input_x[1] = [105.0, 105.0, 105.0] / [5.0, 5.0, 5.0] = [21.0, 21.0, 21.0]
2288        >>> # input_x[1] = [21.0, 21.0, 21.0] / [7.0, 7.0, 7.0] = [3.0, 3.0, 3.0]
2289        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
2290        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
2291        ...                            [[5.0, 5.0, 5.0], [7.0, 7.0, 7.0]]]), mindspore.float32)
2292        >>> output = ops.scatter_div(input_x, indices, updates)
2293        >>> print(output)
2294        [[105. 105. 105.]
2295         [  3.   3.   3.]]
2296        >>> # for input_x will be updated after the operation is completed. input_x need to be re-initialized.
2297        >>> input_x = Parameter(Tensor(np.array([[105.0, 105.0, 105.0],
2298        ...                                      [315.0, 315.0, 315.0]]), mindspore.float32), name="x")
2299        >>> # for indices = [[1, 0], [1, 1]]
2300        >>> # step 1: [1, 0]
2301        >>> # input_x[0] = [105.0, 105.0, 105.0] / [3.0, 3.0, 3.0] = [35.0, 35.0, 35.0]
2302        >>> # input_x[1] = [315.0, 315.0, 315.0] / [1.0, 1.0, 1.0] = [315.0, 315.0, 315.0]
2303        >>> # step 2: [1, 1]
2304        >>> # input_x[1] = [315.0, 315.0, 315.0] / [5.0, 5.0, 5.0] = [63.0 63.0 63.0]
2305        >>> # input_x[1] = [63.0 63.0 63.0] / [7.0, 7.0, 7.0] = [9.0, 9.0, 9.0]
2306        >>> indices = Tensor(np.array([[1, 0], [1, 1]]), mindspore.int32)
2307        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
2308        ...                            [[5.0, 5.0, 5.0], [7.0, 7.0, 7.0]]]), mindspore.float32)
2309        >>> output = ops.scatter_div(input_x, indices, updates)
2310        >>> print(output)
2311        [[35. 35. 35.]
2312         [ 9.  9.  9.]]
2313    """
2314    return scatter_div_(input_x, indices, updates)
2315
2316
2317def scatter_update(input_x, indices, updates):
2318    r"""
2319    Updates tensor values by using input indices and value.
2320
2321    Using given values to update tensor value, along with the input indices.
2322
2323    for each :math:`i, ..., j` in `indices.shape`:
2324
2325    .. math::
2326
2327        \text{input_x}[\text{indices}[i, ..., j], :] = \text{updates}[i, ..., j, :]
2328
2329    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
2330    If they have different data types, the lower priority data type will be converted to
2331    the relatively highest priority data type.
2332
2333    Args:
2334        input_x (Parameter): The target tensor, with data type of Parameter.
2335        indices (Tensor): The index of the input tensor, with int32 or int64 data type.
2336            If there are duplicates in indices, the order for updating is undefined.
2337        updates (Tensor): The tensor to update the input tensor, has the same type as input,
2338            and updates.shape = indices.shape + input_x.shape[1:].
2339
2340    Returns:
2341        Tensor, has the same shape and type as `input_x`.
2342
2343    Raises:
2344        TypeError: If `indices` is not an int32 or an int64.
2345        ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
2346        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
2347                      is required when data type conversion of Parameter is not supported.
2348
2349    Supported Platforms:
2350        ``Ascend`` ``GPU`` ``CPU``
2351
2352    Examples:
2353        >>> import mindspore
2354        >>> import numpy as np
2355        >>> from mindspore import Tensor, ops
2356        >>> np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
2357        >>> input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name="x")
2358        >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
2359        >>> np_updates = np.array([[2.0, 1.2, 1.0], [3.0, 1.2, 1.0]])
2360        >>> updates = Tensor(np_updates, mindspore.float32)
2361        >>> output = ops.scatter_update(input_x, indices, updates)
2362        >>> print(output)
2363        [[2. 1.2  1.]
2364         [3. 1.2  1.]]
2365    """
2366    return scatter_update_(input_x, indices, updates)
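

# Illustrative sketch (hypothetical helper, not part of the public API;
# assumes mindspore.Parameter via the module-level `ms` import): the rows
# named in `indices` are overwritten wholesale; recall from the docstring
# that duplicate indices update in an undefined order.
def _scatter_update_rows_sketch():
    p = ms.Parameter(Tensor(np.zeros((3, 2)), mstype.float32), name="p")
    idx = Tensor(np.array([2, 0]), mstype.int32)
    upd = Tensor(np.array([[1., 1.], [2., 2.]]), mstype.float32)
    return scatter_update(p, idx, upd)  # rows 2 and 0 replaced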
2367
2368
2369def scatter_nd_add(input_x, indices, updates, use_locking=False):
2370    r"""
2371    Applies sparse addition to individual values or slices in a tensor.
2372
2373    Using given values to update tensor value through the add operation, along with the input indices.
2374    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
2375
2376    `input_x` has rank P and `indices` has rank Q where `Q >= 2`.
2377
2378    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.
2379
2380    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`.
2381
2382    `updates` is a tensor of rank `Q-1+P-N`. Its shape is:
2383    :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.
2384
2385    Args:
2386        input_x (Parameter): The target tensor, with data type of Parameter.
2387        indices (Tensor): The index to do add operation whose data type must be mindspore.int32 or mindspore.int64.
2388            The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
2389        updates (Tensor): The tensor doing the addition operation with `input_x`,
2390            the data type is same as `input_x`, the shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
2391        use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .
2392
2393    Returns:
2394        Tensor, the updated `input_x`, has the same shape and type as `input_x`.
2395
2396    Raises:
2397        TypeError: If the dtype of `use_locking` is not bool.
2398        TypeError: If the dtype of `indices` is not int32 or int64.
2399        TypeError: If dtype of `input_x` and `updates` are not the same.
2400        ValueError: If the shape of `updates` is not equal to `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
2401        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
2402                      is required when data type conversion of Parameter is not supported.
2403
2404    Supported Platforms:
2405        ``Ascend`` ``GPU`` ``CPU``
2406
2407    Examples:
2408        >>> import mindspore
2409        >>> import numpy as np
2410        >>> from mindspore import Tensor, ops, Parameter
2411        >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
2412        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
2413        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
2414        >>> output = ops.scatter_nd_add(input_x, indices, updates, False)
2415        >>> print(output)
2416        [ 1. 10.  9.  4. 12.  6.  7. 17.]
2417        >>> input_x = Parameter(Tensor(np.zeros((4, 4, 4)), mindspore.int32))
2418        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
2419        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
2420        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
2421        >>> output = ops.scatter_nd_add(input_x, indices, updates, False)
2422        >>> print(output)
2423        [[[1 1 1 1]
2424          [2 2 2 2]
2425          [3 3 3 3]
2426          [4 4 4 4]]
2427         [[0 0 0 0]
2428          [0 0 0 0]
2429          [0 0 0 0]
2430          [0 0 0 0]]
2431         [[5 5 5 5]
2432          [6 6 6 6]
2433          [7 7 7 7]
2434          [8 8 8 8]]
2435         [[0 0 0 0]
2436          [0 0 0 0]
2437          [0 0 0 0]
2438          [0 0 0 0]]]
2439    """
2440    scatter_nd_add_inner = _get_cache_prim(P.ScatterNdAdd)(use_locking)
2441    return scatter_nd_add_inner(input_x, indices, updates)
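

# Illustrative sketch (hypothetical helper, not part of the public API;
# assumes mindspore.Parameter via the module-level `ms` import) of the shape
# rule above: with indices of shape (..., N), `updates` must have shape
# indices.shape[:-1] + input_x.shape[N:].
def _scatter_nd_add_shapes_sketch():
    p = ms.Parameter(Tensor(np.zeros((4, 3)), mstype.float32), name="p")
    idx = Tensor(np.array([[1], [3]]), mstype.int32)  # N = 1
    upd = Tensor(np.ones((2, 3)), mstype.float32)     # (2,) + (3,)
    return scatter_nd_add(p, idx, upd)                # rows 1 and 3 get +1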
2442
2443
2444def scatter_nd_sub(input_x, indices, updates, use_locking=False):
2445    r"""
2446    Applies sparse subtraction to individual values or slices in a tensor.
2447
2448    Using given values to update tensor value through the subtraction operation, along with the input indices.
2449    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
2450
2451    `input_x` has rank P and `indices` has rank Q where `Q >= 2`.
2452
2453    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.
2454
2455    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`.
2456
2457    `updates` is a tensor of rank `Q-1+P-N`. Its shape is:
2458    :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.
2459
2460    Args:
2461        input_x (Parameter): The target tensor, with data type of Parameter.
2462        indices (Tensor): The index of input tensor, with int32 or int64 data type.
2463            The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
2464        updates (Tensor): The tensor doing the subtraction operation with `input_x`, has the same type as input.
2465            The shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
2466        use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .
2467
2468    Returns:
2469        Tensor, has the same shape and type as `input_x`.
2470
2471    Raises:
2472        TypeError: If the dtype of `use_locking` is not bool.
2473        TypeError: If the dtype of `indices` is not int32 or int64.
2474        TypeError: If dtype of `input_x` and `updates` are not the same.
2475        ValueError: If the shape of `updates` is not equal to `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
2476        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
2477                      is required when data type conversion of Parameter is not supported.
2478
2479    Supported Platforms:
2480        ``Ascend`` ``GPU`` ``CPU``
2481
2482    Examples:
2483        >>> import mindspore
2484        >>> import numpy as np
2485        >>> from mindspore import Tensor, ops, Parameter
2486        >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
2487        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
2488        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
2489        >>> output = ops.scatter_nd_sub(input_x, indices, updates, False)
2490        >>> print(output)
2491        [ 1. -6. -3.  4. -2.  6.  7. -1.]
2492        >>> input_x = Parameter(Tensor(np.zeros((4, 4, 4)), mindspore.int32))
2493        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
2494        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
2495        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
2496        >>> output = ops.scatter_nd_sub(input_x, indices, updates, False)
2497        >>> print(output)
2498        [[[-1 -1 -1 -1]
2499          [-2 -2 -2 -2]
2500          [-3 -3 -3 -3]
2501          [-4 -4 -4 -4]]
2502         [[ 0  0  0  0]
2503          [ 0  0  0  0]
2504          [ 0  0  0  0]
2505          [ 0  0  0  0]]
2506         [[-5 -5 -5 -5]
2507          [-6 -6 -6 -6]
2508          [-7 -7 -7 -7]
2509          [-8 -8 -8 -8]]
2510         [[ 0  0  0  0]
2511          [ 0  0  0  0]
2512          [ 0  0  0  0]
2513          [ 0  0  0  0]]]
2514    """
2515    scatter_nd_sub_inner = _get_cache_prim(P.ScatterNdSub)(use_locking)
2516    return scatter_nd_sub_inner(input_x, indices, updates)
2517
2518
2519def scatter_nd_mul(input_x, indices, updates, use_locking=False):
2520    r"""
2521    Applies sparse multiplication to individual values or slices in a tensor.
2522
2523    Using given values to update parameter value through the multiplication operation, along with the input indices.
2524    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
2525
2526    `input_x` has rank P and `indices` has rank Q, where `Q >= 2`.
2527
2528    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.
2529
2530    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`.
2531
2532    `updates` is a tensor of rank `Q-1+P-N`. Its shape is:
2533    :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.
2534
2535    Args:
2536        input_x (Parameter): Input parameter.
2537        indices (Tensor): The index to do multiplication operation whose data type must be mindspore.int32 or
2538            mindspore.int64. The rank of indices must be at least 2 and `indices.shape[-1] <= len(shape)`.
2539        updates (Tensor): The tensor to do the multiplication operation with `input_x`.
2540            The data type is same as `input_x`, and the shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
2541        use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .
2542
2543    Returns:
2544        Tensor, the updated `input_x`, has the same shape and type as `input_x`.
2545
2546    Raises:
2547        TypeError: If the dtype of `use_locking` is not bool.
2548        TypeError: If the dtype of `indices` is not int32 or int64.
2549        TypeError: If dtype of `input_x` and `updates` are not the same.
2550        ValueError: If the shape of `updates` is not equal to `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
2551        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
2552                      is required when data type conversion of Parameter is not supported.
2553
2554    Supported Platforms:
2555        ``GPU`` ``CPU``
2556
2557    Examples:
2558        >>> import mindspore
2559        >>> import numpy as np
2560        >>> from mindspore import Tensor, ops, Parameter
2561        >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
2562        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
2563        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
2564        >>> output = ops.scatter_nd_mul(input_x, indices, updates)
2565        >>> print(output)
2566        [ 1. 16. 18.  4. 35.  6.  7. 72.]
2567        >>> input_x = Parameter(Tensor(np.ones((4, 4, 4)), mindspore.int32))
2568        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
2569        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
2570        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
2571        >>> output = ops.scatter_nd_mul(input_x, indices, updates)
2572        >>> print(output)
2573        [[[1 1 1 1]
2574          [2 2 2 2]
2575          [3 3 3 3]
2576          [4 4 4 4]]
2577         [[1 1 1 1]
2578          [1 1 1 1]
2579          [1 1 1 1]
2580          [1 1 1 1]]
2581         [[5 5 5 5]
2582          [6 6 6 6]
2583          [7 7 7 7]
2584          [8 8 8 8]]
2585         [[1 1 1 1]
2586          [1 1 1 1]
2587          [1 1 1 1]
2588          [1 1 1 1]]]
2589    """
2590    scatter_nd_mul_inner = _get_cache_prim(ScatterNdMul)(use_locking)
2591    return scatter_nd_mul_inner(input_x, indices, updates)
2592
2593
2594def scatter_nd_div(input_x, indices, updates, use_locking=False):
2595    r"""
2596    Applies sparse division to individual values or slices in a tensor.
2597
2598    Using given values to update tensor value through the div operation, along with the input indices.
2599    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
2600
2601    `input_x` has rank P and `indices` has rank Q, where `Q >= 2`.
2602
2603    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.
2604
2605    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`.
2606
2607    `updates` is a tensor of rank `Q-1+P-N`. Its shape is:
2608    :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.
2609
2610    Args:
2611        input_x (Parameter): The target tensor, with data type of Parameter.
        indices (Tensor): The index to do the div operation on, whose data type must be mindspore.int32 or mindspore.int64.
            The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(input_x.shape)`.
        updates (Tensor): The tensor to do the div operation with `input_x`.
            The data type is the same as `input_x`, and the shape is `indices.shape[:-1] + input_x.shape[indices.shape[-1]:]`.
2616        use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .
2617
2618    Returns:
2619        Tensor, the updated `input_x`, has the same shape and type as `input_x`.
2620
2621    Raises:
2622        TypeError: If the dtype of `use_locking` is not bool.
2623        TypeError: If the dtype of `indices` is not int32 or int64.
        TypeError: If the dtypes of `input_x` and `updates` are not the same.
        ValueError: If the shape of `updates` is not equal to `indices.shape[:-1] + input_x.shape[indices.shape[-1]:]`.
        RuntimeError: If a data type conversion between `input_x` and `updates` is required
            but data type conversion of Parameter is not supported.
2628
2629    Supported Platforms:
2630        ``GPU`` ``CPU``
2631
2632    Examples:
2633        >>> import mindspore
2634        >>> import numpy as np
2635        >>> from mindspore import Tensor, ops, Parameter
2636        >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
2637        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
2638        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
2639        >>> output = ops.scatter_nd_div(input_x, indices, updates, False)
2640        >>> print(output)
2641        [1.         0.25       0.5        4.         0.71428573 6.
2642         7.         0.8888889 ]
2643        >>> input_x = Parameter(Tensor(np.ones((4, 4, 4)), mindspore.float32))
2644        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
2645        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
2646        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.float32)
2647        >>> output = ops.scatter_nd_div(input_x, indices, updates, False)
2648        >>> print(output)
2649        [[[1.         1.         1.         1.        ]
2650          [0.5        0.5        0.5        0.5       ]
2651          [0.33333334 0.33333334 0.33333334 0.33333334]
2652          [0.25       0.25       0.25       0.25      ]]
2653         [[1.         1.         1.         1.        ]
2654          [1.         1.         1.         1.        ]
2655          [1.         1.         1.         1.        ]
2656          [1.         1.         1.         1.        ]]
2657         [[0.2        0.2        0.2        0.2       ]
2658          [0.16666667 0.16666667 0.16666667 0.16666667]
2659          [0.14285715 0.14285715 0.14285715 0.14285715]
2660          [0.125      0.125      0.125      0.125     ]]
2661         [[1.         1.         1.         1.        ]
2662          [1.         1.         1.         1.        ]
2663          [1.         1.         1.         1.        ]
2664          [1.         1.         1.         1.        ]]]
2665    """
2666    scatter_nd_div_inner = _get_cache_prim(P.ScatterNdDiv)(use_locking)
2667    return scatter_nd_div_inner(input_x, indices, updates)
2668
2669
2670def scatter_nd_max(input_x, indices, updates, use_locking=False):
2671    r"""
    Applies sparse maximum to individual values or slices in a tensor.

    Using the given values and the input indices, updates the parameter value through the max operation.
    This operation outputs `input_x` after the update is done, which makes it convenient to use the updated value.
2676
2677    `input_x` has rank P and `indices` has rank Q where `Q >= 2`.
2678
2679    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.
2680
2681    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`.
2682
2683    `updates` is a tensor of rank `Q-1+P-N`. Its shape is:
2684    :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.
2685
2686    Args:
2687        input_x (Parameter): The target tensor, with data type of Parameter.
        indices (Tensor): The index to do the maximum operation on, whose data type must be mindspore.int32 or mindspore.int64.
            The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(input_x.shape)`.
        updates (Tensor): The tensor to do the max operation with `input_x`.
            The data type is the same as `input_x`, and the shape is `indices.shape[:-1] + input_x.shape[indices.shape[-1]:]`.
2692        use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .
2693
2694    Returns:
2695        Tensor, the updated `input_x`, has the same shape and type as `input_x`.
2696
2697    Raises:
2698        TypeError: If the dtype of `use_locking` is not bool.
2699        TypeError: If the dtype of `indices` is not int32 or int64.
        TypeError: If the dtypes of `input_x` and `updates` are not the same.
        ValueError: If the shape of `updates` is not equal to `indices.shape[:-1] + input_x.shape[indices.shape[-1]:]`.
        RuntimeError: If a data type conversion between `input_x` and `updates` is required
            but data type conversion of Parameter is not supported.
2704
2705    Supported Platforms:
2706        ``Ascend`` ``GPU`` ``CPU``
2707
2708    Examples:
2709        >>> import mindspore
2710        >>> import numpy as np
2711        >>> from mindspore import Tensor, ops, Parameter
2712        >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
2713        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
2714        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
2715        >>> output = ops.scatter_nd_max(input_x, indices, updates, False)
2716        >>> print(output)
2717        [1. 8. 6. 4. 7. 6. 7. 9.]
2718        >>> input_x = Parameter(Tensor(np.ones((4, 4, 4)), mindspore.int32))
2719        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
2720        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
2721        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
2722        >>> output = ops.scatter_nd_max(input_x, indices, updates, False)
2723        >>> print(output)
2724        [[[1 1 1 1]
2725          [2 2 2 2]
2726          [3 3 3 3]
2727          [4 4 4 4]]
2728         [[1 1 1 1]
2729          [1 1 1 1]
2730          [1 1 1 1]
2731          [1 1 1 1]]
2732         [[5 5 5 5]
2733          [6 6 6 6]
2734          [7 7 7 7]
2735          [8 8 8 8]]
2736         [[1 1 1 1]
2737          [1 1 1 1]
2738          [1 1 1 1]
2739          [1 1 1 1]]]
2740    """
2741    scatter_nd_max_inner = _get_cache_prim(ScatterNdMax)(use_locking)
2742    return scatter_nd_max_inner(input_x, indices, updates)
2743
2744
2745def scatter_nd_min(input_x, indices, updates, use_locking=False):
2746    r"""
    Applies sparse minimum to individual values or slices in a tensor.

    Using the given values and the input indices, updates the tensor value through the min operation.
    This operation outputs `input_x` after the update is done, which makes it convenient to use the updated value.
2751
2752    `input_x` has rank P and `indices` has rank Q where `Q >= 2`.
2753
2754    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.
2755
2756    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`.
2757
2758    `updates` is a tensor of rank `Q-1+P-N`. Its shape is:
2759    :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.
2760
2761    Args:
2762        input_x (Parameter): The target tensor, with data type of Parameter.
        indices (Tensor): The index to do the min operation on, whose data type must be mindspore.int32 or mindspore.int64.
            The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(input_x.shape)`.
        updates (Tensor): The tensor to do the min operation with `input_x`.
            The data type is the same as `input_x`, and the shape is `indices.shape[:-1] + input_x.shape[indices.shape[-1]:]`.
2767        use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .
2768
2769    Returns:
2770        Tensor, the updated `input_x`, has the same shape and type as `input_x`.
2771
2772    Raises:
2773        TypeError: If the dtype of `use_locking` is not bool.
2774        TypeError: If the dtype of `indices` is not int32 or int64.
        TypeError: If the dtypes of `input_x` and `updates` are not the same.
        ValueError: If the shape of `updates` is not equal to `indices.shape[:-1] + input_x.shape[indices.shape[-1]:]`.
        RuntimeError: If a data type conversion between `input_x` and `updates` is required
            but data type conversion of Parameter is not supported.
2779
2780    Supported Platforms:
2781        ``Ascend`` ``GPU`` ``CPU``
2782
2783    Examples:
2784        >>> import mindspore
2785        >>> import numpy as np
2786        >>> from mindspore import Tensor, ops, Parameter
2787        >>> input_x = Parameter(Tensor(np.ones(8) * 10, mindspore.float32), name="x")
2788        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
2789        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
2790        >>> output = ops.scatter_nd_min(input_x, indices, updates, False)
2791        >>> print(output)
2792        [10.  8.  6. 10.  7. 10. 10.  9.]
2793        >>> input_x = Parameter(Tensor(np.ones((4, 4, 4)) * 10, mindspore.int32))
2794        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
2795        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
2796        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
2797        >>> output = ops.scatter_nd_min(input_x, indices, updates, False)
2798        >>> print(output)
2799        [[[ 1  1  1  1]
2800          [ 2  2  2  2]
2801          [ 3  3  3  3]
2802          [ 4  4  4  4]]
2803         [[10 10 10 10]
2804          [10 10 10 10]
2805          [10 10 10 10]
2806          [10 10 10 10]]
2807         [[ 5  5  5  5]
2808          [ 6  6  6  6]
2809          [ 7  7  7  7]
2810          [ 8  8  8  8]]
2811         [[10 10 10 10]
2812          [10 10 10 10]
2813          [10 10 10 10]
2814          [10 10 10 10]]]
2815    """
2816    scatter_nd_min_inner = _get_cache_prim(P.ScatterNdMin)(use_locking)
2817    return scatter_nd_min_inner(input_x, indices, updates)
2818
2819
2820def sort(input_x, axis=-1, descending=False):
2821    r"""
2822    Sorts the elements of the input tensor along the given dimension in the specified order.
2823
2824    Args:
        input_x (Tensor): The input tensor to sort.
            The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
        axis (int, optional): The dimension to sort along. Default: ``-1``, meaning the last dimension.
2828            The Ascend backend only supports sorting the last dimension.
2829        descending (bool, optional): Controls the sort order. If `descending` is True, the elements
2830            are sorted in descending order, or else sorted in ascending order. Default: ``False`` .
2831
2832    .. warning::
        Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
        Using float32 may cause a loss of accuracy.
2835
    Returns:
2838        - y1, a tensor whose values are the sorted values, with the same shape and data type as input.
2839        - y2, a tensor that consists of the indices of the elements in the original input tensor.
2840          Data type is int32.
2841
2842    Raises:
2843        TypeError: If `axis` is not an int.
2844        TypeError: If `descending` is not a bool.
        TypeError: If the dtype of `input_x` is not one of the following: float16, float32, uint8, int8, int16, int32, int64.
        ValueError: If `axis` is not in the range of [-len(input_x.shape), len(input_x.shape)).
2847
2848    Supported Platforms:
2849        ``Ascend`` ``GPU`` ``CPU``
2850
2851    Examples:
2852        >>> import mindspore
2853        >>> import numpy as np
2854        >>> from mindspore import Tensor, ops
2855        >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
2856        >>> output = ops.sort(x)
2857        >>> # The output below is based on the Ascend platform.
2858        >>> print(output)
2859        (Tensor(shape=[3, 3], dtype=Float16, value=
2860        [[ 1.0000e+00,  2.0000e+00,  8.0000e+00],
2861        [ 3.0000e+00,  5.0000e+00,  9.0000e+00],
2862        [ 4.0000e+00,  6.0000e+00,  7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int32, value=
2863        [[2, 1, 0],
2864        [2, 0, 1],
2865        [0, 1, 2]]))
2866    """
2867    _sort = _get_cache_prim(P.Sort)(axis, descending)
2868    return _sort(input_x)
2869
2870
2871def sort_ext(input, *, dim=-1, descending=False, stable=False):
2872    r"""
2873    Sorts the elements of the input tensor along the given dimension in the specified order.
2874
2875    .. warning::
        Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
        Using float32 may cause a loss of accuracy.
2878
2879    Args:
        input (Tensor): The input tensor to sort.
            The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
2882
2883    Keyword Args:
        dim (int, optional): The dimension to sort along. Default: ``-1``, meaning the last dimension.
2885        descending (bool, optional): Controls the sort order. If `descending` is True, the elements
2886            are sorted in descending order, or else sorted in ascending order. Default: ``False`` .
        stable (bool, optional): Controls the stability of the sort. If `stable` is True, the sorting routine
            becomes stable, preserving the order of equivalent elements. Default: ``False`` .
2889
2890    Returns:
2891        - y1, a tensor whose values are the sorted values, with the same shape and data type as input.
2892        - y2, a tensor that consists of the indices of the elements in the original input tensor.
2893          Data type is int64.
2894
2895    Raises:
2896        TypeError: If `dim` is not an int.
2897        TypeError: If `descending` is not a bool.
        TypeError: If the dtype of `input` is not one of the following: float16, float32, uint8, int8, int16, int32, int64, bfloat16.
2899        TypeError: If `stable` is not a bool.
        ValueError: If `dim` is not in the range of [-len(input.shape), len(input.shape)).
2901
2902    Supported Platforms:
2903        ``Ascend``
2904
2905    Examples:
2906        >>> import mindspore
2907        >>> import numpy as np
2908        >>> from mindspore import Tensor, ops
2909        >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
2910        >>> output = ops.function.array_func.sort_ext(x)
2911        >>> # The output below is based on the Ascend platform.
2912        >>> print(output)
2913        (Tensor(shape=[3, 3], dtype=Float16, value=
2914        [[ 1.0000e+00,  2.0000e+00,  8.0000e+00],
2915        [ 3.0000e+00,  5.0000e+00,  9.0000e+00],
2916        [ 4.0000e+00,  6.0000e+00,  7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int64, value=
2917        [[2, 1, 0],
2918        [2, 0, 1],
2919        [0, 1, 2]]))
2920    """
2921    return sort_ext_(input, dim, descending, stable)
2922
2923
2924def argsort(input, axis=-1, descending=False):
2925    r"""
    Sorts the input tensor along the given dimension in the specified order and returns the sorted indices.
2927
2928    Args:
        input (Tensor): The input tensor to sort.
        axis (int): The axis to sort along. Default: ``-1``, meaning the last dimension.
2931            The Ascend backend only supports sorting the last dimension.
2932        descending (bool): The sort order. If `descending` is True then the elements
2933            are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
2934
2935    Returns:
2936        Tensor, the indices of sorted input tensor. Data type is int32.
2937
2938    Supported Platforms:
2939        ``Ascend`` ``GPU`` ``CPU``
2940
2941    Examples:
2942        >>> import mindspore
2943        >>> import numpy as np
2944        >>> from mindspore import Tensor, ops
2945        >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
2946        >>> sort = ops.argsort(x)
2947        >>> print(sort)
2948        [[2 1 0]
2949         [2 0 1]
2950         [0 1 2]]
2951    """
2952    _sort = _get_cache_prim(P.Sort)(axis, descending)
2953    _, arg_sort = _sort(input)
2954    return arg_sort
2955
2956
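# A quick NumPy cross-check of the sort/argsort contract above, for illustration
# only: np.argsort matches ops.argsort for ascending order, and descending order
# can be approximated by flipping the ascending result (note that the relative
# order of equal elements may then differ from a true descending sort). This is
# a hypothetical helper, not part of the MindSpore API.
def _argsort_numpy_reference(x, axis=-1, descending=False):
    order = np.argsort(x, axis=axis, kind="stable")
    return np.flip(order, axis=axis) if descending else order

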
2957def gather_elements(input, dim, index):
2958    """
2959    Gathers elements along an axis specified by dim.
2960
2961    For a 3-D tensor, the output is:
2962
2963    .. code-block::
2964
2965        output[i][j][k] = x[index[i][j][k]][j][k]  # if dim == 0
2966
2967        output[i][j][k] = x[i][index[i][j][k]][k]  # if dim == 1
2968
2969        output[i][j][k] = x[i][j][index[i][j][k]]  # if dim == 2
2970
    `input` and `index` must have the same rank, and `index.shape[axis] <= input.shape[axis]`
    must hold for every axis of `input` other than `dim`.
2973
2974    .. warning::
2975        On Ascend, the behavior is unpredictable in the following cases:
2976
2977        - the value of `index` is not in the range `[-input.shape[dim], input.shape[dim])` in forward;
2978        - the value of `index` is not in the range `[0, input.shape[dim])` in backward.
2979
2980    Args:
2981        input (Tensor): The input tensor.
2982        dim (int): The axis along which to index. It must be int32 or int64. The value range is `[-input.ndim,
2983            input.ndim)`.
2984        index (Tensor): The indices of elements to gather. It can be one of the following data types:
            int32, int64. The value range of each index element is `[-input.shape[dim], input.shape[dim])`.
2986
2987    Returns:
2988        Tensor, has the same shape as `index` and has the same data type with `input`.
2989
2990    Raises:
2991        TypeError: If dtype of `dim` or `index` is neither int32 nor int64.
        ValueError: If the rank of `input` is not equal to the rank of `index`.
        ValueError: If, for any dimension other than `dim`, the size of `input` is less than that of `index`.
        ValueError: If the value of `dim` is not in the range `[-input.ndim, input.ndim)`.
2995
2996    Supported Platforms:
2997        ``Ascend`` ``GPU`` ``CPU``
2998
2999    Examples:
3000        >>> import numpy as np
3001        >>> import mindspore
3002        >>> from mindspore import Tensor
3003        >>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
3004        >>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
3005        >>> dim = 1
3006        >>> output = mindspore.ops.gather_elements(x, dim, index)
3007        >>> print(output)
3008        [[1 1]
3009         [4 3]]
3010    """
3011    return gather_d_(input, dim, index)
3012
3013
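# For host-side verification, gather_elements follows the same contract as
# numpy.take_along_axis once negative indices are normalized:
# output[pos] = input[pos with pos[dim] replaced by index[pos]]. A minimal
# sketch under that assumption (hypothetical helper, illustration only):
def _gather_elements_reference(x, dim, index):
    idx = np.where(index < 0, index + x.shape[dim], index)  # map negatives
    return np.take_along_axis(x, idx, axis=dim)

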
3014def tensor_scatter_add(input_x, indices, updates):
3015    r"""
3016    Creates a new tensor by adding the values from the positions in `input_x` indicated by
3017    `indices`, with values from `updates`. When multiple values are given for the same
3018    index, the updated result will be the sum of all values. This operation is almost
3019    equivalent to using ScatterNdAdd, except that the updates are applied on output `Tensor`
3020    instead of input `Parameter`.
3021
    The last axis of `indices` is the depth of each index vector. For each index vector,
3023    there must be a corresponding value in `updates`. The shape of `updates` should be
3024    equal to the shape of `input_x[indices]`. For more details, see Examples.
3025
3026    .. math::
3027        output\left [indices  \right ] = input\_x + update
3028
3029    Note:
        - On GPU, if some values of `indices` are out of bounds, no index error is raised and the
          corresponding `updates` are simply not applied.
        - On CPU, if some values of `indices` are out of bounds, an index error is raised.
        - On Ascend, out-of-bounds checking is not supported; if some values of `indices` are out of bounds,
          unknown errors may occur.
3035
3036    Args:
3037        input_x (Tensor): The input tensor. The dimension of input_x must be no less than indices.shape[-1].
3038        indices (Tensor): The index of input tensor whose data type is int32 or int64.
3039            The rank must be at least 2.
        updates (Tensor): The tensor used to update the input tensor; it has the same type as `input_x`,
            and its shape should be
            equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
3043
3044    Returns:
3045        Tensor, has the same shape and type as `input_x`.
3046
3047    Raises:
3048        TypeError: If dtype of `indices` is neither int32 nor int64.
3049        ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
        RuntimeError: If a value of `indices` is out of bounds for `input_x` on the CPU backend.
3051
3052    Supported Platforms:
3053        ``Ascend`` ``GPU`` ``CPU``
3054
3055    Examples:
3056        >>> import mindspore
3057        >>> import numpy as np
3058        >>> from mindspore import Tensor, nn
3059        >>> from mindspore import ops
3060        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
3061        >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
3062        >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
3063        >>> output = ops.tensor_scatter_add(input_x, indices, updates)
3064        >>> print(output)
3065        [[ 3.1  0.3  3.6]
3066         [ 0.4  0.5 -3.2]]
3067    """
3068
3069    return tensor_scatter_add_(input_x, indices, updates)
3070
3071
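# tensor_scatter_add is an out-of-place scatter-add where duplicate index rows
# accumulate. A minimal NumPy sketch of that semantics via np.add.at, valid for
# the full-depth case where indices.shape[-1] equals the rank of `input_x`
# (each index row then addresses a single element). Hypothetical helper,
# illustration only, not the MindSpore implementation.
def _tensor_scatter_add_reference(x, indices, updates):
    out = np.array(x, copy=True)
    flat_rows = indices.reshape(-1, indices.shape[-1])
    np.add.at(out, tuple(flat_rows.T), updates.reshape(-1))  # duplicates add up
    return out

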
3072def tensor_scatter_sub(input_x, indices, updates):
3073    r"""
3074    Creates a new tensor by subtracting the values from the positions in `input_x` indicated by
3075    `indices`, with values from `updates`. When multiple values are provided for the same
    index, each of these values is subtracted from it in turn. This operation is almost
3077    equivalent to using :class:`mindspore.ops.ScatterNdSub` , except that the updates are applied on output `Tensor`
3078    instead of input `Parameter`.
3079
    The last axis of `indices` is the depth of each index vector. For each index vector,
3081    there must be a corresponding value in `updates`. The shape of `updates` should be
3082    equal to the shape of `input_x[indices]`. For more details, see Examples.
3083
3084    .. math::
3085        output[indices] = input\_x - update
3086
3087    Note:
        On GPU, if some values of `indices` are out of bounds, no index error is raised and the
        corresponding `updates` are simply not applied. On CPU, if some values of `indices` are out
        of bounds, an index error is raised. On Ascend, out-of-bounds checking is not supported; if
        some values of `indices` are out of bounds, unknown errors may occur.
3092
3093    Args:
3094        input_x (Tensor): The input tensor. The dimension of input_x must be no less than indices.shape[-1].
3095        indices (Tensor): The index of input tensor whose data type is int32 or int64.
3096            The rank must be at least 2.
3097        updates (Tensor): The tensor to update the input tensor, has the same type as `input_x`,
3098            and the shape of `updates` should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
3099
3100    Returns:
3101        Tensor, has the same shape and type as `input_x`.
3102
3103    Raises:
3104        TypeError: If dtype of `indices` is neither int32 nor int64.
3105        ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
        RuntimeError: If a value of `indices` is out of bounds for `input_x`.
3107
3108    Supported Platforms:
3109        ``Ascend`` ``GPU`` ``CPU``
3110
3111    Examples:
3112        >>> import mindspore
3113        >>> import numpy as np
3114        >>> from mindspore import Tensor
3115        >>> from mindspore import ops
3116        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
3117        >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
3118        >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
3119        >>> output = ops.tensor_scatter_sub(input_x, indices, updates)
3120        >>> print(output)
3121        [[-3.3000002  0.3        3.6      ]
3122         [ 0.4        0.5       -3.2      ]]
3123    """
3124
3125    return tensor_scatter_sub_(input_x, indices, updates)
3126
3127
3128def tensor_scatter_max(input_x, indices, updates):
3129    r"""
    Creates a new tensor by comparing the value at each position indicated by `indices` in `input_x`
    with the corresponding value in `updates`, and taking the larger of the two for that position.
3132
3133    The last axis of the index is the depth of each index vector. For each index vector,
3134    there must be a corresponding value in `updates`. The shape of `updates` should be
    equal to the shape of `input_x[indices]`.
3136
3137    .. math::
3138        output\left [indices  \right ] = \max(input\_x, update)
3139
3140    Note:
        - On GPU, if some values of `indices` are out of bounds, no index error is raised and the
          corresponding `updates` are simply not applied.
        - On CPU, if some values of `indices` are out of bounds, an index error is raised.
        - On Ascend, out-of-bounds checking is not supported; if some values of `indices` are out of bounds,
          unknown errors may occur.
3146
3147    Args:
3148        input_x (Tensor): The input tensor. The dimension of `input_x` must be no less than indices.shape[-1].
3149        indices (Tensor): The index of input tensor whose data type must be int32 or int64.
3150            The rank must be at least 2.
        updates (Tensor): The tensor to update the `input_x` tensor; it has the same type as `input_x`,
3152            and updates.shape should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
3153
3154    Returns:
3155        Tensor, has the same shape and type as `input_x`.
3156
3157    Raises:
3158        TypeError: If dtype of `indices` is neither int32 nor int64.
3159        ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
        RuntimeError: If a value of `indices` is out of bounds for `input_x` on the CPU backend.
3161
3162    Supported Platforms:
3163        ``GPU`` ``CPU``
3164
3165    Examples:
3166        >>> import mindspore
3167        >>> import numpy as np
3168        >>> from mindspore import Tensor, ops
3169        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
3170        >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
3171        >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
3172        >>> # Next, demonstrate the approximate operation process of this operator:
3173        >>> # 1, indices[0] = [0, 0], indices[1] = [0, 0]
3174        >>> # 2, And input_x[0, 0] = -0.1
3175        >>> # 3, So input_x[indices] = [-0.1, -0.1]
        >>> # 4, Satisfy the above formula: input_x[indices].shape=(2,) == updates.shape=(2,)
3177        >>> output = ops.tensor_scatter_max(input_x, indices, updates)
3178        >>> # 5, Perform the max operation for the first time:
3179        >>> #      first_input_x = Max(input_x[0][0], updates[0]) = [[1.0, 0.3, 3.6], [0.4, 0.5, -3.2]]
3180        >>> # 6, Perform the max operation for the second time:
3181        >>> #      second_input_x = Max(input_x[0][0], updates[1]) = [[2.2, 0.3, 3.6], [0.4, 0.5, -3.2]]
3182        >>> print(output)
3183        [[ 2.2  0.3  3.6]
3184         [ 0.4  0.5 -3.2]]
3185    """
3186    return tensor_scatter_max_(input_x, indices, updates)
3187
3188
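# tensor_scatter_max and tensor_scatter_min follow the same duplicate-friendly
# scheme with np.maximum / np.minimum in place of addition. A sketch for the
# full-depth case (indices.shape[-1] == rank of the input); hypothetical
# helper, illustration only:
def _tensor_scatter_max_reference(x, indices, updates):
    out = np.array(x, copy=True)
    flat_rows = indices.reshape(-1, indices.shape[-1])
    np.maximum.at(out, tuple(flat_rows.T), updates.reshape(-1))
    return out

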
3189def tensor_scatter_min(input_x, indices, updates):
3190    r"""
    Creates a new tensor by comparing the value at each position indicated by `indices` in `input_x`
    with the corresponding value in `updates`, and taking the smaller of the two for that position.
3193
3194    The last axis of the index is the depth of each index vector. For each index vector,
3195    there must be a corresponding value in `updates`. The shape of `updates` should be
3196    equal to the shape of `input_x[indices]`. For more details, see case below.
3197
3198    .. math::
3199        output\left [indices  \right ] = \min(input\_x, update)
3200
3201    Note:
        - On GPU, if some values of `indices` are out of bounds, no index error is raised and the
          corresponding `updates` are simply not applied.
        - On CPU, if some values of `indices` are out of bounds, an index error is raised.
        - On Ascend, out-of-bounds checking is not supported; if some values of `indices` are out of bounds,
          unknown errors may occur.
3207
3208    Args:
3209        input_x (Tensor): The input tensor. The dimension of `input_x` must be no less than indices.shape[-1].
3210        indices (Tensor): The index of input tensor whose data type is int32 or int64.
3211            The rank must be at least 2.
        updates (Tensor): The tensor to update the input tensor; it has the same type as `input_x`,
            and the shape of `updates` should be
            equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
3215
3216    Returns:
3217        Tensor, has the same shape and type as `input_x`.
3218
3219    Raises:
3220        TypeError: If dtype of `indices` is neither int32 nor int64.
3221        ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
        RuntimeError: If a value of `indices` is out of bounds for `input_x` on the CPU backend.
3223
3224    Supported Platforms:
3225        ``Ascend`` ``GPU`` ``CPU``
3226
3227    Examples:
3228        >>> import mindspore
3229        >>> import numpy as np
3230        >>> from mindspore import Tensor
3231        >>> from mindspore import ops
3232        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
3233        >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
3234        >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
3235        >>> output = ops.tensor_scatter_min(input_x, indices, updates)
3236        >>> print(output)
        [[-0.1  0.3  3.6]
         [ 0.4  0.5 -3.2]]
3239    """
3240    return tensor_scatter_min_(input_x, indices, updates)
3241
3242
3243def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none"):
3244    """
3245    Write all elements in `updates` to the index specified by `indices` in `input_x` according to the reduction
3246    operation specified by `reduction`.
3247    `axis` controls the direction of the scatter operation.
3248
3249    `tensor_scatter_elements` takes three inputs `input_x`, `updates` and `indices` of the same rank r >= 1.
3250
3251    For a 3-D tensor, the output is:
3252
3253    .. code-block::
3254
3255        output[indices[i][j][k]][j][k] = updates[i][j][k]  # if axis == 0, reduction == "none"
3256
3257        output[i][indices[i][j][k]][k] += updates[i][j][k]  # if axis == 1, reduction == "add"
3258
3259        output[i][j][indices[i][j][k]] = updates[i][j][k]  # if axis == 2, reduction == "none"
3260
3261    .. warning::
3262        - The order in which updates are applied is nondeterministic, meaning that if there are multiple index vectors
3263          in `indices` that correspond to the same position, the value of that position in the output will be
3264          nondeterministic.
        - On Ascend, `reduction` currently only supports being set to ``"none"``.
3266        - On Ascend, the data type of `input_x` must be float16 or float32.
3267        - This is an experimental API that is subject to change or deletion.
3268
3269    Note:
        If some values of `indices` exceed the upper or lower bounds of the index of `input_x`, no index
        error is raised and the corresponding `updates` are not applied to `input_x`.
3272
3273    Args:
3274        input_x (Tensor): The target tensor. The rank must be at least 1.
        indices (Tensor): The index of `input_x` to do the scatter operation on, whose data type must be
            mindspore.int32 or mindspore.int64. It has the same rank as `input_x`, and the accepted range
            is [-s, s), where s is the size along `axis`.
3277        updates (Tensor): The tensor doing the scatter operation with `input_x`, has the same type as `input_x` and
3278            the same shape as `indices`.
3279        axis (int): Which axis to scatter. Accepted range is [-r, r) where r = rank(input_x). Default: ``0``.
3280        reduction (str): Which reduction operation to scatter, supports ``"none"`` , ``"add"`` . Default: ``"none"``.
            When `reduction` is set to ``"none"``, `updates` will be assigned to `input_x` according to `indices`.
            When `reduction` is set to ``"add"``, `updates` will be added to `input_x` according to `indices`.
3283
3284    Returns:
3285        Tensor, has the same shape and type as `input_x`.
3286
3287    Raises:
3288        TypeError: If `indices` is neither int32 nor int64.
        ValueError: If the rank of any of `input_x`, `indices` and `updates` is less than 1.
3290        ValueError: If the shape of `updates` is not equal to the shape of `indices`.
3291        ValueError: If the rank of `updates` is not equal to the rank of `input_x`.
        RuntimeError: If a data type conversion between `input_x` and `updates` is required
            but data type conversion of Parameter is not supported.
3294
3295    Supported Platforms:
3296        ``Ascend`` ``GPU`` ``CPU``
3297
3298    Examples:
3299        >>> import mindspore
3300        >>> from mindspore import Tensor, ops
3301        >>> from mindspore import Parameter
3302        >>> import numpy as np
3303        >>> input_x = Parameter(Tensor(np.array([[1, 2, 3, 4, 5]]), mindspore.int32), name="x")
3304        >>> indices = Tensor(np.array([[2, 4]]), mindspore.int32)
3305        >>> updates = Tensor(np.array([[8, 8]]), mindspore.int32)
3306        >>> axis = 1
3307        >>> reduction = "none"
3308        >>> output = ops.tensor_scatter_elements(input_x, indices, updates, axis, reduction)
3309        >>> print(output)
3310        [[1 2 8 4 8]]
3311        >>> input_x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.int32), name="x")
3312        >>> indices = Tensor(np.array([[1, -1, 2], [0, 2, 1]]), mindspore.int32)
3313        >>> updates = Tensor(np.array([[1, 2, 2], [4, 5, 8]]), mindspore.int32)
3314        >>> axis = 0
3315        >>> reduction = "add"
3316        >>> output = ops.tensor_scatter_elements(input_x, indices, updates, axis, reduction)
3317        >>> print(output)
3318        [[ 5  2  3]
3319         [ 5  5 14]
3320         [ 7 15 11]]
3321    """
3322    _tensor_scatter_elements = _get_cache_prim(TensorScatterElements)(axis, reduction)
3323    return _tensor_scatter_elements(input_x, indices, updates)
3324
3325
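# With reduction="none", tensor_scatter_elements matches numpy.put_along_axis
# applied to a copy; with reduction="add", duplicate indices must accumulate,
# which the explicit loop below does. Hypothetical reference helper for
# illustration only; negative indices are normalized first.
def _tensor_scatter_elements_reference(x, indices, updates, axis=0, reduction="none"):
    out = np.array(x, copy=True)
    idx = np.where(indices < 0, indices + x.shape[axis], indices)
    if reduction == "none":
        np.put_along_axis(out, idx, updates, axis=axis)
        return out
    for pos in np.ndindex(idx.shape):  # reduction == "add": accumulate
        target = list(pos)
        target[axis] = idx[pos]
        out[tuple(target)] += updates[pos]
    return out

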
3326def scatter(input, axis, index, src):
3327    """
    Updates `input` with the values from `src` according to the specified index.
3329    Refer to :func:`mindspore.ops.tensor_scatter_elements` for more details.
3330
3331    Args:
3332        input (Tensor): The target tensor. The rank of `input` must be at least 1.
3333        axis (int): Which axis to scatter. Accepted range is [-r, r) where r = rank(input).
        index (Tensor): The index to do the update operation on, whose data type must be mindspore.int32 or
            mindspore.int64. It has the same rank as `input`, and the accepted range is [-s, s), where s is
            the size along `axis`.
        src (Tensor): The tensor doing the update operation with `input`; it has the same type as `input`,
            and the shape of `src` should be equal to the shape of `index`.
3338
3339    Returns:
        Tensor, has the same shape and type as `input`.
3341
3342    Raises:
        TypeError: If `index` is neither int32 nor int64.
        ValueError: If the rank of any of `input`, `index` and `src` is less than 1.
        ValueError: If the shape of `src` is not equal to the shape of `index`.
        ValueError: If the rank of `src` is not equal to the rank of `input`.
        RuntimeError: If a data type conversion between `input` and `src` is required
            but data type conversion of Parameter is not supported.
3349
3350    Supported Platforms:
3351        ``Ascend`` ``GPU`` ``CPU``
3352
3353    Examples:
3354        >>> import numpy as np
3355        >>> import mindspore as ms
3356        >>> from mindspore import Tensor, ops
3357        >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
3358        >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
3359        >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
3360        >>> out = ops.scatter(input=input, axis=1, index=index, src=src)
3361        >>> print(out)
3362        [[1. 2. 8. 4. 8.]]
3363        >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3364        >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3365        >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
3366        >>> out = ops.scatter(input=input, axis=0, index=index, src=src)
3367        >>> print(out)
3368        [[1. 2. 3. 0. 0.]
         [0. 0. 0. 0. 0.]
         [4. 5. 6. 0. 0.]
         [0. 0. 0. 0. 0.]
         [7. 8. 9. 0. 0.]]
3373        >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3374        >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3375        >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
3376        >>> out = ops.scatter(input=input, axis=1, index=index, src=src)
3377        >>> print(out)
3378        [[1. 0. 2. 0. 3.]
         [4. 0. 5. 0. 6.]
         [7. 0. 8. 0. 9.]
         [0. 0. 0. 0. 0.]
         [0. 0. 0. 0. 0.]]
3383    """
3384    return ops.tensor_scatter_elements(input_x=input, indices=index, updates=src, axis=axis)
3385
3386
3387def scatter_add_ext(input, dim, index, src):
3388    """
    Adds all elements in `src` to `input` at the indices specified by `index`, along the dimension specified by `dim`.
3390    It takes three inputs `input`, `src` and `index` of the same rank r >= 1.
3391
3392    For a 3-D tensor, the operation updates input as follows:
3393
3394    .. code-block::
3395
3396        input[index[i][j][k]][j][k] += src[i][j][k]  # if dim == 0
3397
3398        input[i][index[i][j][k]][k] += src[i][j][k]  # if dim == 1
3399
3400        input[i][j][index[i][j][k]] += src[i][j][k]  # if dim == 2
3401
3402    Args:
3403        input (Tensor): The target tensor. The rank must be at least 1.
        dim (int): Which dim to scatter. Accepted range is [-r, r) where r = rank(`input`).
3405        index (Tensor): The index of `input` to do scatter operation whose data type must be mindspore.int32 or
3406            mindspore.int64. Same rank as `input`. Except for the dimension specified by `dim`,
3407            the size of each dimension of `index` must be less than or equal to the size of
3408            the corresponding dimension of `input`.
3409        src (Tensor): The tensor doing the scatter operation with `input`, has the same type as `input` and
3410            the size of each dimension must be greater than or equal to that of `index`.
3411
3412    Returns:
3413        Tensor, has the same shape and type as `input`.
3414
3415    Raises:
3416        TypeError: If `index` is neither int32 nor int64.
        ValueError: If the rank of any of `input`, `index` and `src` is less than 1.
3418        ValueError: If the rank of `input`, `index` and `src` is not the same.
3419        ValueError: If, outside dimension `dim`, the size of any dimension of `index` is greater than the size of
            the corresponding dimension of `input`.
3421        ValueError: If the size of any dimension of `src` is less than that of `index`.
3422
3423    Supported Platforms:
3424        ``Ascend``
3425
3426    Examples:
3427        >>> import numpy as np
3428        >>> import mindspore as ms
3429        >>> from mindspore import Tensor, ops
3430        >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
3431        >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
3432        >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
3433        >>> out = ops.function.array_func.scatter_add_ext(input=input, dim=1, index=index, src=src)
3434        >>> print(out)
3435        [[1. 2. 11. 4. 13.]]
3436        >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3437        >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3438        >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
3439        >>> out = ops.function.array_func.scatter_add_ext(input=input, dim=0, index=index, src=src)
3440        >>> print(out)
3441        [[1. 2. 3. 0. 0.]
3442         [0. 0. 0. 0. 0.]
3443         [4. 5. 6. 0. 0.]
3444         [0. 0. 0. 0. 0.]
3445         [7. 8. 9. 0. 0.]]
3446        >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
3447        >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
3448        >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
3449        >>> out = ops.function.array_func.scatter_add_ext(input=input, dim=1, index=index, src=src)
3450        >>> print(out)
3451        [[1. 0. 2. 0. 3.]
3452         [4. 0. 5. 0. 6.]
3453         [7. 0. 8. 0. 9.]
3454         [0. 0. 0. 0. 0.]
3455         [0. 0. 0. 0. 0.]]
3456    """
3457    return scatter_add_ext_op(input, dim, index, src)
3458
3459
3460def _get_slice_scatter_const(x_shape, axis, start, end, step):
3461    r"""
    Calculate the rank of the input, the normalized axis and the list of embedded indices.
3463    """
3464    x_rank = len(x_shape)
3465    axis = axis if axis >= 0 else axis + x_rank
3466    start = start if start is not None else 0
    start = start if start >= 0 else start + x_shape[axis]  # negative `start` wraps by the size along `axis`
3468    end = end if end is not None else x_shape[axis]
3469    end = end if end >= 0 else end + x_shape[axis]
3470    end = end if end < x_shape[axis] else x_shape[axis]
3471    index = list(builtins.range(start, end, step))
3472    return x_rank, index, axis
3473
3474
3475def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
3476    r"""
    Slices the input Tensor in the specified dimension and overwrites the sliced elements with the source Tensor.
    The `input` is sliced along the specified dimension. The start position of the slice is `start`,
    the end position is `end`, and the step size is `step`.
    Then the slicing result is overwritten with `src` to get the output Tensor.
3481
3482    Args:
3483        input (Tensor): The target Tensor.
3484        src (Tensor): The source Tensor.
3485        axis (int, optional): The dimension of `input` to be sliced. Default: ``0`` .
3486        start (int, optional): The start index to slice in the specified dimension.
3487            Default: ``None``, `start` is ``0`` .
3488        end (int, optional): The end index to slice in the specified dimension.
3489            Default: ``None``, `end` is the length of `input` in the specified dimension.
3490        step (int, optional): Step size. Default: ``1``, the distance from the next slice element is ``1`` .
3491
3492    Returns:
3493        Tensor after embedding, has the same shape and type as `input` .
3494
3495    Raises:
        ValueError: If the shape of `src` is not the same as the shape of the slice of `input`.
3497        TypeError: If `input` is not a Tensor.
3498        TypeError: If `src` is not a Tensor.
3499        TypeError: If `axis` or `step` is not an integer.
3500        TypeError: If `start` or `end` is not ``None`` or an integer.
3501
3502    Supported Platforms:
3503        ``Ascend`` ``GPU`` ``CPU``
3504
3505    Examples:
3506        >>> import mindspore as ms
3507        >>> a = ms.ops.zeros((4, 6))
3508        >>> b = ms.ops.ones((4, 3))
3509        >>> output = ms.ops.slice_scatter(a, b, axis=1, start=0, end=5, step=2)
3510        >>> print(output)
3511        [[1. 0. 1. 0. 1. 0.]
3512         [1. 0. 1. 0. 1. 0.]
3513         [1. 0. 1. 0. 1. 0.]
3514         [1. 0. 1. 0. 1. 0.]]
3515    """
3516    _check_is_tensor("input", input, "slice_scatter")
3517    _check_is_tensor("src", src, "slice_scatter")
3518    input_shape = input.shape
3519    input_rank, index, axis = _get_slice_scatter_const(input_shape, axis, start, end, step)
3520
3521    src_shape = src.shape
3522    index_shape = input_shape[:axis] + (len(index),) + input_shape[axis + 1:]
3523    index_tensor = ms.Tensor(index)
3524    for _ in builtins.range(axis):
3525        index_tensor = index_tensor.expand_dims(0)
3526
3527    if index_shape != src_shape:
        raise ValueError(f"For slice_scatter, src shape should be equal to the slice shape, "
                         f"but got src shape {src_shape} and slice shape {index_shape}")
3530    for _ in builtins.range(input_rank - axis - 1):
3531        index_tensor = index_tensor.expand_dims(-1)
3532    index_tensor = index_tensor.broadcast_to(src.shape)
3533    if index_tensor.dtype not in mstype.int_type:
3534        index_tensor = index_tensor.astype(mstype.int64)
3535    return tensor_scatter_elements(input, axis=axis, indices=index_tensor, updates=src)
3536
3537
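# slice_scatter is the out-of-place analogue of assigning into a strided slice:
# for the docstring example it computes what `a[:, 0:5:2] = b` would produce
# without mutating `a`. A NumPy sketch of that contract (hypothetical helper,
# illustration only; `src` must already have the slice's shape):
def _slice_scatter_reference(x, src, axis=0, start=None, end=None, step=1):
    out = np.array(x, copy=True)
    sl = [slice(None)] * out.ndim
    sl[axis] = slice(start, end, step)
    out[tuple(sl)] = src
    return out

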
3538def select_scatter(input, src, axis, index):
3539    r"""
    Scatters `src` into `input` at the given `index` along the specified dimension `axis` of `input`.
3541
3542    Args:
3543        input (Tensor): The target Tensor.
3544        src (Tensor): The source Tensor.
3545        axis (int): The dimension of `input` to be embedded.
3546        index (int): The location of scattering on the specified dimension.
3547
3548    Returns:
3549        Tensor after embedding, has the same shape and type as `input` .
3550
3551    Raises:
        ValueError: If the shape of `src` is not the same as the shape scattered over `input`.
3553        TypeError: If `input` is not a Tensor.
3554        TypeError: If `src` is not a Tensor.
3555        TypeError: If `axis` or `index` is not an integer.
3556
3557    Supported Platforms:
3558        ``Ascend`` ``GPU`` ``CPU``
3559
3560    Examples:
3561        >>> import mindspore as ms
3562        >>> a = ms.ops.zeros((2, 3, 3))
3563        >>> b = ms.ops.ones((2, 3))
3564        >>> output = ms.ops.select_scatter(a, b, axis=1, index=1)
3565        >>> print(output)
3566        [[[0. 0. 0.]
3567          [1. 1. 1.]
3568          [0. 0. 0.]]
3569         [[0. 0. 0.]
3570          [1. 1. 1.]
3571          [0. 0. 0.]]]
3572    """
3573    _check_is_tensor("input", input, "select_scatter")
3574    _check_is_tensor("src", src, "select_scatter")
3575    src = src.expand_dims(axis=axis)
3576    x_rank = input.ndim
3577    axis = axis if axis >= 0 else axis + x_rank
3578    index = index if index >= 0 else index + input.shape[axis]
3579    return slice_scatter(input, src, axis, start=index, end=index + 1)
3580
3581
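# select_scatter is slice_scatter restricted to a single position along `axis`;
# in NumPy terms it is an out-of-place version of indexing assignment at that
# position. Hypothetical sketch, illustration only:
def _select_scatter_reference(x, src, axis, index):
    out = np.array(x, copy=True)
    sl = [slice(None)] * out.ndim
    sl[axis] = index                 # integer index drops the axis, like `src`
    out[tuple(sl)] = src
    return out

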
3582def space_to_batch_nd(input_x, block_size, paddings):
3583    r"""
3584    Divides a tensor's spatial dimensions into blocks and combines the block sizes with the original batch.
3585
3586    This operation will divide spatial dimensions into blocks with `block_size`,
3587    and after division, the output tensor's spatial dimension is the corresponding number of blocks.
3588    The output tensor's batch dimension is the product of the original batch and the product of `block_size`.
3589    Before division, the spatial dimensions of the input are zero padded according to paddings if necessary.
3590    Assume input shape is :math:`(n, c_1, ... c_k, w_1, ..., w_M)`, then the shape of the output tensor will be
3591    :math:`(n', c_1, ... c_k, w'_1, ..., w'_M)`, where
3592
3593    .. math::
3594        \begin{array}{ll} \\
3595            n' = n*(block\_size[0] * ... * block\_size[M]) \\
3596            w'_i = (w_i + paddings[i][0] + paddings[i][1])//block\_size[i]
3597        \end{array}
3598
3599    Args:
3600        input_x (Tensor): The input tensor. It must be a 4-D tensor on Ascend.
        block_size (Union[list(int), tuple(int), int]): The block size for dividing the spatial dimensions,
            with every value greater than 1. If `block_size` is a tuple or list, its length M corresponds to
            the number of spatial dimensions. If `block_size` is an int, the block sizes of all M dimensions
            are the same and equal to `block_size`. M must be 2 on Ascend.
        paddings (Union[tuple, list]): The padding values for the spatial dimensions, containing M sub-lists
            of 2 integer values each. All values must be greater than or equal to 0.
            `paddings[i]` specifies the paddings for spatial dimension i,
            which corresponds to the input dimension i + offset.
            It is required that input_shape[i+offset]+paddings[i][0]+paddings[i][1] is divisible by block_size[i].
            M must be 2 on Ascend.
3611
3612    Returns:
3613        Tensor, the output tensor with the same data type as input.
3614
3615    Raises:
3616        ValueError: If `block_size` is not one dimensional when `block_size` is a list or tuple.
3617        ValueError: If the length of `block_size` is not 2 on Ascend.
3618        ValueError: If the element of `block_size` is not an integer larger than 1.
3619        ValueError: If shape of `paddings` is not (M, 2), where M is the length of `block_size`.
        ValueError: If the element of `paddings` is not an integer larger than or equal to 0.
3621        TypeError: If `block_size` is not one of list, tuple, int.
3622        TypeError: If `paddings` is neither list nor tuple.
3623
3624    Supported Platforms:
3625        ``Ascend`` ``GPU`` ``CPU``
3626
3627    Examples:
3628        >>> import numpy as np
3629        >>> import mindspore
3630        >>> from mindspore import Tensor, ops
3631        >>> block_size = [2, 2]
3632        >>> paddings = [[0, 0], [0, 0]]
3633        >>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)
3634        >>> output = ops.space_to_batch_nd(input_x, block_size, paddings)
3635        >>> print(output)
3636        [[[[1.]]]
3637         [[[2.]]]
3638         [[[3.]]]
3639         [[[4.]]]]
3640    """
3641    _space_to_batch_nd = _get_cache_prim(P.SpaceToBatchND)(block_size, paddings)
3642    return _space_to_batch_nd(input_x)
3643
3644
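# For the common 4-D NCHW case with M = 2 spatial dimensions, space_to_batch_nd
# amounts to a pad followed by a reshape/transpose that moves the intra-block
# offsets in front of the batch axis (block offsets vary slowest, matching the
# docstring example above). A minimal NumPy sketch under those assumptions,
# hypothetical and for illustration only:
def _space_to_batch_4d_reference(x, block_size, paddings):
    n, c = x.shape[0], x.shape[1]
    bh, bw = block_size
    x = np.pad(x, ((0, 0), (0, 0), tuple(paddings[0]), tuple(paddings[1])))
    h2, w2 = x.shape[2] // bh, x.shape[3] // bw
    x = x.reshape(n, c, h2, bh, w2, bw)
    return x.transpose(3, 5, 0, 1, 2, 4).reshape(n * bh * bw, c, h2, w2)

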
3645def batch_to_space_nd(input_x, block_shape, crops):
3646    r"""
3647    Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.
3648
    This operation divides the batch dimension N into blocks with `block_shape`; the output tensor's N dimension
    is the corresponding number of blocks after division. The output tensor's :math:`w_1, ..., w_M` dimensions
    are the product of the original :math:`w_1, ..., w_M` dimensions and `block_shape`, minus the given amount
    to crop from each dimension.
3653
3654    If the input shape is :math:`(n, c_1, ... c_k, w_1, ..., w_M)`, the output shape is
3655    :math:`(n', c_1, ... c_k, w'_1, ..., w'_M)`, where
3656
3657    .. math::
3658        \begin{array}{ll} \\
3659            n' = n//(block\_shape[0]*...*block\_shape[M-1]) \\
3660            w'_i = w_i*block\_shape[i-1]-crops[i-1][0]-crops[i-1][1]
3661        \end{array}
3662
3663    Args:
3664        input_x (Tensor): The input tensor. It must be greater or equal to 2-D tensor(equal to 4-D tensor on Ascend),
3665            batch dimension must be divisible by product of `block_shape`.
        block_shape (Union[list(int), tuple(int), int]): The block shape for dividing the batch dimension,
            with every value greater than or equal to 1. If `block_shape` is a tuple or list, its length M
            corresponds to the number of spatial dimensions. If `block_shape` is an int, the block sizes of
            all M dimensions are the same and equal to `block_shape`. On Ascend, M must be 2.
        crops (Union[list(int), tuple(int)]): The crop values for the spatial dimensions, containing M
            sub-lists of 2 integer values each. All values must be >= 0. crops[i] specifies the crop values
            for spatial dimension i, which corresponds to input dimension i + offset, where offset = N-M and
            N is the number of input dimensions. It is required that
            :math:`input\_shape[i+offset]*block\_shape[i] > crops[i][0]+crops[i][1]`
3675
3676    Returns:
3677        Tensor, the output tensor with the same type as input.
3678
3679    Raises:
3680        TypeError: If `block_shape` is not one of list, tuple, int.
3681        TypeError: If `crops` is neither list nor tuple.
3682        ValueError: If `block_shape` is not one dimensional when `block_shape` is a list or tuple.
3683        ValueError: If the length of `block_shape` is not 2 on Ascend.
        ValueError: If the element of `block_shape` is not an integer larger than or equal to 1.
3685        ValueError: If shape of `crops` is not (M, 2), where M is the length of `block_shape`.
        ValueError: If the element of `crops` is not an integer larger than or equal to 0.
3687
3688    Supported Platforms:
3689        ``Ascend`` ``GPU`` ``CPU``
3690
3691    Examples:
3692        >>> import mindspore
3693        >>> import numpy as np
3694        >>> from mindspore import Tensor, ops
3695        >>> block_shape = [2, 2]
3696        >>> crops = [[0, 0], [0, 0]]
3697        >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
3698        >>> output = ops.batch_to_space_nd(input_x, block_shape, crops)
3699        >>> print(output)
3700        [[[[1.  2.]
3701           [3.  4.]]]]
3702    """
3703    if isinstance(block_shape, Tensor):
3704        return batch_to_space_nd_v2_(input_x, block_shape, crops)
3705    _batch_to_space_nd = _get_cache_prim(P.BatchToSpaceND)(block_shape, crops)
3706    return _batch_to_space_nd(input_x)
3707
3708
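# batch_to_space_nd is the inverse of space_to_batch_nd: the block offsets are
# peeled off the batch axis, interleaved back into the spatial dimensions, and
# the result is cropped. A 4-D NCHW sketch mirroring the helper above
# (hypothetical, illustration only):
def _batch_to_space_4d_reference(x, block_shape, crops):
    bh, bw = block_shape
    n2, c, h, w = x.shape
    n = n2 // (bh * bw)
    x = x.reshape(bh, bw, n, c, h, w).transpose(2, 3, 4, 0, 5, 1)
    x = x.reshape(n, c, h * bh, w * bw)
    return x[:, :, crops[0][0]:x.shape[2] - crops[0][1],
             crops[1][0]:x.shape[3] - crops[1][1]]

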
3709def matrix_diag(x, k=0, num_rows=-1, num_cols=-1, padding_value=0, align="RIGHT_LEFT"):
3710    r"""
    Returns a Tensor with the contents of `x` as the k[0]-th to k[1]-th diagonals of a matrix, with everything else
    padded with `padding_value`. `num_rows` and `num_cols` specify the dimensions of the innermost matrix of the
    output. If neither is specified, the op assumes the innermost matrix of the output Tensor is square and infers
    its size from `k` and the innermost dimension of `x`. If only one of `num_rows` and `num_cols` is specified, the
    operator derives the smallest legal value for the other dimension of the output. Moreover, when only one diagonal
    is given (k is an integer or k[0] == k[1]), the first to the second innermost dimension of `x` is the batch size.
    Otherwise, the second innermost dimension is not a part of the batch size.
3718
3719    Args:
3720        x (Tensor): The diagonal Tensor.
3721        k (Union[int, Tensor], optional): Diagonal offsets. A Tensor of type int32. Positive value means superdiagonal,
3722            0 refers to the main diagonal, and negative value means subdiagonals. `k` can be a single integer
3723            (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band.
            k[0] must not be larger than k[1]. The value must be in the range of the given or derived `num_rows`
            and `num_cols`, meaning the value of `k` must be in (-num_rows, num_cols). Default: ``0`` .
3726        num_rows (Union[int, Tensor], optional): The number of rows of the output Tensor. A Tensor of type int32 with
            only one value. If `num_rows` is -1, the innermost matrix of the output Tensor is a square matrix, and
            the actual number of rows is derived from the other inputs, that is,
            :math:`num\_rows = x.shape[-1] - min(k[1], 0)`. Otherwise, the value must be greater than or equal to
            :math:`x.shape[-1] - min(k[1], 0)`. Default: ``-1`` .
3731        num_cols (Union[int, Tensor], optional): The number of columns of
3732            the output Tensor. A Tensor of type int32 with only one value.
            If `num_cols` is -1, the innermost matrix of the output Tensor is a square matrix, and the actual
            number of columns is derived from the other inputs, that is,
            :math:`num\_cols = x.shape[-1] + max(k[0], 0)`. Otherwise, the value must be greater than or
            equal to :math:`x.shape[-1] + max(k[0], 0)`. Default: ``-1`` .
        padding_value (Union[int, float, Tensor], optional): The number used to fill the area outside the specified
            diagonal band. A Tensor with only one value, with the same dtype as `x`. Default: ``0`` .
3739        align (str, optional): specifies how superdiagonals and subdiagonals should be aligned.
3740            Supported values: ``"RIGHT_LEFT"`` , ``"LEFT_RIGHT"`` , ``"LEFT_LEFT"`` , ``"RIGHT_RIGHT"`` .
3741            Default: ``"RIGHT_LEFT"`` .
3742
3743            - When set to "RIGHT_LEFT", the alignment of superdiagonals will be towards the right side
3744              (padding the row on the left), while subdiagonals will be towards the left side
3745              (padding the row on the right)
3746            - When set to "LEFT_RIGHT", the alignment of superdiagonals will be towards the left side
3747              (padding the row on the right), while subdiagonals will be towards the right side
3748              (padding the row on the left)
3749            - When set to "LEFT_LEFT", the alignment of  both superdiagonals and subdiagonals will be towards
3750              the left side(padding the row on the right).
3751            - When set to "RIGHT_RIGHT", the alignment of both superdiagonals and subdiagonals will be towards
3752              the right side(padding the row on the left).
3753
3754    Returns:
3755        A Tensor. Has the same type as `x`.
3756        Suppose `x` has r dimensions with shape :math:`(I, J, ..., M, N)` . The output Tensor has rank r + 1 with shape
3757        :math:`(I, J, ..., M, num\_rows, num\_cols)` when only one diagonal is given (k is an integer or k[0] == k[1]).
3758        Otherwise, it has rank r with shape :math:`(I, J, ..., num\_rows, num\_cols)` .
3759
3760    Raises:
3761        TypeError: If `x` is not Tensor.
3762        TypeError: If input `x` and `padding_value` are not the same dtype.
3763        TypeError: If `k`, `num_rows` or `num_cols` is not int32 dtype.
3764        ValueError: If rank of `k` is not equal to 0 or 1.
3765        ValueError: If rank of `num_rows`, `num_cols` or `padding_value` is not equal to 0.
3766        ValueError: If size of `k` is not equal to 1 or 2.
3767        ValueError: If the value of `k` is not in (-num_rows, num_cols).
        ValueError: If k[1] is not greater than or equal to k[0] when k[0] != k[1].
        ValueError: If rank of `x` is not greater than or equal to 1 when k is an integer or k[0] == k[1].
        ValueError: If rank of `x` is not greater than or equal to 2 when k[0] != k[1].
3771        ValueError: If x.shape[-2] is not equal to k[1] - k[0] + 1 when k[0] != k[1].
3772        ValueError: If `num_rows` and `num_cols` do not match the dimensions of `x` and the values of `k`.
3773        ValueError: If `align` is not a string or not in the valid set of values.
3774
3775    Supported Platforms:
3776        ``Ascend`` ``GPU`` ``CPU``
3777
3778    Examples:
3779        >>> import mindspore
3780        >>> import numpy as np
3781        >>> from mindspore import Tensor
3782        >>> from mindspore import ops
3783        >>> x = Tensor(np.array([[8, 9, 0],
3784        ...                      [1, 2, 3],
3785        ...                      [0, 4, 5]]), mindspore.float32)
        >>> k = Tensor(np.array([-1, 1]), mindspore.int32)
3787        >>> num_rows = Tensor(np.array(3), mindspore.int32)
3788        >>> num_cols = Tensor(np.array(3), mindspore.int32)
3789        >>> padding_value = Tensor(np.array(11), mindspore.float32)
3790        >>> output = ops.matrix_diag(x, k, num_rows, num_cols, padding_value, align='LEFT_RIGHT')
3791        >>> print(output)
3792        [[ 1.  8. 11.]
3793         [ 4.  2.  9.]
3794         [11.  5.  3.]]
3795        >>> print(output.shape)
3796        (3, 3)
3797    """
3798    if isinstance(k, int) and not isinstance(k, bool):
3799        k = cast_(k, mstype.int32)
3800    if isinstance(num_rows, int) and not isinstance(num_rows, bool):
3801        num_rows = cast_(num_rows, mstype.int32)
3802    if isinstance(num_cols, int) and not isinstance(num_cols, bool):
3803        num_cols = cast_(num_cols, mstype.int32)
3804    if isinstance(padding_value, (float, int)) and not isinstance(padding_value, bool):
3805        padding_value = cast_(padding_value, x.dtype)
3806    matrix_diag_v3 = _get_cache_prim(MatrixDiagV3)(align)
3807    return matrix_diag_v3(x, k, num_rows, num_cols, padding_value)
3808
3809
3810def matrix_diag_part(x, k, padding_value, align="RIGHT_LEFT"):
3811    r"""
    Returns the diagonal part of the input tensor.
    Returns a tensor with the k[0]-th to k[1]-th diagonals of `x`. Some diagonals are shorter than
    max_diag_len and need to be padded. Inputs `k` and `padding_value` must be constant Tensors in Graph mode.
3815
3816    Args:
3817        x (Tensor): The input Tensor with rank r, where r >= 2.
3818        k (Tensor): A Tensor of type int32. Diagonal offset(s). Positive value means
3819            superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. k can be
3820            a single integer (for a single diagonal) or a pair of integers specifying the low and high ends
            of a matrix band. k[0] must not be larger than k[1]. The value of k is restricted:
            it must be in the range (-x.shape[-2], x.shape[-1]).
        padding_value (Tensor): A Tensor with only one value, with the same dtype as `x`.
3824            The number to fill the area outside the specified diagonal band.
3825        align (str, optional): An optional string from: ``"RIGHT_LEFT"`` , ``"LEFT_RIGHT"`` ,
3826            ``"LEFT_LEFT"`` , ``"RIGHT_RIGHT"`` . Align is a string specifying how superdiagonals and subdiagonals
3827            should be aligned, respectively. ``"RIGHT_LEFT"`` aligns superdiagonals to the right (left-pads the row)
            and subdiagonals to the left (right-pads the row). Default: ``"RIGHT_LEFT"`` .
3829
3830    Returns:
3831        A Tensor. Has the same type as `x`.
        Assume `x` has r dimensions :math:`(I, J, ..., L, M, N)` . Let `max_diag_len` be the maximum length among all
        diagonals to be extracted, :math:`max\_diag\_len = min(M + min(k[1], 0), N + min(-k[0], 0))`.
        Let `num_diags` be the number of diagonals to extract, :math:`num\_diags = k[1] - k[0] + 1`.
        If :math:`num\_diags == 1`, the output tensor is of rank r - 1 with shape :math:`(I, J, ..., L, max\_diag\_len)`.
        Otherwise, the output tensor has rank r with dimensions :math:`(I, J, ..., L, num\_diags, max\_diag\_len)` .
3837
3838    Raises:
3839        TypeError: If `x` is not Tensor.
3840        TypeError: If input `x` and `padding_value` are not the same dtype.
3841        TypeError: If `k` is not int32 dtype.
3842        ValueError: If `align` is not a string or not in the valid range.
3843        ValueError: If rank of `k` is not equal to 0 or 1.
3844        ValueError: If rank of `padding_value` is not equal to 0.
        ValueError: If rank of `x` is not greater than or equal to 2.
3846        ValueError: If size of `k` is not equal to 1 or 2.
        ValueError: If k[1] is not greater than or equal to k[0] in case the size of `k` is 2.
3848        ValueError: If the value of `k` is not in (-x.shape[-2], x.shape[-1]).
3849
3850    Supported Platforms:
3851        ``Ascend`` ``GPU`` ``CPU``
3852
3853    Examples:
3854        >>> import mindspore
3855        >>> import numpy as np
3856        >>> from mindspore import Tensor, ops
3857        >>> x = Tensor(np.array([[1, 2, 3, 4],
3858        ...                      [5, 6, 7, 8],
3859        ...                      [9, 8, 7, 6]]), mindspore.float32)
        >>> k = Tensor(np.array([1, 3]), mindspore.int32)
3861        >>> padding_value = Tensor(np.array(9), mindspore.float32)
3862        >>> output = ops.matrix_diag_part(x, k, padding_value, align='RIGHT_LEFT')
3863        >>> print(output)
3864        [[9. 9. 4.]
3865         [9. 3. 8.]
3866         [2. 7. 6.]]
3867        >>> print(output.shape)
3868        (3, 3)
3869    """
3870    matrix_diag_part_v3 = _get_cache_prim(MatrixDiagPartV3)(align)
3871    return matrix_diag_part_v3(x, k, padding_value)
3872
3873
3874def matrix_set_diag(x, diagonal, k=0, align="RIGHT_LEFT"):  # pylint: disable=redefined-outer-name
3875    r"""
3876    Returns a batched matrix tensor with new batched diagonal values.
3877    Given x and diagonal, this operation returns a tensor with the same shape and values as x, except for the specified
3878    diagonals of the innermost matrices. These will be overwritten by the values in diagonal. Some diagonals are shorter
3879    than max_diag_len and need to be padded.
3880    The diagonal :math:`shape[-2]` must be equal to num_diags calculated by :math:`k[1] - k[0] + 1`.
3881    The diagonal :math:`shape[-1]` must be
3882    equal to the longest diagonal value max_diag_len calculated
3883    by :math:`min(x.shape[-2] + min(k[1], 0), x.shape[-1] + min(-k[0], 0))`.
3884    Let x have r + 1 dimensions :math:`(I, J, ..., L, M, N)` .
3885    The diagonal tensor has rank r with shape :math:`(I, J, ..., L, max\_diag\_len)`
3886    when k is an integer or :math:`k[0] == k[1]`. Otherwise, it has rank r + 1
3887    with shape :math:`(I, J, ... L, num\_diags, max\_diag\_len)` .
3888
3889    Args:
3890        x (Tensor): Rank r + 1, where r >= 1.
        diagonal (Tensor): A Tensor with the same dtype as `x`. It has rank r when k is an integer or
            :math:`k[0] == k[1]`. Otherwise, it has rank r + 1.
        k (Union[int, Tensor], optional): An int32 scalar or int32 Tensor. Diagonal offset(s). Positive value means
            superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. k can be a
            single integer (for a single diagonal) or a pair of integers specifying the low and high ends of
            a matrix band. k[0] must not be larger than k[1].
            The value of k is restricted: it must be in :math:`(-x.shape[-2], x.shape[-1])`.
            Input `k` must be a constant Tensor in Graph mode. Default: ``0`` .
3899        align (str, optional): An optional string from: ``"RIGHT_LEFT"`` (default), ``"LEFT_RIGHT"`` , ``"LEFT_LEFT"`` ,
3900            ``"RIGHT_RIGHT"`` . Align is a string specifying how superdiagonals and subdiagonals should be aligned,
3901            respectively. ``"RIGHT_LEFT"`` aligns superdiagonals to the right (left-pads the row) and subdiagonals
3902            to the left (right-pads the row).
3903
3904    Returns:
        Tensor, the same type as `x`. Let `x` have r+1 dimensions :math:`(I, J, ..., L, M, N)` .
        The output is a tensor of rank r+1 with dimensions :math:`(I, J, ..., L, M, N)` , the same as input `x`.
3907
3908    Raises:
3909        TypeError: If input `x` or `diagonal` is not Tensor.
3910        TypeError: If input `x` and `diagonal` are not the same dtype.
3911        TypeError: If `k` is not int32 dtype.
3912        ValueError: If `align` is not a string or not in the valid range.
3913        ValueError: If rank of `k` is not equal to 0 or 1.
        ValueError: If rank of `x` is not greater than or equal to 2.
        ValueError: If size of `k` is not equal to 1 or 2.
        ValueError: If k[1] is not greater than or equal to k[0] in case the size of `k` is 2.
        ValueError: If the rank of `diagonal` does not match the rank of input `x`.
        ValueError: If the shape of `diagonal` does not match the shape of input `x`.
3919        ValueError: If the diagonal :math:`shape[-2]` is not equal to num_diags calculated by :math:`k[1]-k[0]+1`.
3920        ValueError: If the value of `k` is not in :math:`(-x.shape[-2], x.shape[-1])`.
        ValueError: If diagonal.shape[-1] is not equal to the max_diag_len calculated by
            :math:`min(x.shape[-2] + min(k[1], 0), x.shape[-1] + min(-k[0], 0))`.
3924
3925    Supported Platforms:
3926        ``Ascend`` ``GPU`` ``CPU``
3927
3928    Examples:
3929        >>> import mindspore
3930        >>> import numpy as np
3931        >>> from mindspore import Tensor, ops
3932        >>> x = Tensor(np.array([[7, 7, 7, 7],
3933        ...                      [7, 7, 7, 7],
3934        ...                      [7, 7, 7, 7]]), mindspore.float32)
3935        >>> diagonal = Tensor(np.array([[0, 9, 1],
3936        ...                             [6, 5, 8],
3937        ...                             [1, 2, 3],
3938        ...                             [4, 5, 0]]), mindspore.float32)
3939        >>> k = Tensor(np.array([-1, 2]), mindspore.int32)
3940        >>> align = 'RIGHT_LEFT'
3941        >>> output = ops.matrix_set_diag(x, diagonal, k, align)
3942        >>> print(output)
3943        [[1. 6. 9. 7.]
3944         [4. 2. 5. 1.]
3945         [7. 5. 3. 8.]]
3946        >>> print(output.shape)
3947        (3, 4)
3948    """
3949    matrix_set_diag_v3_op = _get_cache_prim(MatrixSetDiagV3)(align)
3950    if isinstance(k, int) and not isinstance(k, bool):
3951        k = cast_(k, mstype.int32)
3952    return matrix_set_diag_v3_op(x, diagonal, k)
3953
3954
3955def meshgrid(*inputs, indexing='xy'):
3956    """
3957    Generates coordinate matrices from given coordinate tensors.
3958
3959    Given N one-dimensional coordinate tensors, returns a tuple outputs of N N-D
3960    coordinate tensors for evaluating expressions on an N-D grid.
3961
3962    Args:
3963        inputs (List[Tensor]): List of 1-D tensors.
3964            The length of inputs should be greater than 1. The data type is Number.
3965
3966    Keyword Args:
        indexing (str, optional): Cartesian (``'xy'``, default) or
            matrix (``'ij'``) indexing of output. Valid options: ``'xy'`` or ``'ij'``. In the 2-D case with
3969            inputs of length `M` and `N`, the outputs are of shape :math:`(N, M)`
3970            for ``'xy'`` indexing and :math:`(M, N)` for ``'ij'`` indexing. In the 3-D
3971            case with inputs of length `M`, `N` and `P`, outputs are of shape
3972            :math:`(N, M, P)` for ``'xy'`` indexing and :math:`(M, N, P)` for ``'ij'`` indexing.
3973            Default: ``'xy'`` .
3974
3975    Returns:
        Tensors, a tuple of N N-D Tensor objects. The data type is the same as the inputs.
3977
3978    Raises:
3979        TypeError: If `indexing` is not a str or `inputs` is not a tuple.
3980        ValueError: If `indexing` is neither ``'xy'`` nor ``'ij'``.
3981
3982    Supported Platforms:
3983        ``Ascend`` ``GPU`` ``CPU``
3984
3985    Examples:
3986        >>> import numpy as np
3987        >>> from mindspore import Tensor
3988        >>> from mindspore import ops
3989        >>> x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))
3990        >>> y = Tensor(np.array([5, 6, 7]).astype(np.int32))
3991        >>> z = Tensor(np.array([8, 9, 0, 1, 2]).astype(np.int32))
3992        >>> output = ops.meshgrid(x, y, z, indexing='xy')
3993        >>> print(output)
3994        (Tensor(shape=[3, 4, 5], dtype=Int32, value=
3995         [[[1, 1, 1, 1, 1],
3996           [2, 2, 2, 2, 2],
3997           [3, 3, 3, 3, 3],
3998           [4, 4, 4, 4, 4]],
3999          [[1, 1, 1, 1, 1],
4000           [2, 2, 2, 2, 2],
4001           [3, 3, 3, 3, 3],
4002           [4, 4, 4, 4, 4]],
4003          [[1, 1, 1, 1, 1],
4004           [2, 2, 2, 2, 2],
4005           [3, 3, 3, 3, 3],
4006           [4, 4, 4, 4, 4]]]),
4007         Tensor(shape=[3, 4, 5], dtype=Int32, value=
4008         [[[5, 5, 5, 5, 5],
4009           [5, 5, 5, 5, 5],
4010           [5, 5, 5, 5, 5],
4011           [5, 5, 5, 5, 5]],
4012          [[6, 6, 6, 6, 6],
4013           [6, 6, 6, 6, 6],
4014           [6, 6, 6, 6, 6],
4015           [6, 6, 6, 6, 6]],
4016          [[7, 7, 7, 7, 7],
4017           [7, 7, 7, 7, 7],
4018           [7, 7, 7, 7, 7],
4019           [7, 7, 7, 7, 7]]]),
4020         Tensor(shape=[3, 4, 5], dtype=Int32, value=
4021         [[[8, 9, 0, 1, 2],
4022           [8, 9, 0, 1, 2],
4023           [8, 9, 0, 1, 2],
4024           [8, 9, 0, 1, 2]],
4025          [[8, 9, 0, 1, 2],
4026           [8, 9, 0, 1, 2],
4027           [8, 9, 0, 1, 2],
4028           [8, 9, 0, 1, 2]],
4029          [[8, 9, 0, 1, 2],
4030           [8, 9, 0, 1, 2],
4031           [8, 9, 0, 1, 2],
4032           [8, 9, 0, 1, 2]]]))
4033    """
4034    meshgrid_op = _get_cache_prim(P.Meshgrid)(indexing)
4035    return meshgrid_op(inputs)
4036
4037
4038def affine_grid(theta, size, align_corners=False):
4039    r"""
4040    Returns a 2D or 3D flow field (sampling grid) based on `theta`, a batch of affine matrices.
4041
4042    Args:
        theta (Tensor): The input tensor of the flow field whose dtype is float16 or float32.
            Input batch of affine matrices with shape :math:`(N, 2, 3)` for a 2D grid or :math:`(N, 3, 4)` for a 3D
            grid.
        size (tuple[int]): The target output image size,
            in the format :math:`(N, C, H, W)` for a 2D grid or :math:`(N, C, D, H, W)` for a 3D
            grid.
        align_corners (bool, optional): Geometrically, each pixel of the input is viewed as a square instead of a dot.
            If ``True`` , the extrema -1 and 1 are considered to refer to the centers of the pixels rather than their
            corners. If ``False`` , they refer to the corners of the pixels, so that sampling is independent of the
            resolution of the image. Default: ``False`` .
4052
4053    Returns:
        Tensor, a tensor whose data type is the same as `theta`, and whose shape is :math:`(N, H, W, 2)` for a 2D grid
        or :math:`(N, D, H, W, 3)` for a 3D grid.
4056
4057    Raises:
4058        TypeError: If `theta` is not a Tensor or `size` is not a tuple.
4059        ValueError: If the shape of `theta` is not :math:`(N, 2, 3)` or :math:`(N, 3, 4)`.
4060        ValueError: If the size of `size` is not 4 or 5.
        ValueError: If the shape of `theta` is :math:`(N, 2, 3)` while the size of `size` is not 4,
                    or if the shape of `theta` is :math:`(N, 3, 4)` while the size of `size` is not 5.
        ValueError: If size[0] is not equal to the shape[0] of `theta`.
4064
4065    Supported Platforms:
4066        ``Ascend`` ``GPU`` ``CPU``
4067
4068    Examples:
4069        >>> import mindspore
4070        >>> from mindspore import Tensor
4071        >>> from mindspore import ops
4072        >>> theta = Tensor([[[0.8, 0.5, 0],[-0.5, 0.8, 0]]], mindspore.float32)
4073        >>> out_size = (1, 3, 2, 3)
4074        >>> output = ops.affine_grid(theta, out_size, False)
4075        >>> print(output)
4076        [[[[-0.78333336 -0.06666666]
4077        [-0.25       -0.4       ]
4078        [ 0.28333336 -0.73333335]]
4079        [[-0.28333336  0.73333335]
4080        [ 0.25        0.4       ]
4081        [ 0.78333336  0.06666666]]]]
4082    """
4083    affine_grid_op = AffineGrid(align_corners)
4084    return affine_grid_op(theta, size)
4085
4086
4087def unsorted_segment_min(x, segment_ids, num_segments):
4088    r"""
4089    Computes the minimum of a tensor along segments.
4090
4091    The following figure shows the calculation process of unsorted_segment_min:
4092
4093    .. image:: UnsortedSegmentMin.png
4094
4095    .. math::
4096
4097        \text { output }_i=\text{min}_{j \ldots} \text { data }[j \ldots]
4098
4099    where :math:`min` over tuples :math:`j...` such that :math:`segment\_ids[j...] == i`.
4100
4101    Note:
        - If the segment_id i is absent in the segment_ids, then output[i] will be filled with
          the maximum value of the dtype of `x`.
        - The `segment_ids` must be a non-negative tensor.
4105
4106    Args:
4107        x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
        segment_ids (Tensor): The label that indicates the segment to which each element belongs.
            Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
        num_segments (Union[int, Tensor]): The number of segments :math:`z`. It can be an int or a 0-D Tensor.
4111
4112    Returns:
4113        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
4114
4115    Raises:
4116        TypeError: If `num_segments` is not an int.
4117
4118    Supported Platforms:
4119        ``Ascend`` ``GPU`` ``CPU``
4120
4121    Examples:
4122        >>> from mindspore import Tensor
4123        >>> from mindspore import ops
4124        >>> import numpy as np
4125        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
4126        >>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
4127        >>> num_segments = 2
4128        >>> output = ops.unsorted_segment_min(x, segment_ids, num_segments)
4129        >>> print(output)
4130        [[1. 2. 3.]
4131         [4. 2. 1.]]
4132    """
4133    return unsorted_segment_min_(x, segment_ids, num_segments)
4134
4135
4136def unsorted_segment_max(x, segment_ids, num_segments):
4137    r"""
4138    Computes the maximum along segments of a tensor.
4139
4140    The following figure shows the calculation process of unsorted_segment_max:
4141
4142    .. image:: UnsortedSegmentMax.png
4143
4144    .. math::
4145
4146        \text { output }_i=\text{max}_{j \ldots} \text { data }[j \ldots]
4147
4148    where :math:`max` over tuples :math:`j...` such that :math:`segment\_ids[j...] == i`.
4149
4150    Note:
        - If the segment_id i is absent in the segment_ids, then output[i] will be filled with
          the minimum value of the dtype of `x`.
        - The `segment_ids` must be a non-negative tensor.
4154
4155    Args:
4156        x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
        segment_ids (Tensor): The label that indicates the segment to which each element belongs.
            Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
        num_segments (Union[int, Tensor]): The number of segments :math:`z`. It can be an int or a 0-D Tensor.
4160
4161    Returns:
4162        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
4163
4164    Raises:
4165        TypeError: If `num_segments` is not an int.
4166
4167    Supported Platforms:
4168        ``Ascend`` ``GPU`` ``CPU``
4169
4170    Examples:
4171        >>> from mindspore import Tensor
4172        >>> from mindspore import ops
4173        >>> import numpy as np
4174        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
4175        >>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
4176        >>> num_segments = 2
4177        >>> output = ops.unsorted_segment_max(x, segment_ids, num_segments)
4178        >>> print(output)
4179        [[1. 2. 3.]
4180         [4. 5. 6.]]
4181    """
4182    return unsorted_segment_max_(x, segment_ids, num_segments)
4183
4184
4185def unsorted_segment_prod(x, segment_ids, num_segments):
4186    r"""
4187    Computes the product of a tensor along segments.
4188
4189    The following figure shows the calculation process of unsorted_segment_prod:
4190
4191    .. image:: UnsortedSegmentProd.png
4192
4193    Note:
4194        - If the segment_id i is absent in the segment_ids, then output[i] will be filled with 1.
        - The `segment_ids` must be a non-negative tensor.
4196
4197    Args:
4198        x (Tensor): The shape is :math:`(x_1, x_2, ..., x_R)`. With float16, float32 or int32 data type.
        segment_ids (Tensor): The label that indicates the segment to which each element belongs.
            Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R. The data type must be int32.
        num_segments (Union[int, Tensor]): The number of segments :math:`z`. It can be an int or a 0-D Tensor.
4202
4203    Returns:
4204        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
4205
4206    Raises:
4207        TypeError: If `num_segments` is not an int.
4208
4209    Supported Platforms:
4210        ``Ascend`` ``GPU`` ``CPU``
4211
4212    Examples:
4213        >>> from mindspore import Tensor
4214        >>> from mindspore import ops
4215        >>> import numpy as np
4216        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
4217        >>> segment_ids = Tensor(np.array([0, 1, 0]).astype(np.int32))
4218        >>> num_segments = 2
4219        >>> output = ops.unsorted_segment_prod(x, segment_ids, num_segments)
4220        >>> print(output)
4221        [[4. 4. 3.]
4222         [4. 5. 6.]]
4223    """
4224    return unsorted_segment_prod_(x, segment_ids, num_segments)
4225
4226
4227def index_fill(x, axis, index, value):
4228    """
4229    Fills the elements under the `axis` dimension of the input Tensor `x` with the input `value`
4230    by selecting the indices in the order given in `index`.
4231
4232    Args:
4233        x (Tensor): Input Tensor.  The supported data type is Number or Bool.
4234        axis (Union[int, Tensor]): Dimension along which to fill the input Tensor. Only supports
4235            an int number or a 0-dimensional Tensor, whose data type is int32 or int64.
4236        index (Tensor): Indices of the input Tensor to fill in. The dtype must be int32.
4237        value (Union[bool, int, float, Tensor]): Value to fill the returned Tensor. If `value` is
4238            a Tensor, it must be a 0-dimensional Tensor and has the same dtype as `x`. Otherwise,
4239            the `value` will be cast to a 0-dimensional Tensor with the same data type as `x`.
4240
4241    Returns:
4242        Tensor, has the same dtype and shape as input Tensor.
4243
4244    Raises:
4245        TypeError: If `x` is not a Tensor.
4246        TypeError: If `axis` is neither int number nor Tensor.
4247        TypeError: When `axis` is a Tensor, its dtype is not int32 or int64.
4248        TypeError: If `index` is not a Tensor.
4249        TypeError: If dtype of `index` is not int32.
4250        TypeError: If `value` is not a bool, int, float, or Tensor.
4251        TypeError: When `value` is a Tensor, the dtype of `x` and `value` are not the same.
4252        ValueError: If `axis` is a Tensor and its rank is not equal to 0.
4253        ValueError: If the rank of `index` is greater than 1D.
4254        ValueError: When `value` is a Tensor and its rank is not equal to 0.
        RuntimeError: If the value of `axis` is out of the range `[-x.ndim, x.ndim - 1]`.
        RuntimeError: If the values of `index` are out of the range `[-x.shape[axis], x.shape[axis]-1]`.
4257
4258    Supported Platforms:
4259        ``Ascend`` ``GPU`` ``CPU``
4260
4261    Examples:
4262        >>> import mindspore
4263        >>> import numpy as np
4264        >>> from mindspore import ops
4265        >>> from mindspore import Tensor
4266        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32))
4267        >>> index = Tensor([0, 2], mindspore.int32)
4268        >>> value = Tensor(-2.0, mindspore.float32)
4269        >>> y = ops.index_fill(x, 1, index, value)
4270        >>> print(y)
4271        [[-2. 2. -2.]
4272         [-2. 5. -2.]
4273         [-2. 8. -2.]]
4274    """
4275    if isinstance(axis, int) and not isinstance(axis, bool):
4276        axis = cast_(axis, mstype.int32)
4277    if isinstance(value, (bool, float, int)):
4278        value = cast_(value, x.dtype)
4279    return index_fill_(x, axis, index, value)
4280
4281
4282@constexpr
4283def _check_check_axis_in_range(axis, ndim):
4284    """Checks axes are with the bounds of ndim"""
4285    axis = validator.check_axis_in_range(axis, ndim)
4286    return axis
4287
4288
4289def index_select(input, axis, index):
4290    """
4291    Generates a new Tensor that accesses the values of `input` along the specified `axis` dimension
4292    using the indices specified in `index`. The new Tensor has the same number of dimensions as `input`,
4293    with the size of the `axis` dimension being equal to the length of `index`, and the size of all other
4294    dimensions will be unchanged from the original `input` Tensor.
4295
4296    .. note::
        The value of `index` must be in the range of `[0, input.shape[axis])`; outside that range the result is
        undefined.
4298
4299    Args:
4300        input (Tensor): The input Tensor.
4301        axis (int): The dimension to be indexed.
4302        index (Tensor): A 1-D Tensor with the indices to access in `input` along the specified axis.
4303
4304    Returns:
4305        Tensor, has the same dtype as input Tensor.
4306
4307    Raises:
4308        TypeError: If `input` or `index` is not a Tensor.
4309        TypeError: If `axis` is not int number.
        ValueError: If the value of `axis` is out of the range `[-input.ndim, input.ndim - 1]`.
4311        ValueError: If the dimension of `index` is not equal to 1.
4312
4313    Supported Platforms:
4314        ``Ascend`` ``GPU`` ``CPU``
4315
4316    Examples:
4317        >>> import mindspore
4318        >>> from mindspore import Tensor, ops
4319        >>> import numpy as np
4320        >>> input = Tensor(np.arange(16).astype(np.float32).reshape(2, 2, 4))
4321        >>> print(input)
4322        [[[ 0.  1.  2.  3.]
4323          [ 4.  5.  6.  7.]]
4324         [[ 8.  9. 10. 11.]
4325          [12. 13. 14. 15.]]]
4326        >>> index = Tensor([0,], mindspore.int32)
4327        >>> y = ops.index_select(input, 1, index)
4328        >>> print(y)
4329        [[[ 0.  1.  2.  3.]]
4330         [[ 8.  9. 10. 11.]]]
4331    """
4332    if not (isinstance(input, Tensor) and isinstance(index, Tensor)):
4333        raise TypeError(f"For 'index_select', `input` and `index` must be all tensors.")
4334    if index.ndim != 1:
4335        raise ValueError(f"For 'index_select', the dimension of `index` must be 1, but got {index.ndim}")
4336    axis = _check_check_axis_in_range(axis, input.ndim)
4337    return gather_(input, index, axis)
4338
4339
4340def population_count(input_x):
4341    r"""
    Computes element-wise population count (a.k.a. bitsum, bitcount).
4343    For each entry in `input_x`, calculates the number of 1 bits in the binary representation of that entry.
4344
4345    Args:
4346        input_x (Tensor): Tensor of any dimension. The data type must be int16 or uint16 (Ascend).
4347            The data type must be int8, int16, int32, int64, uint8, uint16, uint32, uint64 (CPU and GPU).
4348
4349    Returns:
4350        Tensor, with the same shape as the input, and the data type is uint8.
4351
4352    Raises:
4353        TypeError: If `input_x` is not a Tensor.
4354        TypeError: If dtype of `input_x` is not int16, uint16 (Ascend).
4355        TypeError: If dtype of `input_x` is not int8, int16, int32, int64, uint8, uint16, uint32, uint64 (CPU and GPU).
4356
4357    Supported Platforms:
4358        ``Ascend`` ``GPU`` ``CPU``
4359
4360    Examples:
4361        >>> import mindspore
4362        >>> from mindspore import Tensor, ops
4363        >>> input_x = Tensor([0, 1, 3], mindspore.int16)
4364        >>> output = ops.population_count(input_x)
4365        >>> print(output)
4366        [0 1 2]
4367    """
4368    return population_count_(input_x)
4369
4370
4371##############################
4372# Type Conversion Functions.
4373##############################
4374
4375
4376def is_tensor(obj):
4377    r"""
4378    Check whether the input object is a :class:`mindspore.Tensor` .
4379
4380    Args:
4381        obj (Object): input object.
4382
4383    Returns:
4384        Bool. Return True if `obj` is a Tensor, otherwise, return False.
4385
4386    Supported Platforms:
4387        ``Ascend`` ``GPU`` ``CPU``
4388
4389    Examples:
4390        >>> from mindspore import Tensor, ops
4391        >>> a = Tensor([1.9, 2.2, 3.1])
4392        >>> ops.is_tensor(a)
4393        True
4394    """
4395    return isinstance(obj, Tensor)
4396
4397
4398def is_nonzero(input):
4399    """
    Determines whether the input Tensor is nonzero, i.e. neither 0 nor False. The input must contain only a single
    element.
4401
4402    Args:
4403        input (Tensor): The input tensor.
4404
4405    Returns:
        Bool, returns False if the single element of the input Tensor is 0 or False,
        otherwise returns True.
4408
4409    Raises:
4410        TypeError: If `input` is not Tensor.
4411        ValueError: If the element number of `input` is not equal to 1.
4412
4413    Supported Platforms:
4414        ``Ascend`` ``GPU`` ``CPU``
4415
4416    Examples:
4417        >>> from mindspore import Tensor, ops
4418        >>> x1 = Tensor([[[False]]])
4419        >>> x2 = Tensor([[3.5]])
4420        >>> out1 = ops.is_nonzero(x1)
4421        >>> print(out1)
4422        False
4423        >>> out2 = ops.is_nonzero(x2)
4424        >>> print(out2)
4425        True
4426    """
4427    if not isinstance(input, Tensor):
4428        raise TypeError(f'For is_nonzero, the input must be a Tensor, but got {type(input)}.')
4429    if input.numel() != 1:
4430        raise ValueError(f"For is_nonzero, the numel of input must be 1, but got {input.numel()}.")
4431    out = ops.squeeze(input)
4432    return bool(out)
4433
4434
4435def tensor_scatter_mul(input_x, indices, updates):
4436    r"""
    Creates a new tensor by multiplying the values at the positions in `input_x` indicated by
    `indices` with the values from `updates`. When multiple values are provided for the same
    index, the result of the update is to multiply these values successively. The updates are
    applied to the output `Tensor` instead of the input `Parameter`.
4441
4442    The last axis of `indices` is the depth of each index vectors. For each index vector,
4443    there must be a corresponding value in `updates`. The shape of `updates` should be
4444    equal to the shape of `input_x[indices]`. For more details, see Examples.
4445
4446    .. math::
        output\left[indices\right] = input\_x \times updates
4448
4449    Note:
        - If some values of the `indices` are out of bound, instead of raising an index error,
          the corresponding update will not be applied to `input_x`.
4452
4453    Args:
4454        input_x (Tensor): The input tensor. The dimension of `input_x` must be no less than indices.shape[-1].
4455        indices (Tensor): The index of input tensor whose data type is int32 or int64. The rank must be at least 2.
4456        updates (Tensor): The tensor to update the input tensor, has the same type as `input_x`,
4457            and the shape of `updates` should be equal to
4458            :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
4459
4460    Returns:
4461        Tensor, has the same shape and type as `input_x`.
4462
4463    Raises:
4464        TypeError: If dtype of `indices` is neither int32 nor int64.
4465        ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
4466        RuntimeError: If a value of `indices` is not in `input_x` on CPU backend.
4467
4468    Supported Platforms:
4469        ``GPU`` ``CPU``
4470
4471    Examples:
4472        >>> import mindspore
4473        >>> import numpy as np
4474        >>> from mindspore import Tensor, ops
4475        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
4476        >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
4477        >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
4478        >>> # Next, demonstrate the approximate operation process of this operator:
4479        >>> # 1, indices[0] = [0, 0], indices[1] = [0, 0]
4480        >>> # 2, And input_x[0, 0] = -0.1
4481        >>> # 3, So input_x[indices] = [-0.1, -0.1]
4482        >>> # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)
4483        >>> # 5, Perform the multiply operation for the first time:
4484        >>> #      first_input_x = input_x[0][0] * updates[0] = [[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]
4485        >>> # 6, Perform the multiply operation for the second time:
4486        >>> #      second_input_x = input_x[0][0] * updates[1] = [[-0.22, 0.3, 3.6], [0.4, 0.5, -3.2]]
4487        >>> output = ops.tensor_scatter_mul(input_x, indices, updates)
4488        >>> print(output)
4489        [[-0.22  0.3   3.6  ]
4490         [ 0.4   0.5   -3.2 ]]
4491    """
4492    return tensor_scatter_mul_(input_x, indices, updates)
4493
4494
4495def tensor_scatter_div(input_x, indices, updates):
4496    r"""
    Creates a new tensor by dividing the values at the positions in `input_x` indicated by
    `indices` by the values from `updates`. When multiple values are provided for the same
    index, the result of the update is to divide by these values successively. The updates are
    applied to the output `Tensor` instead of the input `Parameter`.
4501
4502    The last axis of `indices` is the depth of each index vectors. For each index vector,
4503    there must be a corresponding value in `updates`. The shape of `updates` should be
4504    equal to the shape of `input_x[indices]`. For more details, see Examples.
4505
4506    .. math::
        output\left[indices\right] = input\_x \div updates
4508
4509    Note:
        - On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
          the corresponding update will not be applied to the tensor.
        - On CPU, if some values of the `indices` are out of bound, an index error is raised.
        - On Ascend, out-of-bound checking is not supported; if some values of the `indices` are out of bound,
          unknown errors may be caused.
        - The operator cannot handle division-by-zero exceptions, so the user needs to make sure
          there is no 0 value in `updates`.
4517
4518    Args:
4519        input_x (Tensor): The input tensor. The dimension of input_x must be no less than indices.shape[-1].
4520        indices (Tensor): The index of input tensor whose data type is int32 or int64.
4521            The rank must be at least 2.
4522        updates (Tensor): The tensor to update the `input_x` tensor, has the same type as `input_x`.
4523            And the shape of `updates` should be
4524            equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
4525
4526    Returns:
4527        Tensor, has the same shape and type as `input_x`.
4528
4529    Raises:
4530        TypeError: If dtype of `indices` is neither int32 nor int64.
4531        ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
4532        RuntimeError: If a value of `indices` is not in `input_x`.
4533
4534    Supported Platforms:
4535        ``GPU`` ``CPU``
4536
4537    Examples:
4538        >>> import numpy as np
4539        >>> import mindspore
4540        >>> from mindspore import Tensor, nn, ops
4541        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
4542        >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
4543        >>> updates = Tensor(np.array([1.0, 2.0]), mindspore.float32)
4544        >>> output = ops.tensor_scatter_div(input_x, indices, updates)
4545        >>> print(output)
4546        [[-0.05  0.3  3.6  ]
4547         [ 0.4   0.5  -3.2 ]]
4548    """
4549    return tensor_scatter_div_(input_x, indices, updates)
4550
4551
4552def scalar_to_array(input_x):
4553    """
    This interface is deprecated. Please use :func:`mindspore.ops.scalar_to_tensor` instead.
4555    """
4556    return P.ScalarToArray()(input_x)
4557
4558
4559def scalar_to_tensor(input_x, dtype=mstype.float32):
4560    """
4561    Converts a scalar to a `Tensor`, and converts the data type to the specified type.
4562
4563    Args:
4564        input_x (Union[bool, int, float]): The input is a scalar. Only constant value is allowed.
4565        dtype (mindspore.dtype): The target data type. Only constant value is allowed. Default: ``mstype.float32``.
4566
4567    Returns:
4568        Tensor. 0-D Tensor and the content is the input.
4569
4570    Raises:
4571        TypeError: If `input_x` is neither bool nor int nor float.
4572
4573    Supported Platforms:
4574        ``Ascend`` ``GPU`` ``CPU``
4575
4576    Examples:
4577        >>> import mindspore
4578        >>> from mindspore import ops
4579        >>> data = 1
4580        >>> output = ops.scalar_to_tensor(data, mindspore.float32)
4581        >>> print(output)
4582        1.0
4583    """
4584    return scalar_to_tensor_(input_x, dtype)
4585
4586
4587def tuple_to_array(input_x):
4588    """
4589    Converts a tuple to a tensor.
4590
4591    If the type of the first number in the tuple is integer, the data type of the output tensor is int.
4592    Otherwise, the data type of the output tensor is float.
4593
4594    Args:
        input_x (tuple): A tuple of numbers. These numbers have the same type. Only constant value is allowed.
            If the tuple contains :math:`N` numbers, the output tensor has shape :math:`(N,)`.
4597
4598    Returns:
4599        Tensor, if the input tuple contains `N` numbers, then the shape of the output tensor is (N,).
4600
4601    Raises:
4602        TypeError: If `input_x` is not a tuple.
4603        ValueError: If length of `input_x` is less than or equal to 0.
4604
4605    Supported Platforms:
4606        ``Ascend`` ``GPU`` ``CPU``
4607
4608    Examples:
        >>> from mindspore import ops
        >>> input_x = (1,2,3)
4610        >>> print(type(input_x))
4611        <class 'tuple'>
4612        >>> output = ops.tuple_to_array(input_x)
4613        >>> print(type(output))
4614        <class 'mindspore.common.tensor.Tensor'>
4615        >>> print(output)
4616        [1 2 3]
4617    """
4618    if isinstance(input_x[0], int):
4619        dtype = mstype.int32
4620    else:
4621        dtype = mstype.float32
4622    return tuple_to_tensor_(input_x, dtype)
4623
4624
4625def masked_select(input, mask):
4626    """
    Returns a new 1-D Tensor which indexes the `input` tensor according to the boolean `mask`.
    The shapes of the `mask` tensor and the `input` tensor don't need to match, but they must be broadcastable.
4629
4630    Args:
4631        input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
4632        mask (Tensor[bool]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
4633
4634    Returns:
4635        A 1-D Tensor, with the same type as `input`.
4636
4637    Raises:
4638        TypeError: If `input` or `mask` is not a Tensor.
4639        TypeError: If dtype of `mask` is not bool.
4640
4641    Supported Platforms:
4642        ``Ascend`` ``GPU`` ``CPU``
4643
4644    Examples:
4645        >>> import numpy as np
4646        >>> import mindspore
4647        >>> from mindspore import Tensor, ops
4648        >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
4649        >>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
4650        >>> output = ops.masked_select(x, mask)
4651        >>> print(output)
4652        [1 3]
4653    """
4654    return masked_select_(input, mask)
4655
4656
4657def diagflat(input, offset=0):
4658    r"""
    Creates a 2-D Tensor whose diagonal is the flattened `input` .
4660
4661    Args:
4662        input (Tensor): Input Tensor, which is flattened and set as the diagonal of the output.
4663        offset (int, optional): `offset` controls which diagonal to choose. Default: ``0`` .
4664
4665            - When `offset` is zero, the diagonal chosen is the main diagonal.
            - When `offset` is a positive integer, the diagonal chosen is above the main diagonal.
            - When `offset` is a negative integer, the diagonal chosen is below the main diagonal.
4668
4669    Returns:
4670        The 2-D Tensor, whose diagonal is the flattened `input`.
4671
4672    Raises:
4673        TypeError: If `input` is not a tensor.
4674        TypeError: If `offset` is not an integer.
4675
4676    Supported Platforms:
4677        ``Ascend`` ``GPU`` ``CPU``
4678
4679    Examples:
4680        >>> import mindspore
4681        >>> from mindspore import Tensor, ops
4682        >>> x = Tensor([1, 2], mindspore.float32)
4683        >>> output = ops.diagflat(x, 1)
4684        >>> print(output)
4685        [[0. 1. 0.]
4686         [0. 0. 2.]
4687         [0. 0. 0.]]
4688    """
4689    if not isinstance(input, Tensor):
4690        raise TypeError(f"For diagflat, the input x must be tensor, but got {type(input)}")
4691    if not isinstance(offset, int):
4692        raise TypeError(f"For diagflat, the offset must be int, but got {type(offset)}")
4693    offset_abs = abs(offset)
4694    if input.size == 0:
4695        return zeros((offset_abs, offset_abs), input.dtype)
4696    input = input.ravel()
4697    res = diag(input)
4698    if offset != 0:
4699        pad_y = zeros((input.size + offset_abs, offset_abs), input.dtype)
4700        pad_x = zeros((offset_abs, input.size), input.dtype)
4701        if offset < 0:
4702            res = cat((pad_x, res), axis=0)
4703            res = cat((res, pad_y), axis=1)
4704        else:
4705            res = cat((res, pad_x), axis=0)
4706            res = cat((pad_y, res), axis=1)
4707    return res
4708
4709
4710def col2im(input_x, output_size, kernel_size, dilation, padding_value, stride):
4711    """
4712    Combines an array of sliding local blocks into a large containing tensor.
4713
4714    Args:
4715        input_x (Tensor): 4D tensor with data type float16 or float32.
4716        output_size (Tensor): 1D tensor with 2 elements of data type int.
        kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two ints
            for height and width. If the type is int, height equals width. Must be specified.
        dilation (Union[int, tuple[int], list[int]]): The size of the dilation, should be two ints
            for height and width. If the type is int, height equals width.
        padding_value (Union[int, tuple[int], list[int]]): The size of the padding, should be two ints
            for height and width. If the type is int, height equals width.
        stride (Union[int, tuple[int], list[int]]): The size of the stride, should be two ints
            for height and width. If the type is int, height equals width.
4725
4726    Returns:
        A 4D Tensor, with the same type as `input_x`.
4728
4729    Raises:
        TypeError: If the data type of `kernel_size`, `dilation`, `padding_value` or `stride` is not in
            Union[int, tuple[int], list[int]].
        ValueError: If the value of `kernel_size`, `dilation` or `stride` is not
            greater than zero, or its number of elements is more than 2.
        ValueError: If the value of `padding_value` is less than zero or its number of elements is more than 2.
4735        ValueError: If input_x.shape[2] != kernel_size[0] * kernel_size[1].
4736        ValueError: If input_x.shape[3] does not match the calculated number of sliding blocks.
4737
4738    Supported Platforms:
4739        ``Ascend`` ``GPU`` ``CPU``
4740
4741    Examples:
4742        >>> import numpy as np
4743        >>> from mindspore import Tensor, ops
4744        >>> from mindspore import dtype as mstype
4745        >>> x = Tensor(input_data=np.random.rand(16, 16, 4, 25), dtype=mstype.float32)
4746        >>> output_size = Tensor(input_data=[8, 8], dtype=mstype.int32)
4747        >>> output = ops.col2im(x, output_size, [2, 2], [2, 2], [2, 2], [2, 2])
4748        >>> print(output.shape)
4749        (16, 16, 8, 8)
4750    """
4751    c2i = _get_cache_prim(Col2Im)(kernel_size, dilation, padding_value, stride)
4752    return c2i(input_x, output_size)
4753
4754
4755def _split_int(x, split_size_or_sections, axis):
4756    """
4757    Splits the input tensor `x` into multiple sub-tensors along the axis according to the given `split_size_or_sections`
4758    with int type.
4759    """
4760    arr_shape = x.shape
4761    length_along_dim = arr_shape[axis]
4762    if split_size_or_sections > length_along_dim:
4763        res = _get_cache_prim(P.Split)(axis, 1)(x)
4764    elif length_along_dim % split_size_or_sections == 0:
4765        sections = length_along_dim // split_size_or_sections
4766        res = _get_cache_prim(P.Split)(axis, sections)(x)
4767    else:
4768        num_sections = length_along_dim // split_size_or_sections
4769        length1 = num_sections * split_size_or_sections
4770        length2 = length_along_dim - length1
4771        start1 = _list_comprehensions(rank_(x), 0, True)
4772        size1 = _tuple_setitem(arr_shape, axis, length1)
4773        start2 = _tuple_setitem(start1, axis, length1)
4774        size2 = _tuple_setitem(arr_shape, axis, length2)
4775        res = _get_cache_prim(P.Split)(axis, num_sections)(tensor_slice(x, start1, size1)) + \
4776              _get_cache_prim(P.Split)(axis, 1)(tensor_slice(x, start2, size2))
4777    return res
4778
4779
4780def _split_sub_tensors(x, split_size_or_sections, axis):
4781    """
4782    Splits the input tensor `x` into multiple sub-tensors along the axis according to the given `split_size_or_sections`
4783    with type of tuple or list.
4784    """
4785    new_indices = [0]
4786    for i, split_size in enumerate(split_size_or_sections):
4787        new_indices.append(new_indices[i] + split_size)
4788    new_indices = new_indices[1:]
4789    sub_tensors = []
4790    strides = _list_comprehensions(x.ndim, 1, True)
4791    begin = _list_comprehensions(x.ndim, 0)
4792    end = _list_comprehensions(x.shape)
4793    for i in ms_arrange(len(new_indices)):
4794        idx = new_indices[i]
4795        begin[axis] = 0 if i == 0 else new_indices[i - 1]
4796        end[axis] = idx
4797        sliced_tensor = strided_slice(x, tuple(begin), tuple(end), strides)
4798        sub_tensors.append(sliced_tensor)
4799    return sub_tensors
4800

def split(tensor, split_size_or_sections, axis=0):
4802    """
4803    Splits the Tensor into chunks along the given axis.
4804
4805    Args:
4806        tensor (Tensor): A Tensor to be divided.
4807        split_size_or_sections (Union[int, tuple(int), list(int)]):
4808            If `split_size_or_sections` is an int type, `tensor` will be split into equally sized chunks,
4809            each chunk with size `split_size_or_sections`. Last chunk will be smaller than `split_size_or_sections`
4810            if `tensor.shape[axis]` is not divisible by `split_size_or_sections`.
4811            If `split_size_or_sections` is a list type, then `tensor` will be split into len(split_size_or_sections)
4812            chunks with sizes `split_size_or_sections` along the given `axis`.
4813        axis (int): The axis along which to split. Default: ``0`` .
4814
4815    Returns:
4816        A tuple of sub-tensors.
4817
4818    Raises:
4819        TypeError: If argument `tensor` is not Tensor.
        TypeError: If argument `axis` is not int.
4821        ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
4822        TypeError: If each element in `split_size_or_sections` is not integer.
4823        TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
        ValueError: If the sum of `split_size_or_sections` is not equal to tensor.shape[axis].
4825
4826    Supported Platforms:
4827        ``Ascend`` ``GPU`` ``CPU``
4828
4829    Examples:
4830        >>> import numpy as np
4831        >>> from mindspore import ops, Tensor
4832        >>> input_x = np.arange(9).astype("float32")
4833        >>> output = ops.split(Tensor(input_x), 3)
4834        >>> print(output)
4835        (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]),
4836         Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00,  4.00000000e+00,  5.00000000e+00]),
4837         Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00,  7.00000000e+00,  8.00000000e+00]))
4838    """
4839    if not isinstance(tensor, Tensor):
4840        raise TypeError(f'expect `tensor` is a Tensor, but got {type(tensor)}')
4841    if type(axis) is not int:
4842        raise TypeError(f"Type of Argument `axis` should be integer but got {type(axis)}")
4843    arr_axis = _canonicalize_axis(axis, tensor.ndim)
4844
4845    if type(split_size_or_sections) is int:
4846        if split_size_or_sections > 0:
4847            res = _split_int(tensor, split_size_or_sections, arr_axis)
4848        else:
4849            raise ValueError(f"For split, the value of 'split_size_or_sections' must be more than zero, "
4850                             f"but got {split_size_or_sections}.")
4851    elif isinstance(split_size_or_sections, (list, tuple)):
4852        for item in split_size_or_sections:
4853            if type(item) is not int:
4854                raise TypeError(f"Each element in 'split_size_or_sections' should be integer, but got {type(item)}.")
            if item < 0:
                raise ValueError(f"Each element in 'split_size_or_sections' should be non-negative, "
                                 f"but got {split_size_or_sections}.")
4858
4859        if sum(split_size_or_sections) != tensor.shape[arr_axis]:
4860            raise ValueError(f"The sum of 'split_size_or_sections' should be equal to {tensor.shape[arr_axis]}, "
4861                             f"but got {sum(split_size_or_sections)}.")
4862        res = _split_sub_tensors(tensor, split_size_or_sections, arr_axis)
4863    else:
4864        raise TypeError(f"Type of Argument `split_size_or_sections` should be integer, tuple(int) or list(int), " \
4865                        f"but got {type(split_size_or_sections)}")
4866    return tuple(res)
4867

def split_ext(tensor, split_size_or_sections, axis=0):
4869    """
4870    Splits the Tensor into chunks along the given axis.
4871
4872    Args:
4873        tensor (Tensor): A Tensor to be divided.
4874        split_size_or_sections (Union[int, tuple(int), list(int)]):
4875            If `split_size_or_sections` is an int type, `tensor` will be split into equally sized chunks,
4876            each chunk with size `split_size_or_sections`. Last chunk will be smaller than `split_size_or_sections`
4877            if `tensor.shape[axis]` is not divisible by `split_size_or_sections`.
4878            If `split_size_or_sections` is a list type, then `tensor` will be split into len(split_size_or_sections)
4879            chunks with sizes `split_size_or_sections` along the given `axis`.
4880        axis (int): The axis along which to split. Default: ``0`` .
4881
4882    Returns:
4883        A tuple of sub-tensors.
4884
4885    Raises:
4886        TypeError: If argument `tensor` is not Tensor.
4887        TypeError: If argument `axis` is not int.
        ValueError: If argument `axis` is out of range of :math:`[-tensor.ndim, tensor.ndim)` .
4889        TypeError: If each element in `split_size_or_sections` is not integer.
4890        TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
        ValueError: If the sum of `split_size_or_sections` is not equal to tensor.shape[axis].
4892
4893    Supported Platforms:
4894        ``Ascend``
4895
4896    Examples:
4897        >>> import numpy as np
4898        >>> from mindspore import ops, Tensor
4899        >>> input_x = np.arange(9).astype("float32")
4900        >>> output = ops.split_ext(Tensor(input_x), 3)
4901        >>> print(output)
4902        (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]),
4903         Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00,  4.00000000e+00,  5.00000000e+00]),
4904         Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00,  7.00000000e+00,  8.00000000e+00]))
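        >>> # A list-type split (illustrative; the sizes are assumed to sum to
        >>> # tensor.shape[axis], here 2 + 3 + 4 = 9):
        >>> output = ops.split_ext(Tensor(input_x), [2, 3, 4])
        >>> print([o.shape for o in output])
        [(2,), (3,), (4,)]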
4905    """
4906    if isinstance(split_size_or_sections, int):
4907        res = split_tensor(tensor, split_size_or_sections, axis)
4908    elif isinstance(split_size_or_sections, (list, tuple)):
4909        res = split_with_size(tensor, split_size_or_sections, axis)
4910    else:
4911        raise TypeError(f"Type of Argument `split_size_or_sections` should be integer, tuple(int) or list(int), " \
4912                        f"but got {type(split_size_or_sections)}")
4913    return res


def tril(input, diagonal=0):  # pylint: disable=redefined-outer-name
    """
    Returns the lower triangle part of `input` (elements that contain the diagonal and below),
    and sets the other elements to zero.

    Args:
        input (Tensor): A Tensor with shape :math:`(x_1, x_2, ..., x_R)`. The rank must be at least 2.
          Supporting all number types including bool.
        diagonal (int, optional): The diagonal to consider. Default: ``0`` , indicating the main diagonal.

    Returns:
        Tensor, with the same shape and data type as `input`.

    Raises:
        TypeError: If `input` is not a Tensor.
        TypeError: If `diagonal` is not an int.
        TypeError: If the type of `input` is neither number nor bool.
        ValueError: If the rank of `input` is less than 2.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
        ...                      [ 5,  6,  7,  8],
        ...                      [10, 11, 12, 13],
        ...                      [14, 15, 16, 17]]))
        >>> result = ops.tril(x)
        >>> print(result)
        [[ 1  0  0  0]
         [ 5  6  0  0]
         [10 11 12  0]
         [14 15 16 17]]
        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
        ...                      [ 5,  6,  7,  8],
        ...                      [10, 11, 12, 13],
        ...                      [14, 15, 16, 17]]))
        >>> result = ops.tril(x, diagonal=1)
        >>> print(result)
        [[ 1  2  0  0]
         [ 5  6  7  0]
         [10 11 12 13]
         [14 15 16 17]]
        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
        ...                      [ 5,  6,  7,  8],
        ...                      [10, 11, 12, 13],
        ...                      [14, 15, 16, 17]]))
        >>> result = ops.tril(x, diagonal=-1)
        >>> print(result)
        [[ 0  0  0  0]
         [ 5  0  0  0]
         [10 11  0  0]
         [14 15 16  0]]
    """
    tril_ = Tril(diagonal)
    return tril_(input)


@_primexpr
def _canonicalize_axis(axis, ndim):
    """
    Check that axes are within the number of dimensions of the tensor and normalize negative axes.

    Args:
        axis (Union[int, tuple(int), list(int)]): Axes of the tensor.
        ndim (int): The number of dimensions of the tensor.

    Return:
        Axis (Union[int, tuple(int)]). If input is an integer, return an integer, else a tuple.
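
    Example (illustrative only; treating this helper as plain Python):
        _canonicalize_axis(-1, 3)       # -> 2
        _canonicalize_axis((0, -2), 3)  # -> (0, 1)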
4988    """
4989    if isinstance(axis, int):
4990        axis = [axis]
4991    for ax in axis:
4992        if not isinstance(ax, int):
4993            raise TypeError(f'axis should be integers, not {type(ax)}')
4994        if not -ndim <= ax < ndim:
4995            raise ValueError(f'axis {ax} is out of bounds for array of dimension {ndim}')
4996
4997    def canonicalizer(ax):
4998        return ax + ndim if ax < 0 else ax
4999
5000    axis = tuple([canonicalizer(ax) for ax in axis])
5001    if all(axis.count(el) <= 1 for el in axis):
5002        return tuple(sorted(axis)) if len(axis) > 1 else axis[0]
5003    raise ValueError(f"duplicate axis in {axis}.")
5004
5005
@_primexpr
def _list_comprehensions(obj, item=None, return_tuple=False):
    """
    Generates a new list or tuple by list comprehension.

    Args:
        obj (Union[int, list, tuple]):
            If integer, it will be the length of the returned tuple/list.
        item: The value to be filled. Default: ``None`` .
            If ``None`` , the values in the new list/tuple are the same as `obj`,
            or range(obj) when `obj` is an integer.
        return_tuple (bool): If ``True`` , returns a tuple, else returns a list.

    Returns:
        List or tuple.
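
    Examples (illustrative only; treating this helper as plain Python):
        _list_comprehensions(3)           # -> [0, 1, 2]
        _list_comprehensions(2, 0, True)  # -> (0, 0)
        _list_comprehensions((5, 6))      # -> [5, 6]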
5021    """
5022    lst = obj
5023    if isinstance(obj, int):
5024        lst = []
5025        for i in ms_arrange(obj):
5026            lst.append(i)
5027    if item is None:
5028        res = list(lst)
5029    else:
5030        res = [item for _ in lst]
5031    if return_tuple:
5032        return tuple(res)
5033    return res
5034
5035
@_primexpr
def _tuple_setitem(tup, idx, value):
    """
    Returns a tuple with the element at the specified `idx` set to `value`.
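    For example (illustrative), _tuple_setitem((2, 3, 4), 1, 7) returns (2, 7, 4).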
5040    """
5041    tup = list(tup)
5042    tup[idx] = value
5043    return tuple(tup)
5044
5045
5046def _tensor_split_sub_tensors(x, indices_or_sections, axis):
5047    """
5048    Splits the input tensor `x` into multiple sub-tensors along the axis according to the given `indices_or_sections`
5049    with type of tuple or list.
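    For example (illustrative), indices (1, 4) on a length-9 axis yield slices [0:1], [1:4] and [4:9].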
5050    """
5051    length_along_dim = x.shape[axis]
5052    indices_or_sections = tuple(indices_or_sections)
5053    indices_or_sections += (length_along_dim,)
5054
5055    sub_tensors = []
5056    strides = _list_comprehensions(x.ndim, 1, True)
5057    begin = _list_comprehensions(x.ndim, 0)
5058    end = _list_comprehensions(x.shape)
5059    for i in ms_arrange(len(indices_or_sections)):
5060        idx = indices_or_sections[i]
5061        begin[axis] = 0 if i == 0 else indices_or_sections[i - 1]
5062        end[axis] = idx
5063        sliced_tensor = strided_slice(x, tuple(begin), tuple(end), strides)
5064        sub_tensors.append(sliced_tensor)
5065    return tuple(sub_tensors)
5066
5067
5068def _tensor_split_sub_int(x, indices_or_sections, axis):
5069    """
5070    Splits the input tensor `x` into multiple sub-tensors along the axis according to the given `indices_or_sections`
5071    with type if int.
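    For example (illustrative), a length-9 axis split into 4 sections yields sizes (3, 2, 2, 2).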
5072    """
5073    arr_shape = x.shape
5074    length_along_dim = arr_shape[axis]
5075    if indices_or_sections > length_along_dim:
5076        res = _get_cache_prim(P.Split)(axis, length_along_dim)(x)
5077        indices_or_sections_n = [length_along_dim, length_along_dim + 1]
5078        res2 = _tensor_split_sub_tensors(x, indices_or_sections_n, axis)
5079        for _ in np.arange(length_along_dim, indices_or_sections):
5080            res += tuple(res2)[1:]
5081    elif length_along_dim % indices_or_sections == 0:
5082        res = _get_cache_prim(P.Split)(axis, indices_or_sections)(x)
5083    else:
5084        num_long_tensor = length_along_dim % indices_or_sections
5085        num_short_tensor = indices_or_sections - num_long_tensor
5086        length1 = num_long_tensor * (length_along_dim // indices_or_sections + 1)
5087        length2 = length_along_dim - length1
5088        start1 = _list_comprehensions(rank_(x), 0, True)
5089        size1 = _tuple_setitem(arr_shape, axis, length1)
5090        start2 = _tuple_setitem(start1, axis, length1)
5091        size2 = _tuple_setitem(arr_shape, axis, length2)
5092        res = _get_cache_prim(P.Split)(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
5093              _get_cache_prim(P.Split)(axis, num_short_tensor)(tensor_slice(x, start2, size2))
5094    return res
5095
5096
def tensor_split(input, indices_or_sections, axis=0):
    r"""
    Splits a tensor into multiple sub-tensors along the given axis.

    Args:
        input (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):

            - If `indices_or_sections` is an integer n, the input tensor will be split into n sections.

              - If :math:`input.shape[axis]` is divisible by n, sub-sections will have equal size
                :math:`input.shape[axis] / n` .
              - If :math:`input.shape[axis]` is not divisible by n, the first :math:`input.shape[axis] \bmod n`
                sections will have size :math:`input.shape[axis] // n + 1` , and the rest will have
                size :math:`input.shape[axis] // n` .
            - If `indices_or_sections` is of type tuple(int) or list(int), the input tensor will be split at the
              indices in the list or tuple. For example, given parameters :math:`indices\_or\_sections=[1, 4]`
              and :math:`axis=0` , the input tensor will be split into sections :math:`input[:1]` ,
              :math:`input[1:4]` , and :math:`input[4:]` .

        axis (int): The axis along which to split. Default: ``0`` .

    Returns:
        A tuple of sub-tensors.

    Raises:
        TypeError: If argument `input` is not Tensor.
        TypeError: If argument `axis` is not int.
        ValueError: If argument `axis` is out of range of :math:`[-input.ndim, input.ndim)` .
        TypeError: If each element in `indices_or_sections` is not integer.
        TypeError: If argument `indices_or_sections` is not int, tuple(int) or list(int).

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_x = np.arange(9).astype("float32")
        >>> output = ops.tensor_split(Tensor(input_x), 3)
        >>> print(output)
        (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]),
        Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00,  4.00000000e+00,  5.00000000e+00]),
        Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00,  7.00000000e+00,  8.00000000e+00]))
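        >>> # Splitting at explicit indices (illustrative; equivalent to
        >>> # input[:1], input[1:4], input[4:]):
        >>> output = ops.tensor_split(Tensor(input_x), [1, 4])
        >>> print([o.shape for o in output])
        [(1,), (3,), (5,)]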
5141    """
5142    if not isinstance(input, Tensor):
5143        raise TypeError(f'expect `x` is a Tensor, but got {type(input)}')
5144
5145    if type(axis) is not int:
5146        raise TypeError(f"Type of Argument `axis` should be integer but got {type(axis)}")
5147    handle_axis = _canonicalize_axis(axis, input.ndim)
5148    if type(indices_or_sections) is int:
5149        if indices_or_sections > 0:
5150            res = _tensor_split_sub_int(input, indices_or_sections, handle_axis)
5151        else:
5152            raise ValueError(f"For tensor_split, the value of 'indices_or_sections' must be more than zero "
5153                             f"but got {indices_or_sections}")
5154    elif isinstance(indices_or_sections, (list, tuple)):
5155        for item in indices_or_sections:
5156            if type(item) is not int:
5157                raise TypeError(f"Each element in 'indices_or_sections' should be integer, but got {type(item)}.")
5158        res = _tensor_split_sub_tensors(input, indices_or_sections, handle_axis)
5159    else:
5160        raise TypeError(f"Type of Argument `indices_or_sections` should be integer, tuple(int) or list(int), " \
5161                        f"but got {type(indices_or_sections)}")
5162
5163    return res
5164
5165
def vsplit(input, indices_or_sections):
    """
    Splits `input`, a tensor with two or more dimensions, into multiple sub-tensors vertically
    according to `indices_or_sections`.

    It is equivalent to `ops.tensor_split` with :math:`axis=0` .

    Args:
        input (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]): See argument in :func:`mindspore.ops.tensor_split`.

    Returns:
        A tuple of sub-tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_x = np.arange(9).reshape((3, 3)).astype('float32')
        >>> output = ops.vsplit(Tensor(input_x), 3)
        >>> print(output)
        (Tensor(shape=[1, 3], dtype=Float32, value=[[ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]]),
         Tensor(shape=[1, 3], dtype=Float32, value=[[ 3.00000000e+00,  4.00000000e+00,  5.00000000e+00]]),
         Tensor(shape=[1, 3], dtype=Float32, value=[[ 6.00000000e+00,  7.00000000e+00,  8.00000000e+00]]))
    """
    if not isinstance(input, Tensor):
        raise TypeError(f'expect `input` is a Tensor, but got {type(input)}')
    if input.ndim < 1:
        raise ValueError(f'vsplit expects `input` to be a Tensor with at least 1 dimension, but got {input.ndim}')
    return tensor_split(input, indices_or_sections, 0)


def hsplit(input, indices_or_sections):
    """
    Splits a tensor into multiple sub-tensors horizontally.
    It is equivalent to `ops.tensor_split` with :math:`axis=1` .

    Args:
        input (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]): See argument in :func:`mindspore.ops.tensor_split`.

    Returns:
        A tuple of sub-tensors.

    Raises:
        TypeError: If `input` is not Tensor.
        ValueError: If dimension of `input` is less than 2.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_x = np.arange(6).reshape((2, 3)).astype('float32')
        >>> output = ops.hsplit(Tensor(input_x), 3)
        >>> print(output)
        (Tensor(shape=[2, 1], dtype=Float32, value=[[ 0.00000000e+00], [ 3.00000000e+00]]),
         Tensor(shape=[2, 1], dtype=Float32, value=[[ 1.00000000e+00], [ 4.00000000e+00]]),
         Tensor(shape=[2, 1], dtype=Float32, value=[[ 2.00000000e+00], [ 5.00000000e+00]]))
    """
    if not isinstance(input, Tensor):
        raise TypeError(f'expect `input` is a Tensor, but got {type(input)}')
    if input.ndim < 2:
        raise ValueError(f'hsplit expects `input` to be a Tensor with at least 2 dimensions, but got {input.ndim}')

    return tensor_split(input, indices_or_sections, 1)


def dsplit(input, indices_or_sections):
    """
    Splits a tensor into multiple sub-tensors along the 3rd axis.
    It is equivalent to `ops.tensor_split` with :math:`axis=2` .

    Args:
        input (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]): See argument in :func:`mindspore.ops.tensor_split`.

    Returns:
        A tuple of sub-tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_x = np.arange(6).reshape((1, 2, 3)).astype('float32')
        >>> output = ops.dsplit(Tensor(input_x), 3)
        >>> print(output)
        (Tensor(shape=[1, 2, 1], dtype=Float32, value=[[[ 0.00000000e+00], [ 3.00000000e+00]]]),
         Tensor(shape=[1, 2, 1], dtype=Float32, value=[[[ 1.00000000e+00], [ 4.00000000e+00]]]),
         Tensor(shape=[1, 2, 1], dtype=Float32, value=[[[ 2.00000000e+00], [ 5.00000000e+00]]]))
    """
    if not isinstance(input, Tensor):
        raise TypeError(f'expect `input` is a Tensor, but got {type(input)}')
    if input.ndim < 3:
        raise ValueError(f'dsplit expects `input` to be a Tensor with at least 3 dimensions, but got {input.ndim}')

    return tensor_split(input, indices_or_sections, 2)


def _init_and_select_elem(input, initial, where, cmp_fn):  # pylint: disable=redefined-outer-name
    """Initialize the input according to `initial`, and select elements according to `where`."""
    if initial is not None:
        initial = ops.fill(input.dtype, input.shape, initial)
        input = cmp_fn(input, initial)

    if where is not None and not isinstance(where, Tensor):
        where = Tensor(where, dtype=mstype.bool_)

    if where is not None and (where.shape or not where):
        if initial is None:
            raise ValueError('initial value must be provided for where masks')
        where = where.broadcast_to(input.shape)
        initial = initial.broadcast_to(input.shape)
        input = ops.select(where, input, initial)
    return input


def max(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name
    """
    Calculates the maximum value along the given axis for the input tensor, and returns the maximum values and
    indices.

    Note:
        - In auto_parallel and semi_auto_parallel mode, the first output index cannot be used.
        - When `axis` is ``None``, `keepdims` and subsequent parameters have no
          effect. At the same time, the index is fixed to return 0.

    .. warning::
        - If there are multiple maximum values, the index of the first maximum value is used.
        - The value range of `axis` is [-dims, dims - 1]. "dims" is the dimension length of `input`.

    Also see: :class:`mindspore.ops.ArgMaxWithValue`.

    Args:
        input (Tensor): The input tensor, can be any dimension. Complex tensor is not supported for now.
        axis (int): The dimension to reduce. When `axis` is ``None``, computes the maximum value of all elements
            in `input` . Default: ``None`` .
        keepdims (bool): Whether to keep dimension. If ``True`` , the output will keep the same dimension as the
            input; if ``False`` , the output will reduce the dimension. Default: ``False`` .

    Keyword Args:
        initial (scalar, optional): The minimum value of an output element. Must be present to allow computation
            on an empty slice. Default: ``None`` .
        where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input`
            with the value in `initial`. If ``True`` , do not replace, otherwise replace. For the index of ``True``
            in `where`, the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates
            ``True`` by default.

    Returns:
        tuple (Tensor), a tuple of 2 tensors, containing the maximum value of the input tensor and the corresponding
        index.

        - values (Tensor) - The maximum value of the input tensor, with the same shape as index, and same dtype
          as `input`.
        - index (Tensor) - The index for the maximum value of the input tensor, with dtype int64. If `keepdims`
          is true, the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1},
          ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1},
          ..., input_N)` .

    Raises:
        TypeError: If `input` is not Tensor.
        TypeError: If `keepdims` is not a bool.
        TypeError: If `axis` is not an int.
        TypeError: If `initial` is not a number.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
        >>> output, index = ops.max(x)
        >>> print(output, index)
        0.7 0
        >>> y = Tensor(np.array([[0.0, 0.3, 0.4, 0.5, 0.1],
        ...                      [3.2, 0.4, 0.1, 2.9, 4.0]]), mindspore.float32)
        >>> output, index = ops.max(y, axis=0, keepdims=True)
        >>> print(output, index)
        [[3.2 0.4 0.4 2.9 4. ]] [[1 1 0 1 1]]
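        >>> # Illustrative use of `initial` and `where`: entries masked out by `where`
        >>> # fall back to `initial` before the reduction.
        >>> mask = Tensor(np.array([True, False, True, True, True]))
        >>> output, index = ops.max(x, axis=0, initial=0.5, where=mask)
        >>> print(output, index)
        0.7 3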
5351    """
5352    if not input.shape:
5353        return (input, Tensor(0, dtype=mstype.int64))
5354    if axis is None:
5355        return (max_(input), Tensor(0, dtype=mstype.int64))
5356    if initial is not None and not isinstance(initial, numbers.Number):
5357        raise TypeError(f"For 'max', 'initial' must be a scalar, but got {type(initial)}")
5358    if axis is not None and not isinstance(axis, int):
5359        raise TypeError(f"For 'max', 'axis' must be int, but got {type(axis)}")
5360    input = _init_and_select_elem(input, initial, where, ops.maximum)
5361    argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(axis, keepdims)
5362    indices, values = argmax_with_value_op(input)
5363    return values, indices
5364
5365
def argmax(input, dim=None, keepdim=False):
    """
    Return the indices of the maximum values of a tensor across a dimension.

    Args:
        input (Tensor): Input tensor.
        dim (Union[int, None], optional): The dimension to reduce. If `dim` is ``None`` , the indices of the maximum
            value within the flattened input will be returned. Default: ``None`` .
        keepdim (bool, optional): Whether the output tensor retains the specified
            dimension. Ignored if `dim` is None. Default: ``False`` .

    Returns:
        Tensor, indices of the maximum values across a dimension.

    Raises:
        TypeError: If `keepdim` is not bool.
        ValueError: If `dim` is out of range.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
        >>> output = ops.argmax(x, dim=-1)
        >>> print(output)
        [1 0 0]
    """
    _check_attr_dtype("keepdim", keepdim, [bool], "argmax")
    if not input.shape:
        return Tensor(0)
    if input.dtype == mstype.bool_:
        input = input.astype(mstype.int32)
    is_dim_none = False
    if dim is None:
        input = reshape_(input, (-1,))
        dim = 0
        is_dim_none = True
    out = _get_cache_prim(Argmax)(dim, mstype.int64)(input)
    if keepdim and not is_dim_none:
        out = expand_dims(out, dim)
    return out


def min(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name
    """
    Calculates the minimum value along the given axis for the input tensor, and returns the minimum values and
    indices.

    Note:
        - In auto_parallel and semi_auto_parallel mode, the first output index cannot be used.
        - When `axis` is ``None``, `keepdims` and subsequent parameters have no
          effect. At the same time, the index is fixed to return 0.

    .. warning::
        - If there are multiple minimum values, the index of the first minimum value is used.
        - The value range of `axis` is [-dims, dims - 1]. "dims" is the dimension length of `input`.

    Args:
        input (Tensor): The input tensor, can be any dimension. Complex tensor is not supported for now.
        axis (int): The dimension to reduce. Default: ``None`` .
        keepdims (bool): Whether to keep dimension. If ``True`` , the output will keep the same dimension as the
            input; if ``False`` , the output will reduce the dimension. Default: ``False`` .

    Keyword Args:
        initial (scalar, optional): The maximum value of an output element. Must be present to allow computation
            on an empty slice. Default: ``None`` .
        where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input`
            with the value in `initial`. If ``True`` , do not replace, otherwise replace. For the index of ``True``
            in `where`, the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates
            ``True`` by default.

    Returns:
        tuple (Tensor), a tuple of 2 tensors, containing the minimum value of the input tensor and the corresponding
        index.

        - **values** (Tensor) - The minimum value of the input tensor, with the same
          shape as `index`, and same dtype as `input`.
        - **index** (Tensor) - The index for the minimum value of the input tensor, with dtype int32. If `keepdims`
          is true, the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1},
          ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1},
          ..., input_N)` .

    Raises:
        TypeError: If `input` is not Tensor.
        TypeError: If `keepdims` is not a bool.
        TypeError: If `axis` is not an int.
        TypeError: If `initial` is not a number.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
        >>> output, index = ops.min(x, keepdims=True)
        >>> print(output, index)
        0.0 0
    """
    if not input.shape:
        return (input, Tensor(0, dtype=mstype.int64))
    if axis is None:
        return (min_(input), Tensor(0, dtype=mstype.int64))
    if initial is not None and not isinstance(initial, numbers.Number):
        raise TypeError(f"For 'min', 'initial' must be a scalar, but got {type(initial)}")
    if axis is not None and not isinstance(axis, int):
        raise TypeError(f"For 'min', 'axis' must be int, but got {type(axis)}")
    input = _init_and_select_elem(input, initial, where, ops.minimum)
    argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(axis, keepdims)
    indices, values = argmin_with_value_op(input)
    return values, indices


def aminmax(input, *, axis=0, keepdims=False):
    """
    Returns the minimum and maximum values along the given axis of the input tensor.

    Args:
        input (Tensor): The input tensor, can be any dimension. Set the shape of input tensor as
          :math:`(x_1, x_2, ..., x_N)` .

    Keyword Args:
        axis (int, optional): The dimension to reduce. The value range of `axis` is [-rank, rank),
            where "rank" is the dimension of `input`. If `axis` is None, computes the minimum and maximum value
            along the entire input tensor. Default: ``0`` .
        keepdims (bool, optional): Whether to maintain dimension. When set to ``True`` , the output will keep the
            same dimension as the input; otherwise, the dimension specified by `axis` is reduced. Default: ``False`` .

    Returns:
        tuple (Tensor), containing the minimum value and maximum value of the input tensor.

        - If `keepdims` is True, the shape of output tensors is
          :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`.
        - If `keepdims` is False, the shape of output tensors is
          :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.

    Raises:
        TypeError: If `keepdims` is not a bool.
        TypeError: If `axis` is not an int and not None.
        ValueError: If `axis` is not in range [-rank, rank).

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
        >>> output0, output1 = ops.aminmax(x)
        >>> print(output0, output1)
        0.0 0.7
        >>> output2, output3 = ops.aminmax(x, axis=-1, keepdims=True)
        >>> print(output2, output3)
        [0.] [0.7]
        >>> x = Tensor(np.array([[0.0, 0.4, 0.6, 0.7, 0.1], [0.78, 0.97, 0.5, 0.82, 0.99]]), mindspore.float32)
        >>> output4, output5 = ops.aminmax(x, axis=None, keepdims=True)
        >>> print(output4, output5)
        [[0.]] [[0.99]]
    """
    if axis is None:
        output0, _ = ops.min(input, axis, keepdims)
        output1, _ = ops.max(input, axis, keepdims)
        if keepdims is True:
            output0 = ops.reshape(output0, [1] * input.ndim)
            output1 = ops.reshape(output1, [1] * input.ndim)
        return output0, output1
    argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(axis, keepdims)
    argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(axis, keepdims)
    _, output0 = argmin_with_value_op(input)
    _, output1 = argmax_with_value_op(input)
    if keepdims is True and input.ndim == 0:
        output0 = ops.reshape(output0, [1])
        output1 = ops.reshape(output1, [1])
    return output0, output1


def narrow(input, axis, start, length):
    """
    Returns a narrowed tensor from `input`; the dimension `axis` spans from `start` to `start + length`.

    Args:
        input (Tensor): the tensor to narrow.
        axis (int): the axis along which to narrow.
        start (int): the starting index along `axis`.
        length (int): the length of the narrowed dimension.

    Returns:
        Tensor.

        - output (Tensor) - The narrowed tensor.

    Raises:
        TypeError: If `input` is not a Tensor.
        ValueError: If `axis`, `start` or `length` is out of range.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import ops
        >>> from mindspore import Tensor
        >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
        >>> output = ops.narrow(x, 0, 0, 2)
        >>> print(output)
        [[ 1 2 3]
         [ 4 5 6]]
        >>> output = ops.narrow(x, 1, 1, 2)
        >>> print(output)
        [[ 2 3]
         [ 5 6]
         [ 8 9]]
    """
    validator.check_value_type("input", input, Tensor, "narrow")
    validator.check_axis_in_range(axis, input.ndim)
    validator.check_int_range(start, 0, input.shape[axis], validator.INC_LEFT)
    validator.check_int_range(length, 1, input.shape[axis] - start, validator.INC_BOTH)

    begins = [0] * input.ndim
    begins[axis] = start
    sizes = list(input.shape)
    sizes[axis] = length
    return tensor_slice(input, begins, sizes)


def narrow_ext(input, dim, start, length):
    """
    Returns a narrowed tensor from `input`; the dimension `dim` spans from `start` to `start + length`.

    Args:
        input (Tensor): the tensor to narrow.
        dim (int): the dimension along which to narrow.
        start (int): the starting index along `dim`.
        length (int): the length of the narrowed dimension.

    Returns:
        Tensor.

    Raises:
        ValueError: If `dim` is out of range [-input.ndim, input.ndim).
        ValueError: If `start` is out of range [-input.shape[dim], input.shape[dim]].
        ValueError: If `length` is out of range [0, input.shape[dim]-start].

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore
        >>> from mindspore import ops
        >>> from mindspore import Tensor
        >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
        >>> output = ops.narrow(x, 0, 0, 2)
        >>> print(output)
        [[ 1 2 3]
         [ 4 5 6]]
        >>> output = ops.narrow(x, 1, 1, 2)
        >>> print(output)
        [[ 2 3]
         [ 5 6]
         [ 8 9]]
    """
    validator.check_value_type("input", input, Tensor, "narrow")
    return slice_ext_op(input, dim, start, start+length, 1)


def topk(input, k, dim=None, largest=True, sorted=True):
    r"""
    Finds values and indices of the `k` largest or smallest entries along a given dimension.

    .. warning::
        - If sorted is set to False, the aicpu operator will be used, which may reduce performance. In addition,
          due to different memory layout and traversal methods on different platforms, the display order of
          calculation results may be inconsistent when `sorted` is False.

    If the `input` is a one-dimensional Tensor, finds the `k` largest or smallest entries in the Tensor,
    and outputs its value and index as a Tensor. values[`k`] is the `k`-th largest item in `input`,
    and its index is indices[`k`].

    For a multi-dimensional matrix,
    calculates the first or last `k` entries in a given dimension, therefore:

    .. math::

        values.shape = indices.shape

    If the two compared elements are the same, the one with the smaller index value is returned first.

    Args:
        input (Tensor): Input to be computed, data type must be float16, float32 or int32.
        k (int): The number of top or bottom elements to be computed along the last dimension.
        dim (int, optional): The dimension to sort along. Default: ``None`` .
        largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
            Default: ``True`` .
        sorted (bool, optional): If ``True`` , the obtained elements will be sorted by the values in descending order.
            If ``False`` , the obtained elements will not be sorted. Default: ``True`` .

    Returns:
        A tuple consisting of `values` and `indices`.

        - values (Tensor): The `k` largest or smallest elements in each slice of the given dimension.
        - indices (Tensor): The indices of values within the last dimension of input.

    Raises:
        TypeError: If `sorted` is not a bool.
        TypeError: If `input` is not a Tensor.
        TypeError: If `k` is not an int.
        TypeError: If dtype of `input` is not one of the following: float16, float32 or int32.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore as ms
        >>> from mindspore import ops
        >>> x = ms.Tensor([[0.5368, 0.2447, 0.4302, 0.9673],
        ...                [0.4388, 0.6525, 0.4685, 0.1868],
        ...                [0.3563, 0.5152, 0.9675, 0.8230]], dtype=ms.float32)
        >>> output = ops.topk(x, 2, dim=1)
        >>> print(output)
        (Tensor(shape=[3, 2], dtype=Float32, value=
        [[ 9.67299998e-01,  5.36800027e-01],
         [ 6.52499974e-01,  4.68499988e-01],
         [ 9.67499971e-01,  8.23000014e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
        [[3, 0],
         [1, 2],
         [2, 3]]))
        >>> output2 = ops.topk(x, 2, dim=1, largest=False)
        >>> print(output2)
        (Tensor(shape=[3, 2], dtype=Float32, value=
        [[ 2.44700000e-01,  4.30200011e-01],
         [ 1.86800003e-01,  4.38800007e-01],
         [ 3.56299996e-01,  5.15200019e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
        [[1, 2],
         [3, 0],
         [0, 1]]))
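        >>> # A dim other than the last one goes through the swapaxes path below
        >>> # (illustrative; only the shapes are shown):
        >>> values, indices = ops.topk(x, 2, dim=0)
        >>> print(values.shape, indices.shape)
        (2, 4) (2, 4)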
5707    """
5708    top_k_ = _get_cache_prim(P.TopK)(sorted)
5709    if not largest:
5710        input = -input
5711    if dim is None or dim == input.ndim - 1:
5712        if not largest:
5713            res = top_k_(input, k)
5714            values, indices = -res[0], res[1]
5715            return values, indices
5716        return top_k_(input, k)
5717    input = input.swapaxes(dim, input.ndim - 1)
5718    output = top_k_(input, k)
5719    values = output[0].swapaxes(dim, input.ndim - 1)
5720    indices = output[1].swapaxes(dim, input.ndim - 1)
5721    if not largest:
5722        res = (-values, indices)
5723    else:
5724        res = (values, indices)
5725    return res
5726
5727
def expand(input_x, size):
    r"""
    :func:`mindspore.ops.expand` will be deprecated in the future.
    Please use :func:`mindspore.ops.broadcast_to` instead.
    """
    expand_op = _get_cache_prim(Expand)()
    return expand_op(input_x, size)


@_primexpr
def _check_fold_param(param, param_name):
    """Check the parameters of the fold op."""
    validator.check_value_type(param_name, param, [int, list, tuple], 'fold')
    param = (param, param) if isinstance(param, int) else param
    validator.check_int(len(param), 2, validator.EQ, param_name, 'fold')
    if param_name == "padding":
        validator.check_non_negative_int_sequence(param, param_name, 'fold')
    else:
        validator.check_positive_int_sequence(param, param_name, 'fold')
    return param


def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
    r"""
    Combines an array of sliding local blocks into a large containing tensor.

    Consider a batched input tensor of shape :math:`(N, C \times \prod(\text{kernel_size}), L)` ,
    where :math:`N` is the batch dimension, :math:`C \times \prod(\text{kernel_size})` is the
    total number of values within each block (a block has :math:`\prod(\text{kernel_size})` spatial
    locations each containing a `C`-channeled vector), and :math:`L` is the total number of such blocks:

    .. math::
        L = \prod_d \left\lfloor\frac{\text{output_size}[d] + 2 \times \text{padding}[d] %
            - \text{dilations}[d] \times (\text{kernel_size}[d] - 1) - 1}{\text{strides}[d]} + 1\right\rfloor,

    where :math:`d` is over all spatial dimensions.

    Therefore, `output_size` is the spatial shape of the large containing tensor of the sliding local blocks.

    The `dilation`, `padding` and `stride` arguments specify how the sliding blocks are retrieved.

    .. warning::
        - The input must be a 3-dimensional Tensor with shape
          :math:`(N, C \times \prod(\text{kernel_size}), L)` .
        - The output must be a 4-dimensional Tensor with shape
          :math:`(N, C, output\_size[0], output\_size[1], ...)` .

    Args:
        input (Tensor): 3-D Tensor, supported dtypes: float16, float32, float64, complex64 and complex128.
        output_size (Tensor): 1D tensor with `2` elements of data type int.
        kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two int
            for height and width. If type is int, the height equals the width. Must be specified.
        dilation (Union[int, tuple[int], list[int]], optional): The size of the dilation, should be two int
            for height and width. If type is int, the height equals the width. Default: ``1`` .
        padding (Union[int, tuple[int], list[int]], optional): The size of the padding, should be two int
            for height and width. If type is int, the height equals the width. Default: ``0`` .
        stride (Union[int, tuple[int], list[int]], optional): The size of the stride, should be two int
            for height and width. If type is int, the height equals the width. Default: ``1`` .

    Returns:
        A Tensor, with same type as `input` . And its shape is as described above.

    Raises:
        TypeError: If `output_size`, `kernel_size`, `stride`, `dilation`, `padding` data type is not int, tuple or list.
        ValueError: If `output_size`, `kernel_size`, `dilation`, `stride` value is not
            greater than zero or elements number more than `2`.
        ValueError: If `padding` value is less than zero or elements number more than `2`.
        ValueError: If `input.shape[1] != kernel_size[0] * kernel_size[1]` .
        ValueError: If `input.shape[2]` does not match the calculated number of sliding blocks.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> x = Tensor(input_data=np.random.rand(16, 64, 25), dtype=mstype.float32)
        >>> output_size = Tensor(input_data=[8, 8], dtype=mstype.int32)
        >>> output = ops.fold(x, output_size, [2, 2], [2, 2], [2, 2], [2, 2])
        >>> print(output.shape)
        (16, 16, 8, 8)
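        >>> # Sanity check of L for this example, per the formula above:
        >>> # each spatial dim gives floor((8 + 2*2 - 2*(2-1) - 1) / 2) + 1 = 5,
        >>> # so L = 5 * 5 = 25, matching x.shape[2].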
5810    """
5811    kernel_size = _check_fold_param(kernel_size, "kernel_size")
5812    dilation = _check_fold_param(dilation, "dilation")
5813    padding = _check_fold_param(padding, "padding")
5814    stride = _check_fold_param(stride, "stride")
5815    fold_op = _get_cache_prim(Col2Im)(kernel_size, dilation, padding, stride)
5816    input_shape = ops.shape(input)
5817    k = kernel_size[0] * kernel_size[-1]
5818    r_shape = input_shape[:1] + (-1, k) + input_shape[-1:]
5819    input = ops.reshape(input, r_shape)
5820    return fold_op(input, output_size)
5821
5822
@_primexpr
def _check_unfold_params(param, param_name, param_size):
    """Check the parameters of the unfold op."""
    validator.check_value_type(param_name, param, [int, tuple, list], 'unfold')
    param = (param, param) if isinstance(param, int) else param
    validator.check(param_name + " size", len(param), "", param_size, validator.IN, 'unfold')
    if param_name == "padding":
        validator.check_non_negative_int_sequence(param, param_name, 'unfold')
    else:
        validator.check_positive_int_sequence(param, param_name, 'unfold')
    return param


def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
    r"""
    Extracts sliding local blocks from a batched input tensor.

    Consider a batched input tensor of shape :math:`(N, C, *)`,
    where :math:`N` is the batch dimension, :math:`C` is the channel dimension,
    and :math:`*` represent arbitrary spatial dimensions. This operation flattens
    each sliding `kernel_size`-sized block within the spatial dimensions
    of input `x` into a column (i.e., last dimension) of a 3-D output
    tensor of shape :math:`(N, C \times \prod(\text{kernel_size}), L)`, where
    :math:`C \times \prod(\text{kernel_size})` is the total number of values
    within each block (a block has :math:`\prod(\text{kernel_size})` spatial
    locations each containing a `C`-channeled vector), and :math:`L` is
    the total number of such blocks:

    .. math::
        L = \prod_d \left\lfloor\frac{\text{spatial_size}[d] + 2 \times \text{pads}[d] %
            - \text{dilations}[d] \times (\text{kernel_size}[d] - 1) - 1}{\text{strides}[d]} + 1\right\rfloor,

    where :math:`\text{spatial_size}` is formed by the spatial dimensions
    of input `x` (:math:`*` above), and :math:`d` is over all spatial
    dimensions.

    Therefore, indexing `output` at the last dimension (column dimension)
    gives all values within a certain block.

    The `dilation`, `padding` and `stride` arguments specify
    how the sliding blocks are retrieved.

    .. warning::
        - The output is a 3-dimensional Tensor whose shape is
          :math:`(N, C \times \prod(\text{kernel_size}), L)` .
        - This is an experimental API that is subject to change or deletion.

    Args:
        input (Tensor): 4-D Tensor, supported dtypes: float16, float32, float64, complex64 and complex128.
        kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two int
            for height and width. If type is int, the height equals the width. Must be specified.
        dilation (Union[int, tuple[int], list[int]], optional): The dilation of the window, should be two int
            for height and width. If type is int, the height equals the width. Default: ``1`` .
        padding (Union[int, tuple[int], list[int]], optional): The pad of the window, that must be
            a tuple/list of one or two `int` for height and width. Default: ``0`` .

            - If one int, pad_height = pad_width.
            - If two int, pad_height = padding[0], pad_width = padding[1].

        stride (Union[int, tuple[int], list[int]], optional): The stride of the window, should be two int
            for height and width. If type is int, the height equals the width. Default: ``1`` .

    Returns:
        A Tensor, with same type as `input` . And its shape is as described above.

    Raises:
        TypeError: If any data type of `kernel_size`, `stride`, `dilation`, `padding` is not int, tuple or list.
        ValueError: If `kernel_size`, `dilation`, `stride` value is not
            greater than zero or elements number more than `2`.
        ValueError: If `padding` value is less than zero.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.random.rand(4, 4, 32, 32), mindspore.float64)
        >>> output = ops.unfold(x, kernel_size=3, dilation=1, stride=1)
        >>> print(output.shape)
        (4, 36, 900)
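        >>> # Sanity check of the output shape, per the formula above:
        >>> # each spatial dim gives floor((32 + 0 - 1*(3-1) - 1) / 1) + 1 = 30, so L = 30 * 30 = 900,
        >>> # and the column size is C * prod(kernel_size) = 4 * 3 * 3 = 36.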
5905    """
5906    kernel_size = _check_unfold_params(kernel_size, "kernel_size", [1, 2])
5907    dilation = _check_unfold_params(dilation, "dilation", [1, 2])
5908    padding = _check_unfold_params(padding, "padding", [1, 2])
5909    stride = _check_unfold_params(stride, "stride", [1, 2])
5910    unfold_op = _get_cache_prim(Im2Col)(ksizes=kernel_size,
5911                                        strides=stride,
5912                                        dilations=dilation,
5913                                        pads=padding)
5914    tmp = unfold_op(input)
5915    tmp_shape = ops.shape(tmp)
5916    out_shape = tmp_shape[:1] + (-1,) + tmp_shape[-1:]
5917    out = ops.reshape(tmp, out_shape)
5918    return out
5919
5920
@_primexpr
def _check_diagonal_axes(dim1, dim2, x_ndim):
    """Check and normalize the diagonal axes `dim1` and `dim2`."""
    axes = validator.check_axis_valid((dim1, dim2), x_ndim)
    return axes


def _check_is_tensor(param_name, input, cls_name):
    """Raises TypeError if input is not a Tensor."""
    if not isinstance(input, Tensor):
        raise TypeError(f"For {cls_name}, {param_name} must be a Tensor, but got {type(input)}.")


@_primexpr
def _check_diagonal_scatter_shape(diag_shape, src_shape):
    if diag_shape != src_shape:
        raise ValueError(f"For diagonal_scatter, the shape of src should be equal to the shape of input diagonal, "
                         f"but got src.shape {src_shape} and diagonal shape {diag_shape}.")


def diagonal_scatter(input, src, offset=0, dim1=0, dim2=1):
    """
    `dim1` and `dim2` specify the two dimensions of `input`;
    the elements in these two dimensions will be treated as elements of a matrix,
    and `src` is embedded on the diagonal of the matrix.

    Note:
        Currently, ``inf`` values of elements in `input` or `src` are not supported.

    Args:
        input (Tensor): Input Tensor, whose dimension is larger than 1.
        src (Tensor): The source Tensor to embed.
        offset (int, optional): `offset` controls which diagonal to choose. Default: ``0`` .

            - When `offset` is zero, the diagonal chosen is the main diagonal.
            - When `offset` is a positive integer, the diagonal chosen is above the main diagonal.
            - When `offset` is a negative integer, the diagonal chosen is below the main diagonal.

        dim1 (int, optional): Axis to be used as the first axis of the 2-D
            sub-arrays from which the diagonals should be taken. Default: ``0`` .
        dim2 (int, optional): Axis to be used as the second axis of the 2-D
            sub-arrays from which the diagonals should be taken. Default: ``1`` .

    Returns:
        Tensor after embedding, has the same shape and dtype as `input`.

    Raises:
        TypeError: If `input` or `src` is not a Tensor.
        TypeError: If `offset` , `dim1` or `dim2` is not an integer.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore as ms
        >>> input = ms.ops.zeros((3,3))
        >>> src = ms.ops.ones(2)
        >>> out = ms.ops.diagonal_scatter(input, src, 1, dim1=1, dim2=0)
        >>> print(out)
        [[0. 0. 0.]
         [1. 0. 0.]
         [0. 1. 0.]]
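        >>> # A non-square input is padded to a square internally and sliced back
        >>> # afterwards (illustrative):
        >>> out = ms.ops.diagonal_scatter(ms.ops.zeros((2, 3)), ms.ops.ones(2))
        >>> print(out)
        [[1. 0. 0.]
         [0. 1. 0.]]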
5983    """
5984    _check_is_tensor("input", input, "diagonal_scatter")
5985    _check_is_tensor("src", src, "diagonal_scatter")
5986    input_diag = input.diagonal(offset, dim1, dim2)
5987    _check_diagonal_scatter_shape(input_diag.shape, src.shape)
5988    input_shape = input.shape
5989    zeros_shape = list(input_shape)
5990    m, n = input_shape[dim1], input_shape[dim2]
5991    if m == n:
5992        src = src - input_diag
5993        src = ops.diag_embed(src, offset, dim1, dim2)
5994        return input + src
5995    if m > n:
5996        axis = dim2
5997        zeros_shape[axis] = m - n
5998    else:
5999        axis = dim1
6000        zeros_shape[axis] = n - m
6001    zeros_tensor = zeros(zeros_shape, dtype=input.dtype)
6002    input = concat((input, zeros_tensor), axis)
6003    input_diag = input.diagonal(offset, dim1, dim2)
6004    if src.shape != input_diag.shape:
6005        zeros_shape = []
6006        for i, ax in enumerate(src.shape):
6007            if ax == input_diag.shape[i]:
6008                zeros_shape.append(ax)
6009            else:
6010                axis = i
6011                zeros_shape.append(input_diag.shape[i] - ax)
6012        zeros_tensor = zeros(zeros_shape, dtype=src.dtype)
6013        src = concat((src, zeros_tensor), axis)
6014    src = src - input_diag
6015    src = ops.diag_embed(src, offset, dim1, dim2)
6016    input = input + src
6017    begin = (0,) * input.ndim
6018    return slice(input, begin, input_shape)
6019
6020
def lstsq(input, A):
    r"""
    Computes the solutions of the least squares and minimum norm problems of the full-rank
    matrix `x` of size :math:`(m \times n)` and the matrix `a` of size :math:`(m \times k)`.

    If :math:`m \geq n`, `lstsq` solves the least-squares problem:

    .. math::

       \begin{array}{ll}
       \min_y & \|xy-a\|_2.
       \end{array}

    If :math:`m < n`, `lstsq` solves the least-norm problem:

    .. math::

       \begin{array}{llll}
       \min_y & \|y\|_2 & \text{subject to} & xy = a.
       \end{array}

    where `y` is the returned tensor.

    Args:
        input (Tensor): The :math:`(m \times n)` matrix equivalent to :math:`x` above.
            The input tensor whose data type is float16, float32 or float64.
        A (Tensor): The :math:`(m \times k)` matrix equivalent to :math:`a` above.
            The input tensor whose data type is float16, float32 or float64.

    Returns:
        Tensor, the least squares or minimum norm problems solution, which has shape :math:`(n \times k)`.
        The data type is the same as `input`.

    Raises:
        TypeError: If `input` or `A` is not a Tensor.
        TypeError: If dtype of `input` or `A` is not one of: float16, float32, float64.
        TypeError: If the dtypes of `input` and `A` are not the same.
        ValueError: If the dimension of `input` is not equal to 2.
        ValueError: If the dimension of `A` is not equal to 2 or 1.
        ValueError: If `input.shape[0]` is not equal to `A.shape[0]`.

    Supported Platforms:
        ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[2,1,5],[3,5,1],[1,1,1]]),mindspore.float32)
        >>> a = Tensor(np.array([[10,5],[15,8],[7,4]]),mindspore.float32)
        >>> output = ops.lstsq(x, a)
        >>> print(output)
        [[17.000002  11.000002 ]
         [-6.5000005 -4.500001 ]
         [-3.500002  -2.5000017]]
    """
    return lstsq_(input, A)


def mvlgamma(input, p):
    r"""
    Returns the results of the multivariate log-gamma function with dimension `p` element-wise.

    The mathematical calculation process of Mvlgamma is shown as follows:

    .. math::

        \log (\Gamma_{p}(input))=C+\sum_{i=1}^{p} \log (\Gamma(input-\frac{i-1}{2}))

    where :math:`C = \log(\pi) \times \frac{p(p-1)}{4}` and :math:`\Gamma(\cdot)` is the Gamma function.

    Args:
        input (Tensor): The input tensor of the multivariate log-gamma function,
          which must be one of the following types: float32, float64.
          The shape is :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
          And the value of any element in `input` must be greater than :math:`(p - 1) / 2`.
        p (int): The number of dimensions. And the value of `p` must be greater than or equal to 1.

    Returns:
        Tensor, has the same shape and type as `input`.

    Raises:
        TypeError: If dtype of `input` is neither float32 nor float64.
        TypeError: If `p` is not an int.
        ValueError: If `p` is less than 1.
        ValueError: If not all elements of `input` are greater than :math:`(p - 1) / 2`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[3, 4, 5], [4, 2, 6]]), mindspore.float32)
        >>> y = ops.mvlgamma(x, p=3)
        >>> print(y)
        [[2.694925 5.402975 9.140645]
         [5.402975 1.596312 13.64045]]
    """
    mvlgamma_op = _get_cache_prim(Mvlgamma)(p)
    return mvlgamma_op(input)


6125def nonzero(input, as_tuple=False):
6126    r"""
6127    Return the positions of all non-zero values.
6128
6129    Args:
6130        input (Tensor): The input Tensor, its rank should be greater than or equal to 1.
6131        as_tuple (bool, optional): Whether the output is tuple.
6132            If ``False`` , return Tensor. Default: ``False`` .
6133            If ``True`` , return Tuple of Tensor, only support ``Ascend`` .
6134
6135
6136    Returns:
6137        - If `as_tuple` is ``False``, return the Tensor, a 2-D Tensor whose data type is int64,
6138          containing the positions of all non-zero values of the input.
6139        - If `as_tuple` is ``True``, return the Tuple of Tensor and data type is int64.
6140          The Tuple length is the dimension of the input tensor,
6141          and each element is the 1D tensor of the subscript of all non-zero elements of
6142          the input tensor in that dimension.
6143
6144    Raises:
6145        TypeError: If `input` is not Tensor.
6146        TypeError: If `as_tuple` is not bool.
6147        ValueError: If dim of `input` equals to 0.
6148
6149    Supported Platforms:
6150        ``Ascend`` ``GPU`` ``CPU``
6151
6152    Examples:
6153        >>> import mindspore
6154        >>> import numpy as np
6155        >>> from mindspore import Tensor, ops
6156        >>> x = Tensor(np.array([[[1,  0], [-5, 0]]]), mindspore.int32)
6157        >>> output = ops.nonzero(x)
6158        >>> print(output)
6159        [[0 0 0]
6160         [0 1 0]]
6161        >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
6162        >>> output = ops.nonzero(x, False)
6163        >>> print(output)
6164        [[0]
6165         [2]
6166         [4]]
6167        >>> x = Tensor(np.array([[[1,  0], [-5, 0]]]), mindspore.int32)
6168        >>> output = ops.nonzero(x, True)
6169        >>> print(output)
6170        (Tensor(shape=[2], dtype=Int64, value=[0, 0]),
6171         Tensor(shape=[2], dtype=Int64, value=[0, 1]),
6172         Tensor(shape=[2], dtype=Int64, value=[0, 0]))
6173        >>> x = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
6174        >>> output = ops.nonzero(x, True)
6175        >>> print(output)
6176        (Tensor(shape=[3], dtype=Int64, value=[0, 2, 4]), )
6177    """
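    # The tuple form (one 1-D index tensor per input dimension, Ascend only) is
    # handled by `non_zero_ext_`; the default form returns a single 2-D index tensor.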
6178    if as_tuple:
6179        return non_zero_ext_(input)
6180    return non_zero_(input)
6181
6182
6183def argwhere(input):
6184    """
6185    Return a Tensor of the positions of all non-zero values.
6186
6187    Args:
6188        input (Tensor): The input tensor. The data type is Number or Bool.
6189
6190    Returns:
6191        Tensor, a 2-D Tensor whose data type is int64, containing the positions of all non-zero values of the input.
6192
6193    Raises:
6194        TypeError: If `input` is not Tensor.
        ValueError: If the dimension of `input` is 0.
6196
6197    Supported Platforms:
6198        ``Ascend`` ``GPU`` ``CPU``
6199
6200    Examples:
6201        >>> import mindspore
6202        >>> from mindspore import Tensor, ops
6203        >>> import numpy as np
6204        >>> x = Tensor(np.array([[[1,  0], [-5, 0]]]), mindspore.int32)
6205        >>> output = ops.argwhere(x)
6206        >>> print(output)
6207        [[0 0 0]
6208         [0 1 0]]
6209    """
6210    return nonzero(input)
6211
6212
6213def column_stack(tensors):
6214    """
    Stacks 1-D tensors as columns into a 2-D tensor. Tensors of other dimensions are stacked as-is,
    as with :func:`mindspore.ops.hstack`.
6217
6218    Args:
6219        tensors (Union[tuple[Tensor], list[Tensor]]): A sequence of tensors. All
6220            of them must have the same shape except the axis to be concatenated.
6221
6222    Returns:
6223        2-D Tensor, formed by stacking the given tensors.
6224
6225    Raises:
6226        TypeError: If `tensors` is not list or tuple.
6227        TypeError: If element in `tensors` is not Tensor.
6228        ValueError: If `tensors` is empty.
6229
6230    Supported Platforms:
6231        ``Ascend`` ``GPU`` ``CPU``
6232
6233    Examples:
6234        >>> from mindspore import Tensor, ops
6235        >>> x1 = Tensor([1, 1, 1])
6236        >>> x2 = Tensor([2, 2, 2])
6237        >>> output = ops.column_stack((x1, x2))
6238        >>> print(output)
6239        [[1 2]
6240         [1 2]
6241         [1 2]]
6242    """
6243    if not isinstance(tensors, (list, tuple)):
6244        raise TypeError(f"For column_stack, the input must be list or tuple of tensors, but got {type(tensors)}.")
6245
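    # Normalize every input to at least 2-D before concatenating along axis 1:
    # scalars become shape (1,) and then, like all 1-D inputs, column vectors (n, 1).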
6246    trans_x = ()
6247    for tensor in tensors:
6248        if not isinstance(tensor, Tensor):
6249            raise TypeError(f"For column_stack, the input element must be tensor, but got {type(tensor)}.")
6250        if tensor.ndim < 1:
6251            tensor = expand_dims(tensor, 0)
6252        if tensor.ndim == 1:
6253            tensor = expand_dims(tensor, 1)
6254        trans_x += (tensor,)
6255    if not trans_x:
        raise ValueError("For column_stack, the input must have at least 1 tensor, but got 0.")
6257    _concat = _get_cache_prim(P.Concat)(1)
6258    return _concat(trans_x)
6259
6260
6261def hstack(tensors):
6262    """
6263    Stacks tensors in sequence horizontally.
6264    This is equivalent to concatenation along the second axis, except for 1-D tensors
6265    where it concatenates along the first axis.
6266
6267    Args:
6268        tensors (Union[tuple[Tensor], list[Tensor]]): A sequence of tensors. The
6269            tensors must have the same shape along all but the second axis, except
6270            1-D tensors which can be any length.
6271
6272    Returns:
6273        Stacked Tensor, formed by stacking the given tensors.
6274
6275    Raises:
6276        TypeError: If `tensors` is not list or tuple.
6277        TypeError: If element in `tensors` is not Tensor.
6278        ValueError: If `tensors` is empty.
6279
6280    Supported Platforms:
6281        ``Ascend`` ``GPU`` ``CPU``
6282
6283    Examples:
6284        >>> from mindspore import Tensor, ops
6285        >>> x1 = Tensor([1, 1, 1])
6286        >>> x2 = Tensor([2, 2, 2])
6287        >>> output = ops.hstack((x1, x2))
6288        >>> print(output)
        [1 1 1 2 2 2]
6290    """
6291    if not isinstance(tensors, (list, tuple)):
6292        raise TypeError(f"For hstack, the input must be list or tuple, but got {type(tensors)}.")
6293
6294    tuple_of_tensor = ()
6295    for tensor in tensors:
6296        if not isinstance(tensor, Tensor):
6297            raise TypeError(f"For hstack, the input element must be tensor, but got {type(tensor)}.")
6298        if tensor.ndim < 1:
6299            tensor = expand_dims(tensor, 0)
6300        tuple_of_tensor += (tensor,)
6301    if not tuple_of_tensor:
6302        raise ValueError("For hstack, the input must have at least 1 tensor, but got 0.")
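    # If the first tensor is at most 1-D, join end-to-end along axis 0; otherwise
    # concatenate along the second axis (axis 1).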
6303    if tuple_of_tensor[0].ndim <= 1:
6304        _concat = _get_cache_prim(P.Concat)(0)
6305        return _concat(tuple_of_tensor)
6306    _concat = _get_cache_prim(P.Concat)(1)
6307    return _concat(tuple_of_tensor)
6308
6309
6310@constexpr
6311def _check_axis_valid(axis, ndim):
6312    """
    Checks that the axes are valid given ndim, and returns axes that can be
    passed to the built-in operator (non-negative ints, as a tuple).
6315    """
6316    if axis is None:
6317        axis = ops.make_range(ndim)
6318        return axis
6319    if isinstance(axis, (tuple, list)):
        axis = tuple(map(lambda x: _check_axis_in_range(x, ndim), axis))
        return axis
    return (_check_axis_in_range(axis, ndim),)
6323
6324
6325@constexpr
6326def _get_moved_perm(ndim, source, destination):
6327    """
6328    Helper function for movedim, returns permutation after moving axis
6329    from source to destination.
6330    """
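    # Example (with normalized, non-negative axes): ndim=3, source=(0,),
    # destination=(2,) gives perm=(1, 2, 0), i.e. old axis 0 moves to the end.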
6331    dest_sorted_idx = [i for i, _ in sorted(enumerate(destination), key=operator.itemgetter(1))]
6332    axis_orig = [i for i in builtins.range(0, ndim) if i not in source]
6333
6334    k = 0
6335    m = 0
6336    perm = []
6337    for i in dest_sorted_idx:
        # Insert the moved axis (destination index n), preceded by the axes that
        # remain in their original positions (axis_orig[k:k + n - m]), so that the
        # moved axis lands at index n of the permutation being built.
6341        n = destination[i]
6342        j = k + n - m
6343        perm += axis_orig[k:j]
6344        perm.append(source[i])
6345        k += n - m
6346        m = n + 1
6347    perm += axis_orig[k:]
6348    return tuple(perm)
6349
6350
6351def movedim(x, source, destination):
6352    """
    Moves axes of an array from source to destination.

    Other axes remain in their original order.
6356
6357    Args:
6358        x (Tensor): The tensor array whose axis should be reordered.
6359            The dimension of `x` must not be 0.
6360        source (Union[int, sequence[int]]): Original positions of the
6361            axis to move. The length of `source` and `destination` must be the same.
6362        destination (Union[int, sequence[int]]): Destination positions
6363            for each of the original axis. The length of `source` and `destination` must be the same.
6364
6365    Returns:
        Tensor, array with moved axes.
6367
6368    Raises:
        ValueError: If the axes are out of the range of `[-x.ndim, x.ndim)`, or
            if the axes contain duplicates.
6371
6372    Supported Platforms:
6373        ``Ascend`` ``GPU`` ``CPU``
6374
6375    Examples:
6376        >>> # case1 : moving single axis
6377        >>> from mindspore import ops, Tensor
6378        >>> import numpy as np
6379        >>> x = Tensor(np.zeros((3, 4, 5)))
6380        >>> output = ops.movedim(x, 0, -1)
6381        >>> print(output.shape)
6382        (4, 5, 3)
6383        >>> # case 2 : moving multiple axes
6384        >>> from mindspore import ops, Tensor
6385        >>> import numpy as np
6386        >>> x = Tensor(np.zeros((3, 4, 5)))
6387        >>> output = ops.movedim(x, (0, 2), (1, 2))
6388        >>> print(output.shape)
6389        (4, 3, 5)
6390    """
6391    ndim = ops.rank(x)
6392    source = _check_axis_valid(source, ndim)
6393    destination = _check_axis_valid(destination, ndim)
6394    if len(source) != len(destination):
6395        raise ValueError(
6396            f"For `source` and `destination` arguments, the number of elements must be the same, but got 'source':"
6397            f" {len(source)} and 'destination': {len(destination)}.")
6398    perm = _get_moved_perm(ndim, source, destination)
6399    return transpose_(x, perm)
6400
6401
6402def moveaxis(x, source, destination):
6403    """
    Alias for :func:`mindspore.ops.movedim`. Moves axes of an array from source to destination.

    Refer to :func:`mindspore.ops.movedim` for more details.
6407
6408    Supported Platforms:
6409        ``Ascend`` ``GPU`` ``CPU``
6410
6411    Examples:
6412        >>> from mindspore import ops, Tensor
6413        >>> import numpy as np
6414        >>> x = Tensor(np.zeros((3, 4, 5)))
6415        >>> output = ops.moveaxis(x, 0, -1)
6416        >>> print(output.shape)
6417        (4, 5, 3)
6418    """
6419
6420    return movedim(x, source, destination)
6421
6422
6423@_primexpr
6424def _check_swapaxes_axis(axes, ndim):
6425    return validator.check_swapaxes_axis(axes, ndim)
6426
6427
6428def swapaxes(input, axis0, axis1):
6429    '''
6430    Interchange two axes of a tensor.
6431
6432    Args:
        input (Tensor): Input tensor.
6434        axis0 (int): First axis.
6435        axis1 (int): Second axis.
6436
6437    Returns:
6438        Transposed tensor, has the same data type as `input`.
6439
6440    Raises:
6441        TypeError: If argument `input` is not Tensor.
6442        TypeError: If `axis0` or `axis1` is not integer.
6443        ValueError: If `axis0` or `axis1` is not in the range of :math:`[-ndim, ndim-1]`.
6444
6445    Supported Platforms:
6446        ``Ascend`` ``GPU`` ``CPU``
6447
6448    Examples:
6449        >>> import numpy as np
6450        >>> from mindspore import ops
6451        >>> from mindspore import Tensor
6452        >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
6453        >>> output = ops.swapaxes(input, 0, 2)
6454        >>> print(output.shape)
6455        (4, 3, 2)
6456    '''
6457    if not isinstance(input, Tensor):
6458        raise TypeError(f'For ops.swapaxes, parameter `input` must be Tensor, but got {type(input)}')
6459
6460    axis0, axis1 = _check_swapaxes_axis((axis0, axis1), input.ndim)
6461    if axis0 == axis1:
6462        return input
6463    if axis0 > axis1:
6464        axis0, axis1 = axis1, axis0
6465
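    # Build the swapped permutation by splicing the identity order: keep
    # [0, axis0), place axis1, keep (axis0, axis1), place axis0, keep the rest.
    # E.g. ndim=4, axis0=1, axis1=2 yields (0, 2, 1, 3).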
6466    perm = ops.make_range(0, input.ndim)
6467    if axis1 + 1 < input.ndim:
6468        new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
6469                   perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1] + perm[axis1 + 1:]
6470    else:
6471        new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
6472                   perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1]
6473
6474    return transpose_(input, new_perm)
6475
6476
6477def swapdims(input, dim0, dim1):
6478    '''
6479    Interchange two dims of a tensor.
6480    This function is equivalent to :func:`mindspore.ops.swapaxes` function.
6481
6482    Args:
        input (Tensor): Input tensor.
6484        dim0 (int): First dim.
6485        dim1 (int): Second dim.
6486
6487    Returns:
6488        Transposed tensor, has the same data type as `input`.
6489
6490    Raises:
6491        TypeError: If argument `input` is not Tensor.
6492        TypeError: If `dim0` or `dim1` is not integer.
6493        ValueError: If `dim0` or `dim1` is not in the range of :math:`[-ndim, ndim-1]`.
6494
6495    Supported Platforms:
6496        ``Ascend`` ``GPU`` ``CPU``
6497
6498    Examples:
6499        >>> import numpy as np
6500        >>> from mindspore import ops
6501        >>> from mindspore import Tensor
6502        >>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
6503        >>> output = ops.swapdims(input, 0, 2)
6504        >>> print(output.shape)
6505        (4, 3, 2)
6506    '''
6507    return ops.swapaxes(input, dim0, dim1)
6508
6509
6510@constexpr
6511def _check_is_int(arg_value, arg_name, op_name):
6512    arg_value = validator.check_is_int(arg_value, arg_name, op_name)
6513    return arg_value
6514
6515
6516@_primexpr
6517def _check_positive_int(arg_value, arg_name, op_name):
6518    arg_value = validator.check_int_range(arg_value, 0, 2147483647, validator.INC_RIGHT, arg_name, op_name)
6519    return arg_value
6520
6521
6522@constexpr
6523def _check_axis_range(arg_value, limit, arg_name, op_name):
6524    arg_value = validator.check_int_range(arg_value, -limit, limit, validator.INC_LEFT, arg_name, op_name)
6525    return arg_value
6526
6527
6528@_primexpr
6529def _cal_repeat_dims(x_rank, rep, expand_axis):
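    # Tile multiples for repeat_elements: all ones except `rep` at the inserted
    # axis, e.g. (x_rank=2, rep=3, expand_axis=1) -> (1, 3, 1).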
6530    rep_dims = [1] * (x_rank + 1)
6531    rep_dims[expand_axis] = rep
6532    return tuple(rep_dims)
6533
6534
6535@_primexpr
6536def _cal_reshape(x_shape, rep, axis):
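    # Final shape after merging the tiled axis: the repeated axis grows by a
    # factor of `rep`, e.g. (x_shape=(2, 3), rep=2, axis=0) -> (4, 3).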
6537    x_reshape = list(x_shape)
6538    x_reshape[axis] *= rep
6539    return tuple(x_reshape)
6540
6541
6542def repeat_interleave(input, repeats, axis=None):
6543    """
6544    Repeat elements of a tensor along an axis, like `numpy.repeat`.
6545
6546    Args:
6547        input (Tensor): The tensor to repeat values for. Must be of type: float16,
6548            float32, int8, uint8, int16, int32, or int64.
6549        repeats (Union[int, tuple, list, Tensor]): The number of times to repeat, must be positive.
        axis (int, optional): The axis along which to repeat. Default: ``None``. If `axis` is None,
            the input Tensor will be flattened and the output will also be flattened.
6552
6553    Returns:
6554        One tensor with values repeated along the specified axis. If input has shape
6555        :math:`(s1, s2, ..., sn)` and axis is i, the output will have shape :math:`(s1, s2, ...,
6556        si * repeats, ..., sn)`. The output type will be the same as the type of `input`.
6557
6558    Supported Platforms:
6559        ``Ascend`` ``GPU`` ``CPU``
6560
6561    Examples:
6562        >>> import mindspore
6563        >>> import numpy as np
6564        >>> from mindspore import Tensor, ops
6565        >>> input = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
6566        >>> output = ops.repeat_interleave(input, repeats=2, axis=0)
6567        >>> print(output)
6568        [[0 1 2]
6569         [0 1 2]
6570         [3 4 5]
6571         [3 4 5]]
6572    """
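    # Match numpy.repeat semantics: with axis=None, flatten first and repeat along
    # the single remaining axis.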
6573    if axis is None:
6574        input = input.reshape(-1)
6575        axis = 0
6576    if isinstance(repeats, Tensor):
6577        repeats = TensorToList()(repeats)
6578    output = input.repeat(repeats, axis)
6579    return output
6580
6581
6582def repeat_interleave_ext(input, repeats, dim=None, output_size=None):
6583    r"""
6584    Repeat elements of a tensor along an axis, like `numpy.repeat`.
6585
6586    Args:
6587        input (Tensor): The tensor to repeat values for. Must be of type: float16,
6588            float32, int8, uint8, int16, int32, or int64.
6589        repeats (Union[int, tuple, list, Tensor]): The number of times to repeat, must be positive.
        dim (int, optional): The dim along which to repeat. Default: ``None``. If `dim` is None,
            the input Tensor will be flattened and the output will also be flattened.
        output_size (int, optional): Total output size for the given dim (e.g. the sum of `repeats`).
            Default: ``None``.
6594
6595    Returns:
6596        One tensor with values repeated along the specified dim. If input has shape
6597        :math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ...,
6598        si * repeats, ..., sn)`. The output type will be the same as the type of `input`.
6599
6600    Supported Platforms:
6601        ``Ascend``
6602
6603    Examples:
6604        >>> import mindspore
6605        >>> import numpy as np
6606        >>> from mindspore import Tensor, ops
6607        >>> input = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
6608        >>> output = ops.function.array_func.repeat_interleave_ext(input, repeats=2, dim=0)
6609        >>> print(output)
6610        [[0 1 2]
6611         [0 1 2]
6612         [3 4 5]
6613         [3 4 5]]
6614    """
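    # Dispatch on the type of `repeats`: a scalar count goes to the int variant of
    # the primitive, a tensor of per-element counts goes to the tensor variant.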
6615    if isinstance(repeats, int):
6616        return repeat_interleave_int_(input, repeats, dim, output_size)
6617    return repeat_interleave_tensor_(input, repeats, dim, output_size)
6618
6619
6620def repeat_elements(x, rep, axis=0):
6621    """
6622    Repeat elements of a tensor along an axis, like `numpy.repeat` .
6623
6624    Args:
6625        x (Tensor): The tensor to repeat values for. Must be of type: float16,
6626            float32, int8, uint8, int16, int32, or int64.
6627        rep (int): The number of times to repeat, must be positive.
6628        axis (int): The axis along which to repeat. Default: 0.
6629
6630    Returns:
6631        One tensor with values repeated along the specified axis. If x has shape
6632        :math:`(s1, s2, ..., sn)` and axis is i, the output will have shape :math:`(s1, s2, ..., si * rep, ..., sn)`.
6633        The output type will be the same as the type of `x`.
6634
6635    Supported Platforms:
6636        ``Ascend`` ``GPU`` ``CPU``
6637
6638    Examples:
6639        >>> import mindspore
6640        >>> import numpy as np
6641        >>> from mindspore import Tensor, ops
6642        >>> # case 1 : repeat on axis 0
6643        >>> x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
6644        >>> output = ops.repeat_elements(x, rep = 2, axis = 0)
6645        >>> print(output)
6646        [[0 1 2]
6647         [0 1 2]
6648         [3 4 5]
6649         [3 4 5]]
6650        >>> # case 2 : repeat on axis 1
6651        >>> x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
6652        >>> output = ops.repeat_elements(x, rep = 2, axis = 1)
6653        >>> print(output)
6654        [[0 0 1 1 2 2]
6655         [3 3 4 4 5 5]]
6656    """
6657    const_utils.check_type_valid(ops.dtype(x), mstype.number_type, 'input x')
6658    rep = _check_positive_int(rep, "rep", "repeat_elements")
6659    axis = _check_is_int(axis, "axis", "repeat_elements")
6660    x_rank = rank_(x)
6661    axis = _check_axis_range(axis, x_rank, "axis", "repeat_elements")
6662    axis = axis + x.ndim if axis < 0 else axis
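    # Strategy: insert a new axis right after `axis`, tile `rep` copies along it,
    # then reshape so the tiled axis merges back, giving size si * rep along `axis`.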
6663    expand_axis = axis + 1
6664    x_expand = expand_dims(x, expand_axis)
6665    rep_dims = _cal_repeat_dims(x_rank, rep, expand_axis)
6666    x_expand = tile_(x_expand, rep_dims)
6667    x_shape = shape_(x)
6668    x_reshape = _cal_reshape(x_shape, rep, axis)
6669    x_rep = reshape_(x_expand, x_reshape)
6670    return x_rep
6671
6672
6673def sequence_mask(lengths, maxlen=None):
6674    """
6675    Returns a mask tensor representing the first N positions of each cell.
6676
    If `lengths` has shape :math:`(d_1, d_2, ..., d_n)`, then the resulting tensor mask has dtype bool and shape
    :math:`(d_1, d_2, ..., d_n, maxlen)`, with mask :math:`[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])`.
6679
6680    Args:
6681        lengths (Tensor): Tensor to calculate the mask for. All values in this tensor should be
6682            less than or equal to `maxlen`. Values greater than `maxlen` will be treated as `maxlen`.
        maxlen (int): Size of the last dimension of the returned tensor. Must be positive and of the
            same type as the elements of `lengths`. Default: ``None`` .
6685
6686    Returns:
6687        One mask tensor of shape `lengths.shape + (maxlen,)` .
6688
6689    Raises:
6690        TypeError: If `lengths` is not a Tensor.
6691        TypeError: If `maxlen` is not an int.
6692        TypeError: If dtype of `lengths` is neither int32 nor int64.
6693
6694    Supported Platforms:
6695        ``GPU`` ``CPU``
6696
6697    Examples:
6698        >>> import numpy as np
6699        >>> from mindspore import Tensor, ops
6700        >>> # case 1: When maxlen is assigned
6701        >>> x = Tensor(np.array([1, 2, 3, 4]))
6702        >>> output = ops.sequence_mask(x, 5)
6703        >>> print(output)
6704        [[ True False False False False]
6705         [ True  True False False False]
6706         [ True  True  True False False]
6707         [ True  True  True  True False]]
6708        >>> # case 2: When there is 0 in x
6709        >>> x = Tensor(np.array([[1, 3], [2, 0]]))
6710        >>> output = ops.sequence_mask(x, 5)
6711        >>> print(output)
6712        [[[ True False False False False]
6713          [ True  True  True False False]]
6714         [[ True  True False False False]
6715          [False False False False False]]]
6716        >>> # case 3: when the maxlen is not assigned
6717        >>> x = Tensor(np.array([[1, 3], [2, 4]]))
6718        >>> output = ops.sequence_mask(x)
6719        >>> print(output)
6720        [[[ True False False False ]
6721          [ True  True  True False ]]
6722         [[ True  True False False ]
6723          [ True  True  True  True ]]]
6724    """
6725    const_utils.check_type_valid(ops.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
6726
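    # If maxlen is not given, derive it as max(lengths): cast to float32 so
    # ArgMaxWithValue can reduce the flattened tensor, then cast back to int32.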
6727    if maxlen is None:
6728        flatten_data = reshape_(lengths, (-1,))
6729        flatten_data = cast_(flatten_data, mstype.float32)
6730        _, value = arg_max_with_value_(flatten_data)
6731        maxlen = cast_(value, mstype.int32)
6732    else:
6733        maxlen = _check_positive_int(maxlen, "maxlen", "sequence_mask")
6734        maxlen = scalar_to_tensor_(maxlen, mstype.int32)
6735
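    # Broadcast comparison: a [0, maxlen) range vector against lengths expanded
    # with a trailing axis, so mask[..., j] is True exactly when j < lengths[...].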
6736    range_vector = range_(scalar_to_tensor_(0, mstype.int32), maxlen, scalar_to_tensor_(1, mstype.int32))
6737    mask = expand_dims(lengths, -1)
6738    result = range_vector < mask
6739    return result
6740
6741
6742def top_k(input_x, k, sorted=True):
6743    r"""
6744    `top_k` is deprecated, please use `ops.topk` instead.
6745    """
6746    top_k_ = _get_cache_prim(P.TopK)(sorted)
6747    return top_k_(input_x, k)
6748
6749
6750__all__ = [
6751    'unique',
6752    'unique_with_pad',
6753    'unique_consecutive',
6754    'eye',
6755    'matrix_band_part',
6756    'padding',
6757    'fill',
6758    'fills',
6759    'tile',
6760    'size',
6761    'ger',
6762    'ones',
6763    'ones_like',
6764    'zeros',
6765    'zeros_like',
6766    'shape',
6767    'shape_',
6768    'reverse',
6769    'reverse_sequence',
6770    'hamming_window',
6771    'chunk',
6772    'full',
6773    'full_like',
6774    'dyn_shape',
6775    'rank',
6776    'arange',
6777    'range',
6778    'reshape',
6779    'reshape_',
6780    'flatten',
6781    'tensor_slice',
6782    'strided_slice',
6783    'slice',
6784    'slice_scatter',
6785    'select_scatter',
6786    'cat',
6787    'concat',
6788    'stack',
6789    'unbind',
6790    'unstack',
6791    'is_tensor',
6792    'scalar_cast',
6793    'scalar_to_array',
6794    'scalar_to_tensor',
6795    'space_to_batch_nd',
6796    'batch_to_space_nd',
6797    'tuple_to_array',
6798    'expand_dims',
6799    'squeeze',
6800    'unsqueeze',
6801    'transpose',
6802    'scatter_nd',
6803    'scatter_nd_add',
6804    'scatter_nd_sub',
6805    'scatter_nd_mul',
6806    'scatter_nd_div',
6807    'scatter_nd_max',
6808    'scatter_nd_min',
6809    'tensor_scatter_add',
6810    'tensor_scatter_sub',
6811    'tensor_scatter_mul',
6812    'tensor_scatter_div',
6813    'tensor_scatter_max',
6814    'tensor_scatter_min',
6815    'tensor_scatter_elements',
6816    'scatter',
6817    'unsorted_segment_min',
6818    'unsorted_segment_max',
6819    'unsorted_segment_prod',
6820    'gather',
6821    'gather_d',
6822    'gather_elements',
6823    'gather_nd',
6824    'one_hot',
6825    'masked_fill',
6826    'masked_select',
6827    'where',
6828    'narrow',
6829    'ravel',
6830    'scatter_add',
6831    'scatter_mul',
6832    'scatter_max',
6833    'scatter_min',
6834    'scatter_div',
6835    'scatter_update',
6836    'select',
6837    'tril',
6838    'triu',
6839    'nonzero',
6840    'is_nonzero',
6841    'matrix_diag',
6842    'matrix_diag_part',
6843    'matrix_set_diag',
6844    'diag',
6845    'diagflat',
6846    'meshgrid',
6847    'affine_grid',
6849    'broadcast_to',
6850    'col2im',
6851    'split',
6852    'tensor_split',
6853    'vsplit',
6854    'hsplit',
6855    'dsplit',
6856    'index_fill',
6857    'index_select',
6858    'max',
6859    'argmax',
6860    'min',
6861    'unsorted_segment_sum',
6862    'population_count',
6863    'topk',
6864    'expand',
6865    'fold',
6866    'unfold',
6867    'diagonal',
6868    'diagonal_scatter',
6869    'lstsq',
6870    'mvlgamma',
6871    'swapaxes',
6872    'swapdims',
6873    'searchsorted',
6874    'argsort',
6875    'sequence_mask',
6876    'repeat_elements',
6877    'repeat_interleave',
6878    'argwhere',
6879    'column_stack',
6880    'hstack',
6881    'movedim',
6882    'moveaxis',
6883    'aminmax',
6884    'sort',
6885    'top_k',
6886    'deepcopy',
6887    'flip',
6888]
6889__all__.sort()
6890