# Copyright 2020-2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Operators for array."""
import copy
import itertools
import numbers

import numpy as np

from mindspore import log as logger
from mindspore import context
from mindspore.common.initializer import Zero
from mindspore.ops import signature as sig
from mindspore.ops._utils import get_broadcast_shape
from mindspore.common._utils import is_shape_unknown, is_dim_unknown
from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
from mindspore import _checkparam as validator
from mindspore._checkparam import _check_3d_int_or_tuple
from mindspore.common import dtype as mstype
from mindspore.common._decorator import deprecated
from mindspore.common import Tensor, CSRTensor, COOTensor
from mindspore._c_expression import Tensor as Tensor_
from mindspore._c_expression import CSRTensor as CSRTensor_
from mindspore._c_expression import COOTensor as COOTensor_
from ..auto_generate import (ExpandDims, Reshape, TensorShape, Transpose, Gather,
                             OnesLike, ZerosLike, Argmax, ArgMaxExt,
                             ReverseV2, Diag, Eye, ScatterNd, ResizeNearestNeighborV2,
                             GatherNd, GatherD, Range, MaskedFill, RightShift, NonZero,
                             ResizeNearestNeighbor, Identity, Split, CumSum, CumProd,
                             Cummax, Cummin, Argmin, Concat, UnsortedSegmentSum, ScalarToTensor,
                             Triu, BroadcastTo, StridedSlice, Select, TopkExt, SearchSorted)
from .manually_defined import Rank, Shape, Tile, Cast, Ones, Zeros
from ..auto_generate import ArgMaxWithValue, ArgMinWithValue

class _ScatterOp(PrimitiveWithInfer):
    """
    Defines Scatter operators
    """
    __mindspore_signature__ = (
        sig.make_sig('x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
        sig.make_sig('updates', dtype=sig.sig_dtype.T)
    )

    def _check_scatter_shape(self, x_shape, indices_shape, updates_shape, prim_name):
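        # For example, with x_shape (4, 5) and indices_shape (2, 3), a valid
        # updates_shape is indices_shape + x_shape[1:] == (2, 3, 5).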
        if indices_shape != [-1] and updates_shape and updates_shape != indices_shape + x_shape[1:]:
            raise ValueError(f"For '{prim_name}', "
                             f"updates_shape = indices_shape + input_x_shape[1:], but got input_x_shape: {x_shape}, "
                             f"indices_shape: {indices_shape}, updates_shape: {updates_shape}.")

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Initialize _ScatterOp"""
        validator.check_value_type('use_locking', use_locking, [bool], self.name)
        self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])
        self.add_prim_attr('side_effect_mem', True)

    def infer_shape(self, x_shape, indices_shape, updates_shape):
        self._check_scatter_shape(x_shape, indices_shape, updates_shape, self.name)
        return x_shape

    def infer_dtype(self, x_dtype, indices_dtype, updates_dtype):
        validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32], self.name)
        args = {"x": x_dtype, "updates": updates_dtype}
        validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type, self.name)
        return x_dtype


class UnravelIndex(Primitive):
    """
    Converts an array of flat indices into a tuple of coordinate arrays.

    Inputs:
        - **indices** (Tensor) - The input Tensor, whose elements are indices into the
          flattened version of an array with dimensions specified by `dims`.
          The dimension of `indices` must be 0-D or 1-D.
          Must be one of the following types: int32, int64.
        - **dims** (Tensor) - The shape of the array to use for unraveling indices.
          The dimension of `dims` must be 1-D. Must have the same type as `indices`.

    Outputs:
        - **y** (Tensor) - Tensor, it should be 2-D or 1-D (if `indices` is 0-D)
          and has the same type as `indices`.

    Raises:
        TypeError: If the data types of `indices` and `dims` are different.
        TypeError: If the data type of `indices` and `dims` is not int32 or int64.
        ValueError: If the dimension of `dims` is not 1 or the dimension of `indices` is not 1 or 0.
        ValueError: If `indices` contains negative elements.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> indices = Tensor(np.array([2, 5]), mindspore.int32)
        >>> dims = Tensor(np.array([3, 3]), mindspore.int32)
        >>> output = ops.UnravelIndex()(indices, dims)
        >>> print(output)
        [[0 2]
         [1 2]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize UnravelIndex"""


class _ScatterOpDynamic(PrimitiveWithCheck):
    """
    Defines Scatter operators with dynamic shape
    """
    __mindspore_signature__ = (
        sig.make_sig('x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
        sig.make_sig('updates', dtype=sig.sig_dtype.T)
    )

    def _check_scatter_shape(self, x_shape, indices_shape, updates_shape, prim_name):
        # x_shape cannot be dynamic
        if np.any(np.array(x_shape) == -1):
            raise ValueError(f"For '{prim_name}', the 'input_x' does not support dynamic shape, "
                             f"but got the shape of 'input_x' is {x_shape}.")
        # support indices and updates dynamic
        if is_shape_unknown(indices_shape) or is_shape_unknown(updates_shape):
            pass
        elif indices_shape != [-1] and updates_shape and updates_shape != indices_shape + x_shape[1:]:
            raise ValueError(f"For '{prim_name}', "
                             f"updates_shape = indices_shape + input_x_shape[1:], but got input_x_shape: {x_shape}, "
                             f"indices_shape: {indices_shape}, updates_shape: {updates_shape}.")

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Initialize _ScatterOpDynamic"""
        validator.check_value_type('use_locking', use_locking, [bool], self.name)
        self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])
        self.add_prim_attr('side_effect_mem', True)

    def check_shape(self, x_shape, indices_shape, updates_shape):
        self._check_scatter_shape(x_shape, indices_shape, updates_shape, self.name)

    def check_dtype(self, x_dtype, indices_dtype, updates_dtype):
        validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32, mstype.int64], self.name)
        args = {"x": x_dtype, "updates": updates_dtype}
        validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type, self.name)


class _ScatterNdOp(_ScatterOp):
    """
    Defines _ScatterNd operators
    """

    def _check_scatter_shape(self, x_shape, indices_shape, updates_shape, prim_name):
        validator.check('the dimension of x', len(x_shape),
                        'the dimension of indices', indices_shape[-1], validator.GE)
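        # For example, with x_shape (4, 4, 4) and indices_shape (2, 1):
        # indices_shape[-1] == 1, so updates_shape must be
        # indices_shape[:-1] + x_shape[1:] == (2, 4, 4).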
        if indices_shape[:-1] + x_shape[indices_shape[-1]:] != updates_shape:
            raise ValueError(f"For '{prim_name}', updates_shape = "
                             f"indices_shape[:-1] + x_shape[indices_shape[-1]:], but got x_shape: {x_shape}, "
                             f"indices_shape: {indices_shape}, updates_shape: {updates_shape}.")


def _check_infer_attr_reduce(axis, keep_dims, prim_name):
    validator.check_value_type('keep_dims', keep_dims, [bool], prim_name)
    validator.check_value_type('axis', axis, [int, tuple], prim_name)
    if isinstance(axis, tuple):
        for index, value in enumerate(axis):
            validator.check_value_type('axis[%d]' % index, value, [int], prim_name)


class Expand(Primitive):
    """
    :class:`mindspore.ops.Expand` will be deprecated in the future.
    Please use :class:`mindspore.ops.BroadcastTo` instead.
    """

    @deprecated("2.1", "BroadcastTo", False)
    @prim_attr_register
    def __init__(self):
        """Initialize Expand."""
        self.add_prim_attr("max_length", 1000000)
        self.init_prim_io_names(inputs=['x', 'shape'], outputs=['y'])


class DType(Primitive):
    """
    Returns the data type of the input tensor as mindspore.dtype.

    Inputs:
        - **input_x** (Tensor) - Input Tensor.

    Outputs:
        mindspore.dtype, the data type of a tensor.

    Raises:
        TypeError: If `input_x` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
        >>> output = ops.DType()(input_tensor)
        >>> print(output)
        Float32
    """

    @prim_attr_register
    def __init__(self):
        """Initialize DType"""

    def __call__(self, x):
        if not isinstance(x, (Tensor, CSRTensor, COOTensor, Tensor_, CSRTensor_, COOTensor_)):
            raise TypeError("For Primitive[DType], the input argument[input_x] "
                            "must be a Tensor, CSRTensor or COOTensor, but got " + str(type(x)) + ".")
        return x.dtype


232
233class CheckNumerics(Primitive):
234    """
235    Checks a tensor for NaN and Inf values. A runtime error is raised if input has NaN or Inf values.
236
237    Inputs:
238        - **x** (Tensor) - Input Tensor of any dimension. The data type is float16, float32 or float64.
239
240    Outputs:
241        Tensor, has the same shape and data type as `x` if `x` has no NaN or Inf values.
242
243    Raises:
244        TypeError: If `x` data type is not float16, float32, float64.
245        RuntimeError: If `x` has NaN or Inf values.
246
247    Supported Platforms:
248        ``Ascend`` ``GPU`` ``CPU``
249
250    Examples:
251        >>> x = Tensor(np.array([[1, 3], [2, 4]], dtype=np.float32))
252        >>> checknumerics = ops.CheckNumerics()
253        >>> output = checknumerics(x)
254        >>> print(output)
255        [[1. 3.]
256         [2. 4.]]
257    """
258
259    @prim_attr_register
260    def __init__(self):
261        """init CheckNumerics"""
262        self.init_prim_io_names(inputs=['x'], outputs=['y'])
263
264
class Im2Col(Primitive):
    r"""
    Extracts sliding local blocks from a batched input tensor.

    Consider a batched input tensor of shape :math:`(N, C, *)`,
    where :math:`N` is the batch dimension, :math:`C` is the channel dimension,
    and :math:`*` represents arbitrary spatial dimensions. This operation flattens
    each sliding `ksizes`-sized block within the spatial dimensions
    of input `x` into a column (i.e., last dimension) of a 4-D output
    tensor of shape :math:`(N, C, \prod(\text{kernel_size}), L)`, where
    :math:`C \times \prod(\text{kernel_size})` is the total number of values
    within each block (a block has :math:`\prod(\text{kernel_size})` spatial
    locations each containing a `C`-channeled vector), and :math:`L` is
    the total number of such blocks:

    .. math::
        L = \prod_d \left\lfloor\frac{\text{spatial_size}[d] + 2 \times \text{pads}[d] %
            - \text{dilations}[d] \times (\text{kernel_size}[d] - 1) - 1}{\text{strides}[d]} + 1\right\rfloor,

    where :math:`\text{spatial_size}` is formed by the spatial dimensions
    of input `x` (:math:`*` above), and :math:`d` is over all spatial
    dimensions.
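
    For example, a :math:`32 \times 32` spatial input with ``ksizes=3``, ``strides=1``,
    ``dilations=1`` and ``pads=0`` gives :math:`\lfloor (32 + 0 - 2 - 1) / 1 \rfloor + 1 = 30`
    positions per spatial dimension, so :math:`L = 30 \times 30 = 900` (see the example below).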

    Therefore, indexing `output` at the last dimension (column dimension)
    gives all values within a certain block.

    The `pads`, `strides` and `dilations` arguments specify
    how the sliding blocks are retrieved.

    Note:
        Currently, only 4-D input tensors (batched image-like tensors) are supported.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Args:
        ksizes (Union[int, tuple[int], list[int]]): The size of the kernel, should be two int
            for height and width. If type is int, it means that height equals width. Must be specified.
        strides (Union[int, tuple[int], list[int]], optional): The stride of the window, should be two int
            for height and width. If type is int, it means that height equals width. Default: ``1`` .
        dilations (Union[int, tuple[int], list[int]], optional): The dilation of the window, should be two int
            for height and width. If type is int, it means that height equals width. Default: ``1`` .

        pads (Union[int, tuple[int], list[int]], optional): The pad of the window, that must be a tuple of
            one or two `int` for height and width. Default: ``0`` .

            - If one int, :math:`pad\_height = pad\_width`.
            - If two int, :math:`pad\_height = pads[0]`, :math:`pad\_width = pads[1]`.

    Inputs:
        - **x** (Tensor) - input tensor, only 4-D input tensors (batched image-like tensors) are supported.

    Outputs:
        Tensor, a 4-D Tensor with the same type as input `x`.

    Raises:
        TypeError: If `ksizes` data type is not in Union[int, tuple[int], list[int]].
        TypeError: If `strides` data type is not in Union[int, tuple[int], list[int]].
        TypeError: If `dilations` data type is not in Union[int, tuple[int], list[int]].
        TypeError: If `pads` data type is not in Union[int, tuple[int], list[int]].
        ValueError: If `ksizes` value is not greater than zero or its number of elements is more than 2.
        ValueError: If `strides` value is not greater than zero or its number of elements is more than 2.
        ValueError: If `dilations` value is not greater than zero or its number of elements is more than 2.
        ValueError: If `pads` value is less than zero.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> x = Tensor(input_data=np.random.rand(4, 4, 32, 32), dtype=mstype.float64)
        >>> im2col = ops.Im2Col(ksizes=3, strides=1, dilations=1)
        >>> y = im2col(x)
        >>> print(y.shape)
        (4, 4, 9, 900)
    """

    @prim_attr_register
    def __init__(self, ksizes, strides=1, dilations=1, pads=0):
        """Initialize Im2Col."""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])

        validator.check_value_type('ksizes', ksizes, [int, tuple, list], self.name)
        validator.check_value_type('strides', strides, [int, tuple, list], self.name)
        validator.check_value_type('dilations', dilations, [int, tuple, list], self.name)
        validator.check_value_type('pads', pads, [int, tuple, list], self.name)

        self.ksizes = (ksizes, ksizes) if isinstance(ksizes, int) else ksizes
        self.strides = (strides, strides) if isinstance(strides, int) else strides
        self.dilations = (dilations, dilations) if isinstance(dilations, int) else dilations
        self.pads = (pads, pads) if isinstance(pads, int) else pads

        validator.check("ksizes size", len(self.ksizes), "", [1, 2], validator.IN, self.name)
        validator.check_positive_int_sequence(self.ksizes, "ksizes", self.name)
        validator.check("strides size", len(self.strides), "", [1, 2], validator.IN, self.name)
        validator.check_positive_int_sequence(self.strides, "strides", self.name)
        validator.check("dilations size", len(self.dilations), "", [1, 2], validator.IN, self.name)
        validator.check_positive_int_sequence(self.dilations, "dilations", self.name)
        validator.check("pads size", len(self.pads), "", [1, 2], validator.IN, self.name)
        validator.check_non_negative_int_sequence(self.pads, "pads", self.name)

        self.add_prim_attr('ksizes', self.ksizes)
        self.add_prim_attr('strides', self.strides)
        self.add_prim_attr('dilations', self.dilations)
        self.add_prim_attr('pads', self.pads)
        self.add_prim_attr('padding_mode', "CALCULATED")


class Col2Im(Primitive):
    r"""
    Rearranges a row vector to an image. It is
    usually used to reconstruct an image from a set of image patches (or sliding local blocks).

    Consider an input Tensor of shape :math:`(N, C, \prod(\text{kernel_size}), L)`,
    where :math:`N` is the batch dimension, :math:`C` is the channel dimension,
    :math:`\prod(\text{kernel_size})` is the block size, and
    :math:`L` is the total number of blocks. This operation combines these
    local blocks into the large :attr:`output` tensor of
    shape :math:`(N, C, \text{output_size}[0], \text{output_size}[1], \dots)`
    by summing the overlapping values.

    .. math::
        L = \prod_d \left\lfloor\frac{\text{output_size}[d] + 2 \times \text{padding}[d] %
            - \text{dilation}[d] \times (\text{kernel_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor

    where :math:`d` is over all spatial dimensions. The `padding`, `stride`
    and `dilation` arguments specify how the sliding blocks are retrieved.
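
    For example, with ``output_size=(8, 8)``, ``kernel_size=(2, 2)``, ``dilation=(2, 2)``,
    ``padding=(2, 2)`` and ``stride=(2, 2)`` (as in the example below), each spatial dimension
    yields :math:`\lfloor (8 + 4 - 2 - 1) / 2 \rfloor + 1 = 5` blocks, so :math:`L = 5 \times 5 = 25`.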

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Args:
        kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two positive int
            for height and width. If type is int, it means that height equals width. Must be specified.
        dilation (Union[int, tuple[int], list[int]], optional): The size of the dilation, should be two positive int
            for height and width. If type is int, it means that height equals width. Default: ``1`` .
        padding (Union[int, tuple[int], list[int]], optional): The size of the padding, should be two int
            for height and width. If type is int, it means that height equals width. Default: ``0`` .
        stride (Union[int, tuple[int], list[int]], optional): The size of the stride, should be two positive int
            for height and width. If type is int, it means that height equals width. Default: ``1`` .

    Inputs:
        - **x** (Tensor) - 4D input Tensor.
        - **output_size** (Tensor) - 1D tensor with 2 elements of data type int32 or int64.

    Outputs:
        Tensor, a 4-D Tensor with the same type as input `x`.

    Raises:
        TypeError: If dtype of `kernel_size` , `dilation` , `padding` or `stride` is not in
                   Union[int, tuple[int], list[int]].
        ValueError: If values in `kernel_size` , `dilation` , `padding` or `stride` are not greater than zero or any
                    one of them has more than 2 elements.
        ValueError: If x.shape[2] != kernel_size[0] * kernel_size[1].
        ValueError: If x.shape[3] does not match the calculated number of sliding blocks.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> x = Tensor(input_data=np.random.rand(16, 16, 4, 25), dtype=mstype.float32)
        >>> output_size = Tensor(input_data=[8, 8], dtype=mstype.int32)
        >>> col2im = ops.Col2Im(kernel_size=[2, 2], dilation=[2, 2], padding=[2, 2], stride=[2, 2])
        >>> y = col2im(x, output_size)
        >>> print(y.shape)
        (16, 16, 8, 8)
    """

    @prim_attr_register
    def __init__(self, kernel_size, dilation=1, padding=0, stride=1):
        """Initialize Col2Im."""
        self.init_prim_io_names(inputs=['x', 'output_size'], outputs=['y'])
        validator.check_value_type('kernel_size', kernel_size, [int, list, tuple], self.name)
        validator.check_value_type('dilation', dilation, [int, list, tuple], self.name)
        validator.check_value_type('padding', padding, [int, list, tuple], self.name)
        validator.check_value_type('stride', stride, [int, list, tuple], self.name)

        self.kernel_size = (kernel_size, kernel_size) if isinstance(kernel_size, int) else kernel_size
        self.dilation = (dilation, dilation) if isinstance(dilation, int) else dilation
        self.padding = (padding, padding) if isinstance(padding, int) else padding
        self.stride = (stride, stride) if isinstance(stride, int) else stride

        validator.check("kernel_size size", len(self.kernel_size), "", 2, validator.EQ, self.name)
        validator.check_positive_int_sequence(self.kernel_size, "kernel_size", self.name)
        validator.check("dilation size", len(self.dilation), "", 2, validator.EQ, self.name)
        validator.check_positive_int_sequence(self.dilation, "dilation", self.name)
        validator.check("padding size", len(self.padding), "", 2, validator.EQ, self.name)
        validator.check_non_negative_int_sequence(self.padding, "padding", self.name)
        validator.check("stride size", len(self.stride), "", 2, validator.EQ, self.name)
        validator.check_positive_int_sequence(self.stride, "stride", self.name)

        self.add_prim_attr('kernel_size', self.kernel_size)
        self.add_prim_attr('dilation', self.dilation)
        self.add_prim_attr('padding', self.padding)
        self.add_prim_attr('stride', self.stride)


class Unsqueeze(PrimitiveWithCheck):
    """Unsqueeze"""

    @prim_attr_register
    def __init__(self, axis):
        self.init_prim_io_names(inputs=['x'], outputs=['y'])
        self.axis = axis


class Squeeze(Primitive):
    """
    Return the Tensor after deleting the dimension of size 1 in the specified `axis`.

    Refer to :func:`mindspore.ops.squeeze` for more details.

    Args:
        axis (Union[int, tuple(int)]): Specifies the dimension indexes of shape to be removed, which will remove
            all the dimensions of size 1 in the given axis parameter. If specified, it must be int32 or int64.
            Default: ``()`` .

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_S)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
        >>> squeeze = ops.Squeeze(2)
        >>> output = squeeze(input_x)
        >>> print(output)
        [[1. 1.]
         [1. 1.]
         [1. 1.]]
    """

    @prim_attr_register
    def __init__(self, axis=()):
        """Initialize Squeeze"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
        validator.check_value_type('axis', axis, [int, tuple], self.name)
        if isinstance(axis, tuple):
            for idx, item in enumerate(axis):
                validator.check_value_type("axis[%d]" % idx, item, [int], self.name)
        else:
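            # Normalize a scalar axis to a 1-tuple so the primitive always
            # carries a tuple-valued "axis" attribute.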
            self.axis = (axis,)
            self.add_prim_attr("axis", (axis,))


class ConjugateTranspose(Primitive):
    """
    Calculates the conjugate of the input `x`, transposed according to the input `perm`.

    .. math::
        y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])

    Inputs:
        - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
        - **perm** (tuple[int]) - The permutation to be converted. The elements in `perm` are composed of
          the indexes of each dimension of `x`. The length of `perm` and the shape of `x` must be
          the same. Only constant value is allowed. Must be in the range [0, rank(x)).

    Outputs:
        Tensor, the type of output tensor is the same as `x` and the shape of output tensor is decided by the
        shape of `x` and the value of `perm`:

        .. math::
            y.shape[i] = x.shape[perm[i]]

        where i is in range [0, rank(x) - 1].

    Raises:
        TypeError: If `perm` is not a tuple.
        ValueError: If length of shape of `x` is not equal to length of shape of `perm`.
        ValueError: If the same element exists in `perm`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([[1 + 1j, 2 + 2j], [3 + 3j, 4 + 4j]]), mindspore.complex64)
        >>> perm = (1, 0)
        >>> conjugate_transpose = ops.ConjugateTranspose()
        >>> output = conjugate_transpose(x, perm)
        >>> print(output)
        [[1.-1.j 3.-3.j]
         [2.-2.j 4.-4.j]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize ConjugateTranspose"""
        self.init_prim_io_names(inputs=['x', 'perm'], outputs=['output'])


class Unique(Primitive):
    """
    Returns the unique elements of the input tensor, and also returns a tensor containing the index of
    each value of the input tensor corresponding to the output unique tensor.

    The output contains Tensor `y` and Tensor `idx`, returned as a tuple (`y`, `idx`).
    The shapes of Tensor `y` and Tensor `idx` are different in most cases, because Tensor `y` is deduplicated,
    while the shape of Tensor `idx` is consistent with the input.

    To get the same shape between `idx` and `y`, please refer to :class:`mindspore.ops.UniqueWithPad`.

    Inputs:
        - **input_x** (Tensor) - The input tensor.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tuple, containing Tensor objects (`y`, `idx`), `y` is a tensor with the
        same type as `input_x`, and contains the unique elements in `input_x`.
        `idx` is a tensor containing indices of elements in
        the input corresponding to the output tensor.

    Raises:
        TypeError: If `input_x` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops, nn
        >>> input_x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
        >>> output = ops.Unique()(input_x)
        >>> print(output)
        (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
        >>> y = output[0]
        >>> print(y)
        [1 2 5]
        >>> idx = output[1]
        >>> print(idx)
        [0 1 2 1]
        >>> # As can be seen from the above, y and idx have different shapes.
        >>> # Note that for GPU, this operator must be wrapped inside a model and executed in graph mode.
        >>> class UniqueNet(nn.Cell):
        ...     def __init__(self):
        ...         super(UniqueNet, self).__init__()
        ...         self.unique_op = ops.Unique()
        ...
        ...     def construct(self, x):
        ...         output, indices = self.unique_op(x)
        ...         return output, indices
        ...
        >>> input_x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
        >>> net = UniqueNet()
        >>> output = net(input_x)
        >>> print(output)
        (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['x'], outputs=['output'])


class UniqueConsecutive(Primitive):
    """
    Returns the elements that are unique in each consecutive group of equivalent elements in the input tensor.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Refer to :func:`mindspore.ops.unique_consecutive` for more details.

    Args:
        return_idx (bool, optional): Whether to return the index of where the element in the original input
            maps to the position in the output. Default: ``False`` .
        return_counts (bool, optional): Whether to return the counts of each unique element. Default: ``False`` .
        axis (int, optional): The dimension to apply unique. If ``None`` , the unique of the flattened input is
            returned. If specified, it must be int32 or int64. Default: ``None`` .

    Inputs:
        - **x** (Tensor) - The input tensor.

    Outputs:
        A tensor or a tuple of tensors containing tensor objects (`output`, `idx`, `counts`).

        - `output` has the same type as `x` and is used to represent the output list of unique scalar elements.
        - If `return_idx` is True, there will be an additional returned tensor, `idx`,
          which has the same shape as `x` and represents
          the index of where the element in the original input maps to the position in the output.
        - If `return_counts` is True, there will be an additional returned tensor, `counts`,
          which represents the number of occurrences for each unique value or tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> x = Tensor(np.array([1, 1, 2, 2, 3, 1, 1, 2]), mstype.int32)
        >>> unique_consecutive = ops.UniqueConsecutive(True, True, None)
        >>> output, idx, counts = unique_consecutive(x)
        >>> print(output)
        [1 2 3 1 2]
        >>> print(idx)
        [0 0 1 1 2 3 3 4]
        >>> print(counts)
        [2 2 1 2 1]
    """

    @prim_attr_register
    def __init__(self, return_idx=False, return_counts=False, axis=None):
        """Initialize UniqueConsecutive"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
        validator.check_value_type("return_idx", return_idx, [bool], self.name)
        validator.check_value_type("return_counts", return_counts, [bool], self.name)
        validator.check_value_type("axis", axis, [int, type(None)], self.name)
        self.add_prim_attr("return_idx", return_idx)
        self.add_prim_attr("return_counts", return_counts)
        self.add_prim_attr("axis", axis)


class SparseGatherV2(Primitive):
    """
    Returns a slice of input tensor based on the specified indices and axis.

    Inputs:
        - **input_params** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
        - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
          Specifies the indices of elements of the original Tensor, must be in the range
          `[0, input_params.shape[axis])`.
        - **axis** (Union(int, Tensor[int])) - Specifies the dimension index to gather indices.
          When axis is Tensor, the size must be 1.

    Outputs:
        Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)
        >>> input_indices = Tensor(np.array([1, 2]), mindspore.int32)
        >>> axis = 1
        >>> out = ops.SparseGatherV2()(input_params, input_indices, axis)
        >>> print(out)
        [[2. 7.]
         [4. 54.]
         [2. 55.]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize SparseGatherV2"""
        self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])
        self.add_prim_attr('bprop_return_sparse', True)


class Padding(Primitive):
    """
    Extends the last dimension of the input tensor from 1 to `pad_dim_size`, filling with 0.

    Refer to :func:`mindspore.ops.padding` for more details.

    Args:
        pad_dim_size (int, optional): The value of the last dimension of `x` to be
            extended, which must be positive. Default: ``8`` .

    Inputs:
        - **x** (Tensor) - Input Tensor of 2D or higher-dimensional.
          The last dimension of `x` must be 1. The data type is Number.

    Outputs:
        Tensor, the padded Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[8], [10]]), mindspore.float32)
        >>> pad_dim_size = 4
        >>> output = ops.Padding(pad_dim_size)(x)
        >>> print(output)
        [[ 8.  0.  0.  0.]
         [10.  0.  0.  0.]]
    """

    @prim_attr_register
    def __init__(self, pad_dim_size=8):
        """Initialize Padding"""
        validator.check_value_type("pad_dim_size", pad_dim_size, [int], self.name)
        validator.check_positive_int(pad_dim_size, "pad_dim_size", self.name)
        self.pad_dim_size = pad_dim_size


class UniqueWithPad(Primitive):
    """
    Returns the unique elements and relative indexes of a 1-D tensor, filled with a padding number.

    The basic function is the same as the Unique operator, but the UniqueWithPad operator adds a padding step.
    After the input Tensor `x` is processed by the unique operator, a tuple (`y`, `idx`) is returned,
    in which the shapes of `y` and `idx` are usually not equal. To resolve this,
    the UniqueWithPad operator fills the `y` Tensor with the `pad_num` specified by the user
    to make it have the same shape as the Tensor `idx`.

    Refer to :func:`mindspore.ops.unique_with_pad` for more details.

    Inputs:
        - **x** (Tensor) - The tensor to be deduplicated. Must be a 1-D vector with types: int32, int64.
        - **pad_num** (int) - Pad num. The data type is an int.

    Outputs:
        tuple(Tensor), tuple of 2 tensors, `y` and `idx`.

        - y (Tensor) - The unique elements filled with pad_num, the shape and data type same as `x`.
        - idx (Tensor) - The index of each value of `x` in the unique output `y`, the shape and data type same as `x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([1, 1, 2, 2, 3, 3, 4, 5]), mindspore.int32)
        >>> pad_num = 8
        >>> output = ops.UniqueWithPad()(x, pad_num)
        >>> print(output)
        (Tensor(shape=[8], dtype=Int32, value= [1, 2, 3, 4, 5, 8, 8, 8]),
         Tensor(shape=[8], dtype=Int32, value= [0, 0, 1, 1, 2, 2, 3, 4]))
    """

    @prim_attr_register
    def __init__(self):
        """Initialize UniqueWithPad"""
        self.init_prim_io_names(inputs=['x', 'pad_num'], outputs=['y', 'idx'])


class Size(Primitive):
    r"""
    Returns an int scalar representing the size of the input Tensor, that is, the total number of elements in the
    Tensor.

    Refer to :func:`mindspore.ops.size` for more details.

    Inputs:
        - **input_x** (Tensor) - Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.

    Outputs:
        int. A scalar representing the number of elements in `input_x`,
        :math:`size = x_1 * x_2 * ... * x_R`. The data type is an int.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
        >>> size = ops.Size()
        >>> output = size(input_x)
        >>> print(output)
        4
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Size"""


class MatrixDiagV3(Primitive):
    r"""
    Constructs a diagonal matrix or a batch of diagonal matrices from a given input Tensor.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Refer to :func:`mindspore.ops.matrix_diag` for more details.

    Args:
        align (str, optional): specifies how superdiagonals and subdiagonals should be aligned.
            Supported values: ``"RIGHT_LEFT"`` , ``"LEFT_RIGHT"`` , ``"LEFT_LEFT"`` , ``"RIGHT_RIGHT"`` .
            Default: ``"RIGHT_LEFT"`` .

            - When set to ``"RIGHT_LEFT"`` , the alignment of superdiagonals will be towards the right side
              (padding the row on the left), while subdiagonals will be towards the left side
              (padding the row on the right).
            - When set to ``"LEFT_RIGHT"`` , the alignment of superdiagonals will be towards the left side
              (padding the row on the right), while subdiagonals will be towards the right side
              (padding the row on the left).
            - When set to ``"LEFT_LEFT"`` , the alignment of both superdiagonals and subdiagonals will be towards
              the left side (padding the row on the right).
            - When set to ``"RIGHT_RIGHT"`` , the alignment of both superdiagonals and subdiagonals will be towards
              the right side (padding the row on the left).

    Inputs:
        - **x** (Tensor) - The diagonal Tensor.
        - **k** (Union[int, Tensor], optional) - Diagonal offsets.
          A Tensor of type int32. Positive value means superdiagonal,
          0 refers to the main diagonal, and negative value means subdiagonals. `k` can be a single integer
          (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band.
          k[0] must not be larger than k[1]. The value must be within the range of the given or derived
          `num_rows` and `num_cols`, meaning the value of k must be in (-num_rows, num_cols). Default: ``0`` .
        - **num_rows** (Union[int, Tensor], optional) - The number of rows of the output Tensor.
          A Tensor of type int32 with only one value. If `num_rows` is -1, the innermost
          matrix of the output Tensor is a square matrix, and the real number of rows
          is derived from the other inputs. That is
          :math:`num\_rows = x.shape[-1] - min(k[1], 0)`. Otherwise, the value must be greater than or equal to
          :math:`x.shape[-1] - min(k[1], 0)`. Default: -1.
        - **num_cols** (Union[int, Tensor], optional) - The number of columns of
          the output Tensor. A Tensor of type int32 with only one value.
          If `num_cols` is -1, the innermost matrix of the output
          Tensor is a square matrix, and the real number of columns is derived from the other inputs.
          That is :math:`num\_cols = x.shape[-1] + max(k[0], 0)`. Otherwise, the value must be greater than
          or equal to :math:`x.shape[-1] + max(k[0], 0)`.  Default: -1.
        - **padding_value** (Union[int, float, Tensor], optional) - The number to fill the area outside the specified
          diagonal band. A Tensor with only one value, with the same dtype as `x`. Default: ``0`` .

    Outputs:
        A Tensor. Has the same type as `x`.
        Suppose `x` has r dimensions with shape :math:`(I, J, ..., M, N)` . The output Tensor has rank r + 1 with shape
        :math:`(I, J, ..., M, num\_rows, num\_cols)` when only one diagonal is given (k is an integer or k[0] == k[1]).
        Otherwise, it has rank r with shape :math:`(I, J, ..., num\_rows, num\_cols)` .
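
        For instance, in the example below `x` has shape :math:`(3, 3)` (a band of three diagonals,
        each of length 3) and `k = (-1, 1)` spans more than one diagonal, so the output keeps rank 2
        with shape :math:`(num\_rows, num\_cols) = (3, 3)`.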

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[8, 9, 0],
        ...                      [1, 2, 3],
        ...                      [0, 4, 5]]), mindspore.float32)
        >>> k = Tensor(np.array([-1, 1]), mindspore.int32)
        >>> num_rows = Tensor(np.array(3), mindspore.int32)
        >>> num_cols = Tensor(np.array(3), mindspore.int32)
        >>> padding_value = Tensor(np.array(11), mindspore.float32)
        >>> matrix_diag_v3 = ops.MatrixDiagV3(align='LEFT_RIGHT')
        >>> output = matrix_diag_v3(x, k, num_rows, num_cols, padding_value)
        >>> print(output)
        [[ 1.  8. 11.]
         [ 4.  2.  9.]
         [11.  5.  3.]]
        >>> print(output.shape)
        (3, 3)
    """

    @prim_attr_register
    def __init__(self, align="RIGHT_LEFT"):
        """Initialize MatrixDiagV3"""
        validator.check_value_type("align", align, [str], self.name)
        validator.check_string(align, ['LEFT_RIGHT', 'RIGHT_LEFT', 'LEFT_LEFT', 'RIGHT_RIGHT'], 'align', self.name)
        self.init_prim_io_names(inputs=['x', 'k', 'num_rows', 'num_cols', 'padding_value'], outputs=['y'])


class MatrixDiagPartV3(Primitive):
    r"""
    Returns the diagonal part of a tensor.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Refer to :func:`mindspore.ops.matrix_diag_part` for more details.

    Args:
        align (str, optional): specifies how superdiagonals and subdiagonals should be aligned.
            Supported values: ``"RIGHT_LEFT"`` , ``"LEFT_RIGHT"`` , ``"LEFT_LEFT"`` , ``"RIGHT_RIGHT"`` .
            Default: ``"RIGHT_LEFT"`` .

            - When set to ``"RIGHT_LEFT"`` , the alignment of superdiagonals will be towards the right side
              (padding the row on the left), while subdiagonals will be towards the left side
              (padding the row on the right).
            - When set to ``"LEFT_RIGHT"`` , the alignment of superdiagonals will be towards the left side
              (padding the row on the right), while subdiagonals will be towards the right side
              (padding the row on the left).
            - When set to ``"LEFT_LEFT"`` , the alignment of both superdiagonals and subdiagonals will be towards
              the left side (padding the row on the right).
            - When set to ``"RIGHT_RIGHT"`` , the alignment of both superdiagonals and subdiagonals will be towards
              the right side (padding the row on the left).

    Inputs:
        - **x** (Tensor) - A Tensor of rank r, where r >= 2.
        - **k** (Tensor) - A Tensor of type int32. Diagonal offset(s). Positive value means superdiagonal, 0 refers to
          the main diagonal, and negative value means subdiagonals. k can be a single integer (for a single diagonal) or
          a pair of integers specifying the low and high ends of a matrix band. k[0] must not be larger than k[1]. The
          value of k is restricted, meaning the value of k must be in (-x.shape[-2], x.shape[-1]).
        - **padding_value** (Tensor) - A Tensor with the same dtype as `x`. The number to fill the area outside the
          specified diagonal band with. It must have only one value.

    Outputs:
        A Tensor. Has the same type as `x`.
        Assume `x` has r dimensions :math:`(I, J, ..., M, N)` . Let `max_diag_len` be the maximum length among all
        diagonals to be extracted, :math:`max\_diag\_len = min(M + min(k[1], 0), N + min(-k[0], 0))`.
        Let `num_diags` be the number of diagonals to extract, :math:`num\_diags = k[1] - k[0] + 1`.
        If :math:`num\_diags == 1`, the output tensor is of rank r - 1 with shape :math:`(I, J, ..., L, max\_diag\_len)`.
        Otherwise, the output tensor has rank r with dimensions :math:`(I, J, ..., L, num\_diags, max\_diag\_len)` .
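
        For instance, in the example below `x` has shape :math:`(3, 4)` and `k = (1, 3)`, so
        :math:`max\_diag\_len = min(3 + 0, 4 - 1) = 3` and :math:`num\_diags = 3`, giving an
        output of shape :math:`(3, 3)`.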

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[1, 2, 3, 4],
        ...                      [5, 6, 7, 8],
        ...                      [9, 8, 7, 6]]), mindspore.float32)
        >>> k = Tensor(np.array([1, 3]), mindspore.int32)
        >>> padding_value = Tensor(np.array(9), mindspore.float32)
        >>> matrix_diag_part_v3 = ops.MatrixDiagPartV3(align='RIGHT_LEFT')
        >>> output = matrix_diag_part_v3(x, k, padding_value)
        >>> print(output)
        [[9. 9. 4.]
         [9. 3. 8.]
         [2. 7. 6.]]
        >>> print(output.shape)
        (3, 3)
    """

    @prim_attr_register
    def __init__(self, align="RIGHT_LEFT"):
        """Initialize MatrixDiagPartV3"""
        self.add_prim_attr("max_length", 200000000)
        validator.check_value_type("align", align, [str], self.name)
        validator.check_string(align, ['LEFT_RIGHT', 'RIGHT_LEFT', 'LEFT_LEFT', 'RIGHT_RIGHT'], 'align', self.name)
        self.init_prim_io_names(inputs=['x', 'k', 'padding_value'], outputs=['y'])


class MatrixSetDiagV3(Primitive):
    r"""
    Updates the diagonal part of a batched tensor.
    It takes a Tensor `x` and `diagonal` as input and returns a Tensor in which
    the specified diagonal values in the innermost matrices will be replaced
    by the values in `diagonal`.

    Diagonals shorter than `max_diag_len` need to be padded, where `max_diag_len` is the
    length of the longest diagonal.
    The dimension :math:`shape[-2]` of `diagonal` must be equal to `num_diags`, calculated as
    :math:`num\_diags = k[1] - k[0] + 1`.
    The dimension :math:`shape[-1]` of `diagonal` must be equal to the longest diagonal length `max_diag_len`,
    calculated as :math:`max\_diag\_len = min(x.shape[-2] + min(k[1], 0), x.shape[-1] + min(-k[0], 0))`.

    Assume `x` is an n-D Tensor with shape :math:`(d_1, d_2, ..., d_{n-2}, d_{n-1}, d_n)`.
    If `k` is an integer or :math:`k[0] == k[1]`, `diagonal` is an (n-1)-D Tensor with
    shape :math:`(d_1, d_2, ..., d_{n-2}, max\_diag\_len)`.
    Otherwise, it has the same rank as `x`
    with shape :math:`(d_1, d_2, ..., d_{n-2}, num\_diags, max\_diag\_len)`.
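
    For instance, in the example below `x` has shape :math:`(3, 4)` and `k = (-1, 2)`, so
    :math:`num\_diags = 2 - (-1) + 1 = 4` and :math:`max\_diag\_len = min(3 + 0, 4 + 0) = 3`,
    which is why `diagonal` has shape :math:`(4, 3)`.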

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Args:
        align (str, optional): specifies how superdiagonals and subdiagonals should be aligned.
            Supported values: ``"RIGHT_LEFT"`` , ``"LEFT_RIGHT"`` , ``"LEFT_LEFT"`` , ``"RIGHT_RIGHT"`` .
            Default: ``"RIGHT_LEFT"`` .

            - When set to ``"RIGHT_LEFT"`` , the alignment of superdiagonals will be towards the right side
              (padding the row on the left), while subdiagonals will be towards the left side
              (padding the row on the right).
            - When set to ``"LEFT_RIGHT"`` , the alignment of superdiagonals will be towards the left side
              (padding the row on the right), while subdiagonals will be towards the right side
              (padding the row on the left).
            - When set to ``"LEFT_LEFT"`` , the alignment of both superdiagonals and subdiagonals will be towards
              the left side (padding the row on the right).
            - When set to ``"RIGHT_RIGHT"`` , the alignment of both superdiagonals and subdiagonals will be towards
              the right side (padding the row on the left).

    Inputs:
        - **x** (Tensor) - An n-D Tensor, where :math:`n >= 2`.
        - **diagonal** (Tensor) - A Tensor with the same dtype as `x`. Its rank depends on `k`.
          If `k` is an integer or :math:`k[0] == k[1]`, its dimension is :math:`n-1`.
          Otherwise, it has dimension :math:`n`.
        - **k** (Tensor) - Diagonal offset(s), Tensor of type int32.
          `k` can either be a single integer, which represents a single diagonal,
          or a pair of integers that specify the low and high ends of a matrix band.
          In this case, `k[0]` should not be greater than `k[1]`.
          The value of `k` is restricted, which means that the value of `k` must be in the range
          :math:`(-x.shape[-2], x.shape[-1])`.
          Input `k` must be a const Tensor when taking Graph mode.

          - `k > 0` refers to a superdiagonal.
          - `k = 0` refers to the main diagonal.
          - `k < 0` refers to subdiagonals.

    Outputs:
        Tensor. The same type and shape as `x`.

    Raises:
        TypeError: If any input is not Tensor.
        TypeError: If input `x` and `diagonal` do not have the same dtype.
        TypeError: If `k` is not int32 dtype.
        ValueError: If `align` is not a string or not in the valid range.
        ValueError: If rank of `k` is not equal to 0 or 1.
        ValueError: If rank of `x` is not greater than or equal to 2.
        ValueError: If size of `k` is not equal to 1 or 2.
        ValueError: If `k[1]` is not greater than or equal to `k[0]` in case the size of `k` is 2.
        ValueError: If the rank of `diagonal` does not match the rank of input `x`.
        ValueError: If the shape of `diagonal` does not match the shape of input `x`.
        ValueError: If the diagonal :math:`shape[-2]` is not equal to num_diags calculated by
            :math:`num\_diags = k[1] - k[0] + 1` .
        ValueError: If the value of `k` is not in :math:`(-x.shape[-2], x.shape[-1])`.
        ValueError: If the diagonal :math:`shape[-1]` is not equal to the max_diag_len calculated by
            :math:`max\_diag\_len = min(x.shape[-2] + min(k[1], 0), x.shape[-1] + min(-k[0], 0))` .

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[7, 7, 7, 7],
        ...                      [7, 7, 7, 7],
        ...                      [7, 7, 7, 7]]), mindspore.float32)
        >>> diagonal = Tensor(np.array([[0, 9, 1],
        ...                             [6, 5, 8],
        ...                             [1, 2, 3],
        ...                             [4, 5, 0]]), mindspore.float32)
        >>> k = Tensor(np.array([-1, 2]), mindspore.int32)
        >>> matrix_set_diag_v3 = ops.MatrixSetDiagV3(align='RIGHT_LEFT')
        >>> output = matrix_set_diag_v3(x, diagonal, k)
        >>> print(output)
        [[1. 6. 9. 7.]
         [4. 2. 5. 1.]
         [7. 5. 3. 8.]]
        >>> print(output.shape)
        (3, 4)
    """
    __mindspore_signature__ = (
        sig.make_sig('x', dtype=sig.sig_dtype.T1),
        sig.make_sig('diagonal', dtype=sig.sig_dtype.T1),
        sig.make_sig('k', dtype=sig.sig_dtype.T2)
    )

    @prim_attr_register
    def __init__(self, align="RIGHT_LEFT"):
        """Initialize MatrixSetDiagV3"""
        self.add_prim_attr("max_length", 200000000)
        validator.check_value_type("align", align, [str], self.name)
        validator.check_string(align, ['LEFT_RIGHT', 'RIGHT_LEFT', 'LEFT_LEFT', 'RIGHT_RIGHT'], 'align', self.name)
        self.init_prim_io_names(inputs=['x', 'diagonal', 'k'], outputs=['y'])


class MatrixBandPart(Primitive):
    r"""
    Extracts the central diagonal band of each matrix in a tensor, with all values outside
    the central band set to zero.

    Refer to :func:`mindspore.ops.matrix_band_part` for more details.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Inputs:
        - **x** (Tensor) - Input tensor. :math:`(*, m, n)` where :math:`*` means, any number of additional dimensions.
        - **lower** (Union[int, Tensor]) - Number of subdiagonals to keep. The data type must be int32 or int64.
          If negative, keep entire lower triangle.
        - **upper** (Union[int, Tensor]) - Number of superdiagonals to keep. The data type must be int32 or int64.
          If negative, keep entire upper triangle.

    Outputs:
        Tensor, has the same type and shape as `x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> matrix_band_part = ops.MatrixBandPart()
        >>> x = np.ones([2, 4, 4]).astype(np.float32)
        >>> output = matrix_band_part(Tensor(x), 2, 1)
        >>> print(output)
        [[[1. 1. 0. 0.]
          [1. 1. 1. 0.]
          [1. 1. 1. 1.]
          [0. 1. 1. 1.]]
         [[1. 1. 0. 0.]
          [1. 1. 1. 0.]
          [1. 1. 1. 1.]
          [0. 1. 1. 1.]]]
    """

    @prim_attr_register
    def __init__(self):
        super().__init__(name="MatrixBandPart")
        self.init_prim_io_names(inputs=['x', 'lower', 'upper'], outputs=['y'])


class Fill(PrimitiveWithCheck):
    """
    The Fill interface is deprecated, please use the :class:`mindspore.ops.FillV2` instead.

    Supported Platforms:
        Deprecated
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Fill"""
        self.init_prim_io_names(inputs=['type', 'shape', 'value'], outputs=['y'])

    def __call__(self, dtype, dims, x):
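        # Eager-mode path: validate dtype, shape and value, then materialize
        # the result with NumPy (np.full) and wrap it back into a Tensor.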
        if dtype not in mstype.all_types and dtype not in [mstype.uint16, mstype.uint32, mstype.uint64]:
            raise TypeError(
                f"For \'{self.name}\', the supported data type is ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', "
                "'uint16', 'uint32', 'uint64', 'float16', 'float32', 'float64'], but got an invalid dtype!")
        x_nptype = mstype.dtype_to_nptype(dtype)
        if not isinstance(dims, Tensor) and not isinstance(dims, tuple):
            raise TypeError(f"For \'{self.name}\', input[1] must be a tensor or a tuple.")
        if not isinstance(x, Tensor) and not isinstance(x, float) and not isinstance(x, int):
            raise TypeError(f"For \'{self.name}\', the value input only takes a scalar or a scalar within a tensor!")
        if isinstance(dims, Tensor):
            dims = dims.asnumpy()
        if isinstance(x, Tensor):
            x = x.asnumpy()
        ret = np.full(dims, x, x_nptype)
        return Tensor(ret, dtype=dtype)

    def infer_value(self, dtype, dims, x):
        x_nptype = mstype.dtype_to_nptype(dtype)
        if dims is not None and None not in dims and x is not None:
            if isinstance(dims, Tensor):
                dims = dims.asnumpy()
            if isinstance(x, Tensor):
                x = x.asnumpy()
            ret = np.full(dims, x, x_nptype)
            return Tensor(ret, dtype=dtype)
        return None


1209class Fills(Primitive):
1210    """
    The `Fills` primitive is deprecated. Please use :func:`mindspore.ops.fill` instead.
1213
1214    Supported Platforms:
1215        Deprecated
1216
1217    Examples:
1218        >>> import numpy as np
        >>> from mindspore import Tensor, ops
1220        >>> a = Tensor(np.arange(4).reshape((2,2)).astype('float32'))
1221        >>> fills = ops.Fills()
1222        >>> output = fills(a, float(1))
1223        >>> print(output)
1224        [[1. 1.]
1225         [1. 1.]]
1226    """
1227
1228    @prim_attr_register
1229    def __init__(self):
1230        """Initialize Fills."""
1231        self.init_prim_io_names(inputs=['x', 'value'], outputs=['y'])
1232
1233
1234class FillV2(PrimitiveWithCheck):
1235    """
1236    Creates a tensor with shape described by `shape` and fills it with values in `value` .
1237
1238    Inputs:
1239        - **shape** (Union[Tuple[int], Tensor[int]]) - 1-D Tensor or Tuple, specify the shape
1240          of output tensor. Its dtype must be int32 or int64.
1241        - **value** (Tensor) - A 0-D Tensor, the value to fill the output tensor `y` .
1242
1243    Outputs:
1244        - **y** (Tensor) - A tensor, its shape and value are described above.
1245
1246    Raises:
1247        TypeError: If `shape` is not a 1-D tensor or tuple.
1248        TypeError: If the data type of `shape` is not int32 or int64.
1249        ValueError: If `value` is not a 0-D Tensor.
1250
1251    Supported Platforms:
1252        ``Ascend`` ``GPU`` ``CPU``
1253
1254    Examples:
1255        >>> import mindspore
1256        >>> from mindspore import Tensor, ops
1257        >>> fillV2 = ops.FillV2()
1258        >>> output = fillV2(Tensor([2, 3], mindspore.int32), Tensor(1, mindspore.float32))
1259        >>> print(output)
1260        [[1. 1. 1.]
1261         [1. 1. 1.]]
1262        >>> output = fillV2(Tensor([3, 3], mindspore.int64), Tensor(0, mindspore.int32))
1263        >>> print(output)
1264        [[0 0 0]
1265         [0 0 0]
1266         [0 0 0]]
1267    """
1268
1269    @prim_attr_register
1270    def __init__(self):
1271        """Initialize FillV2"""
1272        self.init_prim_io_names(inputs=['shape', 'value'], outputs=['y'])
1273
1274    def check_elim(self, dims, x):
1275        if x is None or (not isinstance(x, (Tensor, Tensor_))) or (x.shape != ()) or \
1276                dims is None or (isinstance(dims, (tuple, list)) and dims) or \
1277                isinstance(dims, (Tensor, Tensor_)):
1278            return (False, None)
1279        return (True, x)
1280
1281    def infer_value(self, dims, x):
1282        if x is None or dims is None or isinstance(dims, (Tensor, Tensor_)):
1283            return None
1284        if isinstance(dims, (tuple, list)) and None in dims:
1285            return None
1286        if 0 in dims:
1287            init_func = Zero()
1288            init_func.__enable_zero_dim__ = True
1289            out = Tensor(shape=dims, dtype=x.dtype, init=init_func)
1290            return out
1291        return Tensor(np.full(dims, x.asnumpy()))
1292
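
# A minimal reference sketch of the FillV2 semantics above, assuming a Python
# scalar `value` and an iterable `shape` (the helper name is illustrative only):
def _fill_v2_reference(shape, value):
    """Broadcast a scalar fill value to the requested shape, as FillV2 does."""
    return Tensor(np.full(tuple(shape), value))
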
1293
1294class TupleToArray(PrimitiveWithInfer):
1295    """
1296    Converts a tuple to a tensor.
1297
1298    Refer to :func:`mindspore.ops.tuple_to_array` for more details.
1299
1300    Inputs:
1301        - **input_x** (tuple) - A tuple of numbers. These numbers have the same type.
1302          The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.
1303
1304    Outputs:
1305        Tensor, if the input tuple contains `N` numbers, then the shape of the output tensor is :math:`(N,)`.
1306
1307    Supported Platforms:
1308        ``Ascend`` ``GPU`` ``CPU``
1309
1310    Examples:
1311        >>> from mindspore import ops
1312        >>> input_x = (1,2,3)
1313        >>> print(type(input_x))
1314        <class 'tuple'>
1315        >>> output = ops.TupleToArray()(input_x)
1316        >>> print(type(output))
1317        <class 'mindspore.common.tensor.Tensor'>
1318        >>> print(output)
1319        [1 2 3]
1320    """
1321
1322    @prim_attr_register
1323    def __init__(self):
1324        """Initialize TupleToArray"""
1325
1326    def infer_value(self, x):
1327        validator.check_value_type("x", x, [tuple], self.name)
1328        validator.check("size of x", len(x), '', 0, validator.GT, self.name)
1329        dtype = type(x[0])
1330        for i, item in enumerate(x):
1331            validator.check_value_type(f"x[{i}]", item, [numbers.Number], self.name)
1332        if not all(isinstance(item, dtype) for item in x):
            raise TypeError(f"For '{self.name}', all elements of 'input_x' must have the same type.")
1334        if isinstance(x[0], int):
1335            ret = np.array(x, np.int32)
1336        else:
1337            ret = np.array(x, np.float32)
1338        return Tensor(ret)
1339
1340    def __call__(self, *args):
1341        x, = args
1342        args = list()
1343        if isinstance(x, range):
1344            args.append(tuple(x))
1345        else:
1346            args.append(x)
1347        return _run_op(self, self.name, args)
1348
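
# Reference sketch of the dtype rule used by TupleToArray.infer_value above
# (illustrative helper, not part of the API): int tuples become int32 tensors,
# other numeric tuples become float32 tensors.
def _tuple_to_array_reference(values):
    """Convert a tuple of numbers to a Tensor with TupleToArray's dtype rule."""
    np_dtype = np.int32 if isinstance(values[0], int) else np.float32
    return Tensor(np.array(values, np_dtype))
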
1349
1352class InvertPermutation(PrimitiveWithInfer):
1353    r"""
1354    Computes the inverse of an index permutation.
1355
1356    This operator is mainly used to calculate the inverse of index permutation.
1357    It requires a 1-dimensional integer tensor x, which represents the index of a zero-based array,
1358    and exchanges each value with its index position. In other words, For output tensor y and input tensor x,
1359    this operation calculates the following values:
1360
1361    :math:`y[x[i]] = i, \quad i \in [0, 1, \ldots, \text{len}(x)-1]`.
1362
1363    Note:
1364        These values must include 0. There must be no duplicate values and the
1365        values can not be negative.
1366
1367    Inputs:
1368        - **input_x** (Union(tuple[int], list[int])) - The input is constructed by multiple
1369          integers, i.e., :math:`(y_1, y_2, ..., y_S)` representing the indices.
1370          The values must include 0. There can be no duplicate values or negative values.
          Only constant value is allowed. The maximum value must be equal to the length of input_x minus 1.
1372
1373    Outputs:
1374        tuple[int]. It has the same length as the input.
1375
1376    Raises:
1377        TypeError: If `input_x` is neither tuple nor list.
1378        TypeError: If element of `input_x` is not an int.
1379
1380    Supported Platforms:
1381        ``Ascend`` ``GPU`` ``CPU``
1382
1383    Examples:
1384        >>> from mindspore import ops
1385        >>> invert = ops.InvertPermutation()
1386        >>> input_data = (3, 4, 0, 2, 1)
1387        >>> output = invert(input_data)
1388        >>> print(output)
1389        (2, 4, 3, 0, 1)
1390    """
1391
1392    @prim_attr_register
1393    def __init__(self):
1394        """Initialize InvertPermutation"""
1395
1396    def __infer__(self, x):
1397        x_shp = x['shape']
1398        x_value = x['value']
1399        if mstype._issubclass_(x['dtype'], mstype.tensor_type):  # pylint: disable=W0212
1400            raise ValueError(f"For \'{self.name}\', the value of 'input_x' must be non-Tensor, but got {x['dtype']}")
1401        if x_value is None:
1402            raise ValueError(f"For '{self.name}', the value of 'input_x' can not be None, but got {x_value}.")
1403        validator.check_value_type("shape", x_shp, [tuple, list], self.name)
1404        for shp in x_shp:
1405            if shp:
1406                x_rank = len(np.array(x_value, np.int64).shape)
1407                raise ValueError(f"For \'{self.name}\', the dimension of 'input_x' must be 1, but got {x_rank}.")
1408        for i, value in enumerate(x_value):
1409            validator.check_value_type("input[%d]" % i, value, [int], self.name)
1410        z = [x_value[i] for i in range(len(x_value))]
1411        z.sort()
1412
1413        for i in range(1, len(z)):
1414            if z[i - 1] == z[i]:
1415                raise ValueError(f"For '{self.name}', the 'input_x' can not contain duplicate values, "
1416                                 f"but got duplicated {z[i]} in the 'input_x'.")
1417        validator.check(f'value min', min(x_value), '', 0, validator.EQ, self.name)
1418        validator.check(f'value max', max(x_value), '', len(x_value) - 1, validator.EQ, self.name)
1419
        y = [None] * len(x_value)
        for i, value in enumerate(x_value):
            validator.check('value', z[i], 'index', i, validator.EQ, self.name)
            y[value] = i
1426        return {'shape': x_shp,
1427                'dtype': x['dtype'],
1428                'value': tuple(y)}
1429
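
# A pure-Python sketch of the rule y[x[i]] = i computed by InvertPermutation
# (illustrative helper only), e.g. (3, 4, 0, 2, 1) -> (2, 4, 3, 0, 1):
def _invert_permutation_reference(perm):
    """Return the inverse of a zero-based permutation as a tuple."""
    y = [None] * len(perm)
    for i, value in enumerate(perm):
        y[value] = i
    return tuple(y)
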
1430
1431class ArgminV2(Primitive):
1432    """
1433    Returns the indices of the minimum value of a tensor across the axis.
1434
1435    If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor is
1436    :math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
1437
1438    Note:
1439        This operator only supports dynamic shape. As for static shape, please use operator `Argmin` instead.
1440
1441    Inputs:
1442        - **x** (Tensor) - Input tensor.
1443          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
1444        - **axis** (int) - Axis where the Argmin operator applies to. Default: ``-1`` .
1445
1446    Outputs:
1447        Tensor, indices of the min value of input tensor across the axis.
1448
1449    Raises:
1450        TypeError: If `axis` is not an int.
1451
1452    Supported Platforms:
1453        ``Ascend``
1454
1455    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, nn, ops
        >>> from mindspore.ops.operations.array_ops import ArgminV2
        >>> class ArgMinV2DynamicShape(nn.Cell):
        ...     def __init__(self, gather_axis=1, argmin_axis=1):
        ...         super(ArgMinV2DynamicShape, self).__init__()
        ...         self.unique = ops.Unique()
        ...         self.gather = ops.Gather()
        ...         self.argmin = ArgminV2()
        ...         self.gather_axis = gather_axis
        ...         self.argmin_axis = argmin_axis
        ...     def construct(self, x, indices):
        ...         unique_index, _ = self.unique(indices)
        ...         y = self.gather(x, unique_index, self.gather_axis)
        ...         z = self.argmin(y, self.argmin_axis)
        ...         return z
        >>>
        >>> x = Tensor(np.array([[4, 8, 1, 6], [4, 3, 6, 2], [4, 4, 1, 1]]).astype(np.float32))
        >>> index = Tensor([1, 2], dtype=mindspore.int32)
        >>> net = ArgMinV2DynamicShape()
1473        >>> res = net(x, index)
1474        >>> print(res)
1475        [1 0 1]
1476    """
1477
1478    @prim_attr_register
1479    def __init__(self):
1480        """Initialize ArgminV2"""
1481        self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y'])
1482
1483    def __call__(self, x, axis=-1):
1484        args = [x, axis]
1485        output = _run_op(self, self.name, args)
1486        return output
1487
1488
1489class UnsortedSegmentMin(PrimitiveWithCheck):
1490    r"""
1491    Computes the minimum of a tensor along segments.
1492
1493    Refer to :func:`mindspore.ops.unsorted_segment_min` for more details.
1494
1495    Inputs:
1496        - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
          The data type must be one of: float16, float32, float64, int8, int16, int32, int64,
          uint8, uint16, uint32, uint64.
1498        - **segment_ids** (Tensor) - The label indicates the segment to which each element belongs.
1499          Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
1500        - **num_segments** (Union[int, Tensor]) - Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
1501
1502    Outputs:
1503        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
1504
1505    Supported Platforms:
1506        ``Ascend`` ``GPU`` ``CPU``
1507
1508    Examples:
1509        >>> from mindspore import Tensor
1510        >>> from mindspore import ops
1511        >>> import numpy as np
1512        >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
1513        >>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
1514        >>> num_segments = 2
1515        >>> unsorted_segment_min = ops.UnsortedSegmentMin()
1516        >>> output = unsorted_segment_min(input_x, segment_ids, num_segments)
1517        >>> print(output)
1518        [[1. 2. 3.]
1519         [4. 2. 1.]]
1520    """
1521
1522    @prim_attr_register
1523    def __init__(self):
1524        """Initialize UnsortedSegmentMin"""
1525        self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])
1526
1527    def __check__(self, x, segment_ids, num_segments):
1528        x_shape = x['shape']
1529        segment_ids_shape = segment_ids['shape']
1530        valid_type = [mstype.float16, mstype.float32, mstype.int32, mstype.int8, mstype.uint8,
1531                      mstype.int16, mstype.uint16, mstype.uint32, mstype.int64, mstype.uint64, mstype.float64]
1532        validator.check_tensor_dtype_valid("x", x['dtype'], valid_type, self.name)
1533        validator.check_tensor_dtype_valid("segment_ids", segment_ids['dtype'], [mstype.int32, mstype.int64], self.name)
1534
1535        # support vmap : segment_ids_shape support batch rank
1536        if not hasattr(self, 'batch_rank'):
1537            if not is_dim_unknown(x_shape) and not is_dim_unknown(segment_ids_shape):
1538                validator.check_int(len(segment_ids_shape), 1, validator.GE, "rank of segment_ids_shape", self.name)
1539
1540        num_segments_type = num_segments['dtype']
1541        validator.check_subclass("num_segments", num_segments_type, [mstype.number], self.name)
1542        if not is_shape_unknown(x_shape) and not is_shape_unknown(segment_ids_shape):
1543            # only validate when both shapes fully known
1544            validator.check(f'first shape of input_x', x_shape[0],
1545                            'length of segments_id', segment_ids_shape[0], validator.EQ, self.name)
1546        num_segments_v = num_segments['value']
1547        validator.check_value_type('num_segments', num_segments_v, [int], self.name)
1548        validator.check_positive_int(num_segments_v, "num_segments", self.name)
1549
1550
1551class UnsortedSegmentMax(PrimitiveWithCheck):
1552    r"""
1553    Computes the maximum along segments of a tensor.
1554
1555    Refer to :func:`mindspore.ops.unsorted_segment_max` for more details.
1556
1557    Inputs:
1558        - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
          The data type must be one of: float16, float32, float64, int8, int16, int32, int64,
          uint8, uint16, uint32, uint64.
1560        - **segment_ids** (Tensor) - The label indicates the segment to which each element belongs.
1561          Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
1562        - **num_segments** (Union[int, Tensor]) - Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
1563
1564    Outputs:
1565        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
1566
1567    Supported Platforms:
1568        ``Ascend`` ``GPU`` ``CPU``
1569
1570    Examples:
        >>> # case 1: There are only two segment ids, 0 and 1, with segment_ids=[0, 1, 1].
1572        >>> # num_segments = 2 indicates that there are two types of segment_id,
1573        >>> # the first number '0' in [0, 1, 1] indicates input_x[0],
1574        >>> # the second number '1' in [0, 1, 1] indicates input_x[1],
1575        >>> # the third number '1' in [0, 1, 1] indicates input_x[2],
        >>> # input_x[0], which is [1, 2, 3], will not be compared with other segments.
        >>> # Only elements with the same segment_id are compared.
1578        >>> from mindspore import Tensor
1579        >>> from mindspore import ops
1580        >>> import numpy as np
1581        >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
1582        >>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
1583        >>> num_segments = 2
1584        >>> unsorted_segment_max = ops.UnsortedSegmentMax()
1585        >>> output = unsorted_segment_max(input_x, segment_ids, num_segments)
1586        >>> print(output)
1587        [[1. 2. 3.]
1588         [4. 5. 6.]]
1589        >>>
1590        >>> # case 2: The segment_ids=[0, 0, 1, 1].
        >>> # [1, 2, 3] will be compared with [4, 2, 0],
        >>> # and [4, 5, 6] will be compared with [4, 2, 1].
1593        >>> input_x = Tensor(np.array([[1, 2, 3], [4, 2, 0], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
1594        >>> segment_ids = Tensor(np.array([0, 0, 1, 1]).astype(np.int32))
1595        >>> num_segments = 2
1596        >>> unsorted_segment_max = ops.UnsortedSegmentMax()
1597        >>> output = unsorted_segment_max(input_x, segment_ids, num_segments)
1598        >>> print(input_x.shape)
        (4, 3)
        >>> print(output)
        [[4. 2. 3.]
         [4. 5. 6.]]
        >>> # case 3: What happens if input_x has three or more dimensions?
1604        >>> # The shape of input_x is (2, 4, 3),
1605        >>> # and the length of segment_ids should be the same as the first dimension of input_x.
1606        >>> # Because the segment_ids are different, input_x[0] will not be compared to input_x[1].
1607        >>> input_x = Tensor(np.array([[[1, 2, 3], [4, 2, 0], [4, 5, 6], [4, 2, 1]],
1608        ...                            [[1, 2, 3], [4, 2, 0], [4, 5, 6], [4, 2, 1]]]).astype(np.float32))
1609        >>> segment_ids = Tensor(np.array([0, 1]).astype(np.int32))
1610        >>> num_segments = 2
1611        >>> unsorted_segment_max = ops.UnsortedSegmentMax()
1612        >>> output = unsorted_segment_max(input_x, segment_ids, num_segments)
1613        >>> print(input_x.shape)
        (2, 4, 3)
        >>> print(output)
        [[[1. 2. 3.]
          [4. 2. 0.]
          [4. 5. 6.]
          [4. 2. 1.]]
         [[1. 2. 3.]
          [4. 2. 0.]
          [4. 5. 6.]
          [4. 2. 1.]]]
        >>> # case 4: It has the same input as the 3rd case.
        >>> # Because num_segments is equal to 2, there are two segment ids, but only 0 is used.
        >>> # If a segment_id i is absent from segment_ids, then output[i] will be filled with
        >>> # the smallest possible value of input_x's data type.
        >>> segment_ids = Tensor(np.array([0, 0]).astype(np.int32))
        >>> output = unsorted_segment_max(input_x, segment_ids, num_segments)
        >>> print(output)
        [[[ 1.0000000e+00  2.0000000e+00  3.0000000e+00]
          [ 4.0000000e+00  2.0000000e+00  0.0000000e+00]
          [ 4.0000000e+00  5.0000000e+00  6.0000000e+00]
          [ 4.0000000e+00  2.0000000e+00  1.0000000e+00]]
         [[-3.4028235e+38 -3.4028235e+38 -3.4028235e+38]
          [-3.4028235e+38 -3.4028235e+38 -3.4028235e+38]
          [-3.4028235e+38 -3.4028235e+38 -3.4028235e+38]
          [-3.4028235e+38 -3.4028235e+38 -3.4028235e+38]]]
1639    """
1640
1641    @prim_attr_register
1642    def __init__(self):
1643        """Initialize UnsortedSegmentMax"""
1644        self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])
1645
1646    def __check__(self, x, segment_ids, num_segments):
1647        x_shape = x['shape']
1648        segment_ids_shape = segment_ids['shape']
1649        valid_type = [mstype.float16, mstype.float32, mstype.int32, mstype.int8, mstype.uint8,
1650                      mstype.int16, mstype.uint16, mstype.uint32, mstype.int64, mstype.uint64, mstype.float64]
1651        validator.check_tensor_dtype_valid("x", x['dtype'], valid_type, self.name)
1652        validator.check_tensors_dtypes_same_and_valid({"segment_ids": segment_ids['dtype']},
1653                                                      [mstype.int32, mstype.int64], self.name)
1654
1655        # support vmap : segment_ids_shape support batch rank
1656        if not hasattr(self, 'batch_rank'):
1657            if not is_dim_unknown(x_shape) and not is_dim_unknown(segment_ids_shape):
1658                validator.check_int(len(segment_ids_shape), 1, validator.GE, "rank of segment_ids_shape", self.name)
1659
1660        num_segments_type = num_segments['dtype']
1661        validator.check_subclass("num_segments", num_segments_type, [mstype.number], self.name)
1662        if not is_shape_unknown(x_shape) and not is_shape_unknown(segment_ids_shape):
1663            # only validate when both shapes fully known
1664            validator.check(f'first shape of input_x', x_shape[0],
1665                            'length of segments_id', segment_ids_shape[0], validator.EQ, self.name)
1666        num_segments_v = num_segments['value']
1667        if num_segments_v is not None:
1668            validator.check_value_type('num_segments', num_segments_v, [int], self.name)
1669            validator.check_positive_int(num_segments_v, "num_segments", self.name)
1670
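
# A NumPy sketch of the UnsortedSegmentMax semantics above, assuming a
# floating-point `x` (illustrative helper only): rows whose segment id is
# absent keep the smallest possible value of the dtype, as in case 4 above.
def _unsorted_segment_max_reference(x, segment_ids, num_segments):
    """Reference segment-wise maximum over the first axis of `x`."""
    out = np.full((num_segments,) + x.shape[1:], np.finfo(x.dtype).min, x.dtype)
    for i, seg in enumerate(segment_ids):
        out[seg] = np.maximum(out[seg], x[i])
    return out
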
1671
1672class UnsortedSegmentProd(Primitive):
1673    """
1674    Computes the product of a tensor along segments.
1675
1676    Refer to :func:`mindspore.ops.unsorted_segment_prod` for more details.
1677
1678    Inputs:
1679        - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
1680          With float16, float32 or int32 data type.
1681        - **segment_ids** (Tensor) - The label indicates the segment to which each element belongs.
1682          Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R. Data type must be int32.
1683        - **num_segments** (Union[int, Tensor]) - Set :math:`z` as num_segments, it can be an int or 0-D Tensor.
1684
1685    Outputs:
1686        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
1687
1688    Supported Platforms:
1689        ``Ascend`` ``GPU`` ``CPU``
1690
1691    Examples:
1692        >>> from mindspore import Tensor
1693        >>> from mindspore import ops
1694        >>> import numpy as np
1695        >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
1696        >>> segment_ids = Tensor(np.array([0, 1, 0]).astype(np.int32))
1697        >>> num_segments = 2
1698        >>> unsorted_segment_prod = ops.UnsortedSegmentProd()
1699        >>> output = unsorted_segment_prod(input_x, segment_ids, num_segments)
1700        >>> print(output)
1701        [[4. 4. 3.]
1702         [4. 5. 6.]]
1703    """
1704
1705    @prim_attr_register
1706    def __init__(self):
1707        """Initialize UnsortedSegmentProd"""
1708        self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])
1709
1710
1711class ConcatOffsetV1(Primitive):
1712    r"""
    Primitive for computing the gradient of Concat.

    Computes the offset of each concat input within the concat output. Offsets are accumulated
    from zero along `axis`. Except for the element at position `axis`, the corresponding
    elements of the tensors in `x` should be equal.

    Inputs:
        - **axis** (Tensor) - The specified axis, required to be a 0-D Tensor with dtype int32.
          Input `axis` should fall in :math:`[-numelement, numelement - 1]`,
          where numelement is the number of elements of the first tensor in `x`.
        - **x** (tuple[Tensor], list[Tensor]) - A tuple or a list of input tensors.
          The tensors in `x` are all required to be vectors, in other words, 1-D Tensors with dtype int32.
1724          Suppose there are two tensors in this tuple or list, namely x1 and x2.
1725          To perform `ConcatOffsetV1` in the axis 0 direction,
1726          except for the 0th axis, all elements in other axes should be equal,
1727          that is, :math:`x1[1] == x2[1], x1[2] == x2[2], ..., x1[R] == x2[R]`,
1728          where the :math:`R` indicates the last axis.
1729
1730    Outputs:
        Tensors. A tuple of N 1-D Tensor objects.
        The data type is the same as the input `x`, dtype int32.
        The shape is the same as the input `x`.
1734
1735    Raises:
1736        TypeError: If `axis` is not a tensor.
1737        TypeError: If dtype of tensor in `axis` is not int32.
1738        TypeError: If `x` have different type of tensor.
1739        TypeError: If dtype of tensor in `x` is not int32.
1740        ValueError: If the shape rank of `axis` does not equal to 0.
1741        ValueError: If the number of tensors in `x` is less than 2.
1742        ValueError: If the shape rank of tensor in `x` does not equal to 1.
1743        ValueError: If the element number of tensor in `x` is less than 1.
1744        ValueError: If `x` have different shape of tensors.
1745
1746    Supported Platforms:
1747        ``Ascend`` ``CPU``
1748
1749    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> axis = Tensor(1, dtype=mstype.int32)
1751        >>> x1 = Tensor(np.array([1, 2, 3]).astype(np.int32))
1752        >>> x2 = Tensor(np.array([1, 5, 3]).astype(np.int32))
1753        >>> x3 = Tensor(np.array([1, 4, 3]).astype(np.int32))
1754        >>> op = ops.ConcatOffsetV1()
1755        >>> output = op(axis, (x1, x2, x3))
1756        >>> print(output)
1757        (Tensor(shape=[3,], dtype=Int32, value=[0, 0, 0]),
1758         Tensor(shape=[3,], dtype=Int32, value=[0, 2, 0]),
1759         Tensor(shape=[3,], dtype=Int32, value=[0, 7, 0]))
1761    """
1762
1763    @prim_attr_register
1764    def __init__(self):
1765        """Initialize ConcatOffsetV1"""
1766
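
# Offset-rule sketch for ConcatOffsetV1 (illustrative helper): given the shape
# vectors of the concat inputs, each offset is the cumulative extent of the
# preceding inputs along `axis` and zero elsewhere, e.g. for axis=1 and shapes
# (1, 2, 3), (1, 5, 3), (1, 4, 3) the offsets are (0, 0, 0), (0, 2, 0), (0, 7, 0).
def _concat_offset_reference(axis, shapes):
    """Compute per-input offsets of a concat along `axis` from shape vectors."""
    offsets, acc = [], 0
    for shp in shapes:
        off = [0] * len(shp)
        off[axis] = acc
        acc += shp[axis]
        offsets.append(tuple(off))
    return tuple(offsets)
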
1767
1768class ParallelConcat(Primitive):
1769    r"""
1770    Concats input tensors along the first dimension.
1771
    The difference between Concat and ParallelConcat is that Concat requires all of the inputs to be computed
    before the operation begins, but does not require the input shapes to be known during graph construction.
    ParallelConcat copies pieces of the input into the output as they become available; in some situations
    this can provide a performance benefit.
1776
1777    Note:
1778        The input tensors are all required to have size 1 in the first dimension.
1779
1780    Inputs:
1781        - **values** (tuple, list) - A tuple or a list of input tensors. The data type and shape of these
1782          tensors must be the same and their rank should not be less than 1.
          The supported data type is Number on CPU, the same for Ascend except
1784          [float64, complex64, complex128].
1785
1786    Outputs:
1787        Tensor, data type is the same as `values`.
1788
1789    Raises:
1790        TypeError: If any type of the inputs is not a Tensor.
1791        TypeError: If the data type of these tensors are not the same.
1792        ValueError: If any tensor.shape[0] is not 1.
1793        ValueError: If rank of any Tensor in `values` is less than 1.
1794        ValueError: If the shape of these tensors are not the same.
1795
1796    Supported Platforms:
1797        ``Ascend`` ``GPU`` ``CPU``
1798
1799    Examples:
1800        >>> import numpy as np
1801        >>> from mindspore import Tensor, ops
1802        >>> data1 = Tensor(np.array([[0, 1]]).astype(np.int32))
1803        >>> data2 = Tensor(np.array([[2, 1]]).astype(np.int32))
1804        >>> op = ops.ParallelConcat()
1805        >>> output = op((data1, data2))
1806        >>> print(output)
1807        [[0 1]
1808         [2 1]]
1809    """
1810
1811    @prim_attr_register
1812    def __init__(self):
1813        """Initialize ParallelConcat"""
1814
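
# Semantics sketch for ParallelConcat (illustrative helper): since every input
# must have first dimension 1, the result equals concatenation along axis 0.
def _parallel_concat_reference(values):
    """Concatenate size-1-leading-dimension arrays along the first axis."""
    return np.concatenate(values, axis=0)
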
1815
1816def _get_stack_shape(value, x_shape, x_type, axis, prim_name):
1817    """for stack output shape"""
1818    validator.check_value_type("shape", x_shape, [tuple, list], prim_name)
1819    validator.check_int(len(x_shape), 1, validator.GE, "len of input_x", prim_name)
1820    validator.check_subclass("input_x[0]", x_type[0], mstype.tensor_type, prim_name)
1821
1822    out_n = len(x_shape)
1823    for i in range(1, out_n):
1824        if x_type[i] != x_type[i - 1]:
1825            raise TypeError(f"For {prim_name}, all types should be same, but got {x_type}")
1826
1827    new_x_shape = []
1828    for i, shp in enumerate(x_shape):
1829        if is_dim_unknown(shp):
1830            continue
1831        new_x_shape.append({"shape": shp, "id": i})
1832
1833    if not new_x_shape:
1834        out = {"shape": x_shape[0]}
1835        return out
1836
1837    out_shape = new_x_shape[0]["shape"]
1838    n = len(new_x_shape)
1839
1840    rank_base = len(new_x_shape[0]["shape"])
1841    for i in range(1, n):
1842        validator.check('len of x_shape[%d]' % new_x_shape[i]["id"], len(new_x_shape[i]["shape"]),
1843                        'len of x_shape[0]', rank_base, validator.EQ, prim_name, ValueError)
1844        for j in range(0, rank_base):
1845            if new_x_shape[i]["shape"][j] != new_x_shape[0]["shape"][j] and \
1846                    new_x_shape[i]["shape"][j] != -1 and new_x_shape[0]["shape"][j] != -1:
                raise ValueError(f"For {prim_name}, the shape of element {new_x_shape[i]['id']} "
                                 f"in the input can not be packed with the first element")
1849
1850    validator.check_int_range(axis, -rank_base - 1, rank_base, validator.INC_BOTH, 'axis', prim_name)
1851    if axis < 0:
1852        axis = axis + rank_base + 1
1853
1854    if is_shape_unknown(out_shape):
1855        out = {}
1856        out_shape.insert(axis, out_n)
1857        out['shape'] = out_shape
1858        return out
1859
1860    out_shape.insert(axis, out_n)
1861    return out_shape
1862
1863
1864class Stack(PrimitiveWithInfer):
1865    r"""
1866    Stacks a list of tensors in specified axis.
1867
1868    Refer to :func:`mindspore.ops.stack` for more details.
1869
1870    Args:
1871        axis (int, optional): Dimension to stack. The range is [-(R+1), R+1). Default: ``0`` .
1872
1873    Inputs:
1874        - **input_x** (Union[tuple, list]) - A Tuple or list of Tensor objects with the same shape and type.
1875
1876    Outputs:
1877        Tensor. A stacked Tensor with the same type as `input_x`.
1878
1879    Supported Platforms:
1880        ``Ascend`` ``GPU`` ``CPU``
1881
1882    Examples:
1883        >>> import mindspore
1884        >>> from mindspore import Tensor, ops
1885        >>> import numpy as np
1886        >>> data1 = Tensor(np.array([0, 1]).astype(np.float32))
1887        >>> data2 = Tensor(np.array([2, 3]).astype(np.float32))
1888        >>> stack = ops.Stack()
1889        >>> output = stack([data1, data2])
1890        >>> print(output)
1891        [[0. 1.]
1892         [2. 3.]]
1893    """
1894
1895    @prim_attr_register
1896    def __init__(self, axis=0):
1897        """Initialize Stack"""
1898        self.init_prim_io_names(inputs=['x'], outputs=['y'])
1899        validator.check_value_type("axis", axis, [int], self.name)
1900        self.axis = axis
1901
1902    def __infer__(self, value):
1903        x_shape = value['shape']
1904        x_type = value['dtype']
1905        self.add_prim_attr('num', len(x_shape))
1906        self.add_prim_attr('N', len(x_shape))
1907        all_shape = _get_stack_shape(value, x_shape, x_type, self.axis, self.name)
1909        tuple_value = value['value']
1910        input_array = []
1911        infered_value = None
1912        dtype = x_type[0]
1913        if tuple_value is not None and None not in tuple_value:
1914            for item in tuple_value:
1915                npy_item = item.asnumpy()
1916                input_array.append(npy_item)
1917            infered_value = Tensor(np.stack(input_array, axis=self.axis))
1918
1919        shape = all_shape.get('shape') if isinstance(all_shape, dict) else all_shape
1920        out = {'shape': shape,
1921               'dtype': dtype,
1922               'value': infered_value}
1923
1924        return out
1925
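
# Shape-rule sketch for Stack (illustrative helper), mirroring the insertion
# done in _get_stack_shape above: stacking `n` tensors of shape `shape` along
# `axis` inserts `n` at position `axis` of the output shape.
def _stack_out_shape_reference(n, shape, axis):
    """Return the output shape of stacking `n` tensors of `shape` at `axis`."""
    rank = len(shape)
    axis = axis + rank + 1 if axis < 0 else axis
    return tuple(shape[:axis]) + (n,) + tuple(shape[axis:])
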
1926
1927class Unstack(Primitive):
1928    r"""
    Unstacks a tensor along the specified axis. This is the opposite of ops.Stack.
    Assuming the input is a tensor of rank `R`, the output tensors will each have rank `(R-1)`.
1931
1932    Refer to :func:`mindspore.ops.unstack` for more details.
1933
1934    Args:
1935        axis (int): Dimension along which to unpack. Default: ``0`` .
1936            Negative values wrap around. The range is [-R, R).
1937        num (Union[None, int]): The number of output tensors.
1938            Automatically inferred by input_x and axis if ``None`` . Default: ``None`` .
1939
1940    Inputs:
1941        - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
1942          A tensor to be unstacked and the rank of the tensor must be greater than 0.
1943
1944    Outputs:
        A tuple of tensors, the shape of each object is the same.
1946        Given a tensor of shape :math:`(x_1, x_2, ..., x_R)`. If :math:`0 \le axis`,
1947        the shape of tensor in output is :math:`(x_1, x_2, ..., x_{axis}, x_{axis+2}, ..., x_R)`.
1948
1949    Supported Platforms:
1950        ``Ascend`` ``GPU`` ``CPU``
1951
1952    Examples:
1953        >>> import numpy as np
1954        >>> from mindspore import Tensor, ops
1955        >>> unstack = ops.Unstack()
1956        >>> input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
1957        >>> output = unstack(input_x)
1958        >>> print(output)
1959        (Tensor(shape=[4], dtype=Int64, value= [1, 1, 1, 1]), Tensor(shape=[4], dtype=Int64, value= [2, 2, 2, 2]))
1960    """
1961
1962    @prim_attr_register
1963    def __init__(self, axis=0, num=None):
1964        """Initialize Unstack"""
1965        self.init_prim_io_names(inputs=['x'], outputs=['y'])
1966        validator.check_value_type("axis", axis, [int], self.name)
1967        if num is not None:
1968            validator.check_value_type("num", num, [int], self.name)
1969
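
# A NumPy sketch of the Unstack semantics above (illustrative helper): split
# the input into size-1 slices along `axis` and squeeze that axis away.
def _unstack_reference(x, axis=0):
    """Return a tuple of rank-(R-1) arrays unstacked along `axis`."""
    return tuple(np.squeeze(piece, axis=axis)
                 for piece in np.split(x, x.shape[axis], axis=axis))
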
1970
1971class Slice(Primitive):
1972    """
1973    Slices a tensor in the specified shape.
1974
1975    Refer to :func:`mindspore.ops.slice` for more details.
1976
1977    Inputs:
1978        - **input_x** (Tensor) - The target tensor.
1979          The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
        - **begin** (Union[tuple, list]) - The beginning of the slice. Only constant value (>=0) is allowed.
1981        - **size** (Union[tuple, list]) - The size of the slice. Only constant value is allowed.
1982
1983    Outputs:
1984        Tensor, the shape is: input `size`, the data type is the same as `input_x`.
1985
1986    Supported Platforms:
1987        ``Ascend`` ``GPU`` ``CPU``
1988
1989    Examples:
1990        >>> from mindspore import Tensor
1991        >>> from mindspore import ops
1992        >>> import numpy as np
1993        >>> data = Tensor(np.array([[[1, 1, 1], [2, 2, 2]],
1994        ...                         [[3, 3, 3], [4, 4, 4]],
1995        ...                         [[5, 5, 5], [6, 6, 6]]]).astype(np.int32))
1996        >>> slice_op = ops.Slice()
1997        >>> output = slice_op(data, (1, 0, 0), (1, 1, 3))
1998        >>> print(output)
1999        [[[3 3 3]]]
2000        >>> output = slice_op(data, (1, 0, 0), (1, 1, 2))
2001        >>> print(output)
2002        [[[3 3]]]
2003        >>> output = slice_op(data, (1, 0, 0), (1, 1, 1))
2004        >>> print(output)
2005        [[[3]]]
2006        >>> output = slice_op(data, (1, 1, 0), (1, 1, 3))
2007        >>> print(output)
2008        [[[4 4 4]]]
2009        >>> output = slice_op(data, (1, 0, 1), (1, 1, 2))
2010        >>> print(output)
2011        [[[3 3]]]
2012    """
2013
2014    @prim_attr_register
2015    def __init__(self):
2016        """Initialize slice"""
2017        self.init_prim_io_names(inputs=['x', 'begin', 'size'], outputs=['output'])
2018
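
# A NumPy sketch of the Slice semantics above (illustrative helper):
# dimension i of the output covers x[begin[i] : begin[i] + size[i]].
def _slice_reference(x, begin, size):
    """Slice `x` starting at `begin` with extents `size` per dimension."""
    return x[tuple(slice(b, b + s) for b, s in zip(begin, size))]
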
2019
2020class Coalesce(Primitive):
2021    """
2022    Returns the coalesced sparse tensor of the input.
2023
2024    Inputs:
2025        - **x_indices** (Tensor) - A 2-D Tensor, represents the indices of the nonzero elements of the sparse tensor.
2026          Supported data type is int64. Its elements should be non-negative. The shape is :math:`(y, x)`.
2027        - **x_values** (Tensor) - A 1-D Tensor, represents the values corresponding to the indices in `x_indices`.
2028          Supported data types are float16 and float32. The shape is :math:`(x,)`.
2029        - **x_shape** (Tensor) - A 1-D Tensor, specifies the shape of the sparse tensor.
2030          Supported data type is int64. The shape is :math:`(y,)`.
2031
2032    Outputs:
2033        - **y_indices** (Tensor) - A 2-D Tensor, represents the indices of the nonzero elements of the sparse tensor.
          Data type is int64. Its elements are non-negative. The shape is :math:`(y, z)`.
2035          `z` represents the number of different indices in `x_indices`.
2036        - **y_values** (Tensor) - A 1-D Tensor, represents the values corresponding to the indices in `y_indices`.
2037          Data type is the same as `x_values`'s. The shape is :math:`(z,)`.
2038        - **y_shape** (Tensor) - A 1-D Tensor, specifies the shape of the sparse tensor.
2039          Data type is int64. The shape is :math:`(y,)`.
2040
2041    Raises:
2042        TypeError: If the data type of `x_values` is neither float32 nor float16.
2043        TypeError: If any of the data types of `x_indices` and `x_shape` is not int64.
2044        ValueError: If any of `x_values` and `x_shape` is not a 1-D tensor.
2045        ValueError: If `x_indices` is not a 2-D tensor.
2046        ValueError: If sizes of second dimension of `x_indices` and first dimension of `x_values` are not the same.
2047        ValueError: If sizes of first dimension of `x_indices` and first dimension of `x_shape` are not the same.
2048        ValueError: If any of the values of elements of `x_indices` is negative.
2049        ValueError: If any of the values of elements of `x_indices` exceed the limit set by `x_shape`.
2050
2051    Supported Platforms:
2052        ``GPU`` ``CPU``
2053
2054    Examples:
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> x_indices = Tensor([[0, 0, 1], [1, 1, 2]], dtype=mstype.int64)
2056        >>> x_values = Tensor([1, 5, 4], dtype=mstype.float32)
2057        >>> x_shape = Tensor([3, 3], dtype=mstype.int64)
2058        >>> coalesce = ops.Coalesce()
2059        >>> y_indices, y_values, y_shape = coalesce(x_indices, x_values, x_shape)
2060        >>> print(y_indices)
2061        [[0 1]
2062         [1 2]]
2063        >>> print(y_values)
2064        [6. 4.]
2065        >>> print(y_shape)
2066        [3 3]
2067    """
2068
2069    @prim_attr_register
2070    def __init__(self):
2071        """Initialize Coalesce."""
2072        self.init_prim_io_names(inputs=['x_indices', 'x_values', 'x_shape'],
2073                                outputs=['y_indices', 'y_values', 'y_shape'])
2074
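
# A pure-Python sketch of the Coalesce semantics above (illustrative helper):
# duplicate indices are merged and their values summed, matching the example
# where the two entries at index (0, 1) with values 1 and 5 become 6.
def _coalesce_reference(indices, values):
    """Merge duplicate sparse indices, summing their values."""
    merged = {}
    for col, val in zip(zip(*indices), values):
        merged[col] = merged.get(col, 0) + val
    keys = sorted(merged)
    return [list(row) for row in zip(*keys)], [merged[k] for k in keys]
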
2075
2076class Rint(Primitive):
2077    """
    Rounds each element of `input_x` to the nearest integer value, element-wise.
2079
2080    Inputs:
2081        - **input_x** (Tensor) - Input tensor of any dimension, which must be one of the following types:
2082          float16, float32, float64.
2083    Outputs:
2084        Tensor, has the same shape and type as `input_x`.
2085
2086    Raises:
2087        TypeError: If dtype of `input_x` is not in [float16, float32, float64].
2088
2089    Supported Platforms:
2090        ``Ascend`` ``GPU`` ``CPU``
2091
2092    Examples:
2093        >>> import mindspore
2094        >>> import numpy as np
2095        >>> from mindspore import Tensor, ops
2096        >>> input_x = Tensor(np.array([-1.6, -0.1, 1.5, 2.0]), mindspore.float32)
2097        >>> op = ops.Rint()
2098        >>> output = op(input_x)
2099        >>> print(output)
2100        [-2.  0.  2.  2.]
2101        >>> input_x = Tensor(np.array([[-2.0, -1.9, -1.8, -1.7, -1.6],
2102        ...                            [-2.0, -1.9, -1.8, -1.7, -1.6]]), mindspore.float32)
2103        >>> output = op(input_x)
2104        >>> print(output)
2105        [[-2. -2. -2. -2. -2.]
2106         [-2. -2. -2. -2. -2.]]
2107    """
2108
2109    @prim_attr_register
2110    def __init__(self):
2111        """Initialize Rint."""
2112        self.init_prim_io_names(inputs=['x'], outputs=['output'])
2113
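
# Reference sketch (assumption: Rint rounds halves to even, like np.rint, which
# is consistent with the example above where 1.5 rounds to 2.0):
def _rint_reference(x):
    """Round each element to the nearest integer value, halves to even."""
    return np.rint(x)
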
2114
2115class StridedSliceV2(Primitive):
2116    r"""
    StridedSliceV2 will be deprecated in favor of StridedSlice in the future.
    Extracts a strided slice of a tensor.
    Refer to the class StridedSlice for more details.
2120
2121    Args:
2122        begin_mask (int): Starting index of the slice. Default: ``0`` .
2123        end_mask (int): Ending index of the slice. Default: ``0`` .
2124        ellipsis_mask (int): An int mask. Default: ``0`` .
2125        new_axis_mask (int): An int mask. Default: ``0`` .
2126        shrink_axis_mask (int): An int mask. Default: ``0`` .
2127
2128    Inputs:
2129        - **input_x** (Tensor) - The input Tensor.
2130        - **begin** (tuple[int]) - A tuple which represents the location where to start. Only
2131          constant value is allowed.
        - **end** (tuple[int]) - A tuple which represents the maximum location where to end.
2133          Only constant value is allowed.
2134        - **strides** (tuple[int]) - A tuple which represents the stride is continuously added
2135          before reaching the maximum location. Only constant value is allowed.
2136
2137    Outputs:
        Tensor. The output is explained by the following example.
2139
2140    Raises:
2141        TypeError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or `shrink_axis_mask` is not an int.
2142        TypeError: If `begin`, `end` or `strides` is not a tuple.
2143        ValueError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or `shrink_axis_mask` is less than 0.
2144
2145    Supported Platforms:
2146        ``Ascend`` ``CPU``
2147
2148    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, ops
        >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
2150        ...                   [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
2151        >>> strided_slice_v2 = ops.StridedSliceV2()
2152        >>> output = strided_slice_v2(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
2153        >>> print(output)
2154        [[[3.]]
2155         [[5.]]]
2156    """
2157
2158    @prim_attr_register
2159    def __init__(self,
2160                 begin_mask=0,
2161                 end_mask=0,
2162                 ellipsis_mask=0,
2163                 new_axis_mask=0,
2164                 shrink_axis_mask=0):
2165        """Initialize StridedSliceV2"""
2166        self.init_prim_io_names(inputs=['x', 'begin', 'end', 'strides'], outputs=['output'])
2167
2168
2169class DiagPart(PrimitiveWithCheck):
2170    r"""
    Extracts the diagonal elements from the given Tensor.
2173
2174    If the `input_x` is a Tensor of shape :math:`[D_1,..., D_k, D_1,..., D_k]`, then the
2175    output will be a Tensor of rank k of shape :math:`[D_1,..., D_k]` where:
2176    :math:`output[i_1,..., i_k] = input\_x[i_1,..., i_k, i_1,..., i_k]`.
2177
2178    Inputs:
2179        - **input_x** (Tensor) - The rank of input tensor is 2k(k > 0).
2180
2181    Outputs:
2182        Tensor, the extracted diagonal has the same dtype as the `input_x`.
2183
2184    Raises:
2185        TypeError: If `input_x` is not a Tensor.
2186        ValueError: If rank of `input_x` is not even or zero.
2187        ValueError: If input_shape[i] is not equal to input_shape[i + len(input_shape)/2].
2188
2189    Supported Platforms:
2190        ``Ascend`` ``GPU`` ``CPU``
2191
2192    Examples:
        >>> from mindspore import Tensor, ops
        >>> input_x = Tensor([[1, 0, 0, 0],
2194        ...                   [0, 2, 0, 0],
2195        ...                   [0, 0, 3, 0],
2196        ...                   [0, 0, 0, 4]])
2197        >>> diag_part = ops.DiagPart()
2198        >>> output = diag_part(input_x)
2199        >>> print(output)
2200        [1 2 3 4]
2201    """
2202
2203    @prim_attr_register
2204    def __init__(self):
2205        """Initialize DiagPart"""
2206
2207    def infer_value(self, x):
2208        if x is None:
2209            return None
2210        # do constant-folding only when x rank is 2
2211        if len(x.shape) != 2:
2212            return None
2213        ret = np.diag(x.asnumpy())
2214        return Tensor(ret)
2215
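
# A rank-2k reference sketch for DiagPart (illustrative helper):
# output[i_1, ..., i_k] = x[i_1, ..., i_k, i_1, ..., i_k].
def _diag_part_reference(x):
    """Extract the generalized diagonal of a rank-2k array."""
    k = len(x.shape) // 2
    dims = x.shape[:k]
    out = np.empty(dims, x.dtype)
    for idx in itertools.product(*(range(d) for d in dims)):
        out[idx] = x[idx + idx]
    return out
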
2216
2217class Mvlgamma(Primitive):
2218    r"""
2219    Calculates the multivariate log-gamma function element-wise for a given dimension `p`.
2220
2221    .. warning::
2222        This is an experimental API that is subject to change or deletion.
2223
2224    Refer to :func:`mindspore.ops.mvlgamma` for more details.
2225
2226    Args:
        p (int): The number of dimensions, which must be greater than or equal to 1.
2228
2229    Inputs:
2230        - **x** (Tensor) - The tensor to compute the multivariate log-gamma function,
2231          which must be one of the following types: float32, float64.
2232          The shape is :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
2233          And the value of any element in `x` must be greater than :math:`(p - 1) / 2`.
2234
2235    Outputs:
2236        Tensor, has the same shape and type as `x`.
2237
2238    Supported Platforms:
2239        ``Ascend`` ``GPU`` ``CPU``
2240
2241    Examples:
2242        >>> import mindspore
2243        >>> import numpy as np
2244        >>> from mindspore import Tensor, ops
2245        >>> x = Tensor(np.array([[3, 4, 5], [4, 2, 6]]), mindspore.float32)
2246        >>> op = ops.Mvlgamma(p=3)
2247        >>> y = op(x)
2248        >>> print(y)
2249        [[ 2.694925   5.402975   9.140645 ]
2250         [ 5.402975   1.5963125 13.640454 ]]
2251    """
2252
2253    @prim_attr_register
2254    def __init__(self, p):
2255        """Initialize Mvlgamma."""
2256        self.init_prim_io_names(inputs=['x'], outputs=['y'])
2257        validator.check_value_type('p', p, [int], self.name)
2258        validator.check_positive_int(p, 'p', self.name)
2259
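
# Scalar reference sketch of the multivariate log-gamma computed by Mvlgamma
# (illustrative helper): mvlgamma(a, p) = p*(p-1)/4 * ln(pi)
# + sum_{j=1..p} lgamma(a + (1 - j) / 2).
def _mvlgamma_scalar_reference(a, p):
    """Evaluate the multivariate log-gamma of scalar `a` for dimension `p`."""
    from math import lgamma, log, pi
    return p * (p - 1) / 4 * log(pi) + sum(
        lgamma(a + (1 - j) / 2) for j in range(1, p + 1))
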
2260
2261class ScatterUpdate(Primitive):
2262    r"""
2263    Updates tensor values by using input indices and value.
2264
2265    Using given values to update tensor value, along with the input indices.
2266
    For each `i, ..., j` in `indices.shape`:
2268
2269    .. math::
2270        \text{input_x}[\text{indices}[i, ..., j], :] = \text{updates}[i, ..., j, :]
2271
2272    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
2273    If they have different data types, the lower priority data type will be converted to
2274    the relatively highest priority data type.
2275
2276    Args:
2277        use_locking (bool): Whether to protect the assignment by a lock. Default: ``True`` .
2278
2279    Inputs:
2280        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
2281          The shape is 0-D or :math:`(N, *)` where :math:`*` means any number of additional dimensions.
2282        - **indices** (Tensor) - The index of input tensor. With int32 data type.
2283          If there are duplicates in indices, the order for updating is undefined.
2284        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
2285          and updates.shape = indices.shape + input_x.shape[1:].
2286
2287    Outputs:
2288        Tensor, has the same shape and type as `input_x`.
2289
2290    Raises:
2291        TypeError: If `use_locking` is not a bool.
2292        TypeError: If `indices` is not an int32.
2293        ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
2294        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
2295                      is required when data type conversion of Parameter is not supported.
2296
2297    Supported Platforms:
2298        ``Ascend`` ``GPU`` ``CPU``
2299
2300    Examples:
2301        >>> import mindspore
2302        >>> import numpy as np
2303        >>> from mindspore import Tensor, ops
2304        >>> np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
2305        >>> input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name="x")
2306        >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
2307        >>> np_updates = np.array([[2.0, 1.2, 1.0], [3.0, 1.2, 1.0]])
2308        >>> updates = Tensor(np_updates, mindspore.float32)
2309        >>> op = ops.ScatterUpdate()
2310        >>> output = op(input_x, indices, updates)
2311        >>> print(output)
2312        [[2. 1.2  1.]
2313         [3. 1.2  1.]]
2314    """
2315    __mindspore_signature__ = (
2316        sig.make_sig('x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
2317        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
2318        sig.make_sig('updates', dtype=sig.sig_dtype.T)
2319    )
2320
2321    @prim_attr_register
2322    def __init__(self, use_locking=True):
2323        """Initialize ScatterUpdate"""
2324        validator.check_value_type('use_locking', use_locking, [bool], self.name)
2325        self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])
2326        self.add_prim_attr('side_effect_mem', True)
2327
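
# A NumPy sketch of the ScatterUpdate semantics above (illustrative helper);
# with duplicate indices the update order is undefined, as documented.
def _scatter_update_reference(x, indices, updates):
    """Assign `updates` rows into a copy of `x` at the given row indices."""
    out = x.copy()
    out[indices] = updates
    return out
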
2328
2329class ScatterNdUpdate(Primitive):
2330    r"""
2331    Updates tensor values by using input indices and value.
2332
2333    Using given values to update tensor value, along with the input indices.
2334
2335    `input_x` has rank P and `indices` has rank Q where `Q >= 2`.
2336
2337    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.
2338
2339    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`.
2340
2341    `updates` is a tensor of rank `Q-1+P-N`, and its shape is:
2342    :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.
2343
2344    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
2345    If they have different data types, the lower priority data type will be converted to
2346    the relatively highest priority data type.
2347
2348    Args:
2349        use_locking (bool): Whether to protect the assignment by a lock. Default: ``True`` .
2350
2351    Inputs:
2352        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
2353          The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
2354        - **indices** (Tensor) - The index of input tensor, with int32 or int64 data type.
        - **updates** (Tensor) - An N-D (2D or 3D) Tensor, used to update the input tensor.
          It has the same type as the input. The shape is `indices.shape[:-1] + x.shape[indices.shape[-1]:]`.
2357
2358    Outputs:
2359        Tensor, has the same shape and type as `input_x`.
2360
2361    Raises:
2362        TypeError: If `use_locking` is not a bool.
2363        TypeError: If `indices` is not an int32 or an int64.
2364        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
2365                      is required when data type conversion of Parameter is not supported.
2366
2367    Supported Platforms:
2368        ``Ascend`` ``GPU`` ``CPU``
2369
2370    Examples:
2371        >>> import mindspore
2372        >>> import numpy as np
2373        >>> from mindspore import Tensor, ops
2374        >>> np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
2375        >>> input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name="x")
2376        >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
2377        >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
2378        >>> op = ops.ScatterNdUpdate()
2379        >>> output = op(input_x, indices, updates)
2380        >>> print(output)
2381        [[1.   0.3   3.6]
2382         [0.4  2.2  -3.2]]
2383    """
2384
2385    __mindspore_signature__ = (
2386        sig.make_sig('input_x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
2387        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
2388        sig.make_sig('updates', dtype=sig.sig_dtype.T)
2389    )
2390
2391    @prim_attr_register
2392    def __init__(self, use_locking=True):
2393        """Initialize ScatterNdUpdate"""
2394        validator.check_value_type('use_locking', use_locking, [bool], self.name)
2395        self.init_prim_io_names(inputs=['input_x', 'indices', 'value'], outputs=['y'])
2396        self.add_prim_attr('side_effect_mem', True)
2397
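
# A NumPy sketch of the ScatterNdUpdate semantics above (illustrative helper):
# the last axis of `indices` selects a slice of `x`, which is overwritten by
# the matching slice of `updates`.
def _scatter_nd_update_reference(x, indices, updates):
    """Apply N-dimensional indexed updates to a copy of `x`."""
    n = indices.shape[-1]
    out = x.copy()
    for idx, upd in zip(indices.reshape(-1, n),
                        updates.reshape((-1,) + x.shape[n:])):
        out[tuple(idx)] = upd
    return out
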
2398
2399class ScatterMax(_ScatterOpDynamic):
2400    r"""
2401    Updates the value of the input tensor through the maximum operation.
2402
2403    Using given values to update tensor value through the max operation, along with the input indices.
2404    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
2405
    For each :math:`i, ..., j` in `indices.shape`:
2407
2408    .. math::
2409
2410        \text{input_x}[\text{indices}[i, ..., j], :]
2411        = \max(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])
2412
2413    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
2414    If they have different data types, the lower priority data type will be converted to
2415    the relatively highest priority data type. A RuntimeError will be reported
2416    when `updates` does not support conversion to the data type required by `input_x`.
2417
2418    Args:
2419        use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .
2420
2421    Inputs:
2422        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
2423          The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
2424        - **indices** (Tensor) - The index to do max operation whose data type must be mindspore.int32 or
2425          mindspore.int64.
2426        - **updates** (Tensor) - The tensor that performs the maximum operation with `input_x`,
2427          the data type is the same as `input_x`, the shape is `indices.shape + input_x.shape[1:]`.
2428
2429    Outputs:
2430        Tensor, the updated `input_x`, has the same shape and type as `input_x`.
2431
2432    Raises:
2433        TypeError: If `use_locking` is not a bool.
2434        TypeError: If `indices` is not an int32 or an int64.
2435        ValueError: If the shape of `updates` is not equal to `indices.shape + x.shape[1:]`.
2436        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
2437                      is required when data type conversion of Parameter is not supported.
2438        RuntimeError: On the Ascend platform, the input data dimension of `input_x` , `indices`
2439                      and `updates` is greater than 8 dimensions.
2440
2441    Supported Platforms:
2442        ``Ascend`` ``GPU`` ``CPU``
2443
2444    Examples:
2445        >>> import mindspore
2446        >>> import numpy as np
2447        >>> from mindspore import Tensor, ops, Parameter
2448        >>> input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32),
2449        ...                     name="input_x")
2450        >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
2451        >>> updates = Tensor(np.ones([2, 2, 3]) * 88, mindspore.float32)
2452        >>> scatter_max = ops.ScatterMax()
2453        >>> output = scatter_max(input_x, indices, updates)
2454        >>> print(output)
2455        [[88. 88. 88.]
2456         [88. 88. 88.]]
2457    """
2458
2459
2460class ScatterMin(_ScatterOpDynamic):
2461    r"""
2462    Updates the value of the input tensor through the minimum operation.
2463
2464    Using given values to update tensor value through the min operation, along with the input indices.
2465    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
2466
    For each :math:`i, ..., j` in `indices.shape`:
2468
2469    .. math::
2470
2471        \text{input_x}[\text{indices}[i, ..., j], :]
2472        = \min(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])
2473
2474    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
2475    If they have different data types, the lower priority data type will be converted to
2476    the relatively highest priority data type. A RuntimeError will be reported
2477    when `updates` does not support conversion to the data type required by `input_x`.
2478
2479    Args:
2480        use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .
2481
2482    Inputs:
2483        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
2484          The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
2485        - **indices** (Tensor) - The index to do min operation whose data type must be mindspore.int32 or
2486          mindspore.int64.
2487        - **updates** (Tensor) - The tensor doing the min operation with `input_x`,
2488          the data type is same as `input_x`, the shape is `indices.shape + input_x.shape[1:]`.
2489
2490    Outputs:
2491        Tensor, the updated `input_x`, has the same shape and type as `input_x`.
2492
2493    Raises:
2494        TypeError: If `use_locking` is not a bool.
2495        TypeError: If `indices` is not an int32 or an int64.
2496        ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
2497        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
2498                      is required when data type conversion of Parameter is not supported.
2499        RuntimeError: On the Ascend platform, the input data dimension of `input_x` , `indices`
2500                      and `updates` is greater than 8 dimensions.
2501
2502    Supported Platforms:
2503        ``Ascend`` ``GPU`` ``CPU``
2504
2505    Examples:
2506        >>> import mindspore
2507        >>> import numpy as np
2508        >>> from mindspore import Tensor, ops, Parameter
2509        >>> input_x = Parameter(Tensor(np.array([[0.0, 1.0, 2.0], [0.0, 0.0, 0.0]]), mindspore.float32),
2510        ...                     name="input_x")
2511        >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
2512        >>> update = Tensor(np.ones([2, 2, 3]), mindspore.float32)
2513        >>> scatter_min = ops.ScatterMin()
2514        >>> output = scatter_min(input_x, indices, update)
2515        >>> print(output)
2516        [[0. 1. 1.]
2517         [0. 0. 0.]]
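        >>> # A further illustrative sketch (hypothetical values): with a duplicate index, both updates
        >>> # are folded into row 0 with an elementwise min, so the smallest value per position wins.
        >>> # input_x must be re-initialized because the previous call updated it in place.
        >>> input_x = Parameter(Tensor(np.array([[5.0, 5.0, 5.0], [5.0, 5.0, 5.0]]), mindspore.float32),
        ...                     name="input_x")
        >>> indices = Tensor(np.array([0, 0]), mindspore.int32)
        >>> update = Tensor(np.array([[3.0, 6.0, 1.0], [2.0, 9.0, 4.0]]), mindspore.float32)
        >>> output = ops.ScatterMin()(input_x, indices, update)
        >>> print(output)
        [[2. 5. 1.]
         [5. 5. 5.]]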
    """


class ScatterAdd(Primitive):
    r"""
    Updates the value of the input tensor through the addition operation.

    Uses the given values and input indices to update the tensor value through the addition operation.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    For each `i, ..., j` in `indices.shape`:

    .. math::

        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{+}= \text{updates}[i, ..., j, :]

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, the lower priority data type will be converted to
    the relatively highest priority data type.

    Note:
        This is an in-place update operator. Therefore, `input_x` will be updated after the operation is completed.

    Args:
        use_locking (bool): Whether to protect the assignment by a lock.
            If ``True`` , `input_x` will be protected by the lock.
            Otherwise, the calculation result is undefined. Default: ``False`` .

    Inputs:
        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
        - **indices** (Tensor) - The index to do the addition operation, whose data type must be mindspore.int32 or
          mindspore.int64.
        - **updates** (Tensor) - The tensor doing the addition operation with `input_x`,
          the data type is the same as `input_x`, the shape is `indices.shape + input_x.shape[1:]`.

    Outputs:
        Tensor, the updated `input_x`, has the same shape and type as `input_x`.

    Raises:
        TypeError: If `use_locking` is not a bool.
        TypeError: If the dtype of `indices` is neither int32 nor int64.
        ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
        RuntimeError: If a data type conversion between `input_x` and `updates` is required
                      but data type conversion of the Parameter is not supported.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops, Parameter
        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="x")
        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
        >>> updates = Tensor(np.ones([2, 2, 3]), mindspore.float32)
        >>> scatter_add = ops.ScatterAdd()
        >>> output = scatter_add(input_x, indices, updates)
        >>> print(output)
        [[1. 1. 1.]
         [3. 3. 3.]]
        >>> # input_x is updated in place by the operation, so it needs to be re-initialized.
        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="x")
        >>> # for indices = [[0, 1], [1, 1]]
        >>> # step 1: [0, 1]
        >>> # input_x[0] = [0.0, 0.0, 0.0] + [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]
        >>> # input_x[1] = [0.0, 0.0, 0.0] + [3.0, 3.0, 3.0] = [3.0, 3.0, 3.0]
        >>> # step 2: [1, 1]
        >>> # input_x[1] = [3.0, 3.0, 3.0] + [7.0, 7.0, 7.0] = [10.0, 10.0, 10.0]
        >>> # input_x[1] = [10.0, 10.0, 10.0] + [9.0, 9.0, 9.0] = [19.0, 19.0, 19.0]
        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)
        >>> scatter_add = ops.ScatterAdd()
        >>> output = scatter_add(input_x, indices, updates)
        >>> print(output)
        [[ 1.  1.  1.]
         [19. 19. 19.]]
        >>> # input_x is updated in place by the operation, so it needs to be re-initialized.
        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="x")
        >>> # for indices = [[1, 0], [1, 1]]
        >>> # step 1: [1, 0]
        >>> # input_x[0] = [0.0, 0.0, 0.0] + [3.0, 3.0, 3.0] = [3.0, 3.0, 3.0]
        >>> # input_x[1] = [0.0, 0.0, 0.0] + [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]
        >>> # step 2: [1, 1]
        >>> # input_x[1] = [1.0, 1.0, 1.0] + [7.0, 7.0, 7.0] = [8.0, 8.0, 8.0]
        >>> # input_x[1] = [8.0, 8.0, 8.0] + [9.0, 9.0, 9.0] = [17.0, 17.0, 17.0]
        >>> indices = Tensor(np.array([[1, 0], [1, 1]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)
        >>> scatter_add = ops.ScatterAdd()
        >>> output = scatter_add(input_x, indices, updates)
        >>> print(output)
        [[ 3.  3.  3.]
         [17. 17. 17.]]
        >>> # input_x is updated in place by the operation, so it needs to be re-initialized.
        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="x")
        >>> # for indices = [[0, 1], [0, 1]]
        >>> # step 1: [0, 1]
        >>> # input_x[0] = [0.0, 0.0, 0.0] + [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]
        >>> # input_x[1] = [0.0, 0.0, 0.0] + [3.0, 3.0, 3.0] = [3.0, 3.0, 3.0]
        >>> # step 2: [0, 1]
        >>> # input_x[0] = [1.0, 1.0, 1.0] + [7.0, 7.0, 7.0] = [8.0, 8.0, 8.0]
        >>> # input_x[1] = [3.0, 3.0, 3.0] + [9.0, 9.0, 9.0] = [12.0, 12.0, 12.0]
        >>> indices = Tensor(np.array([[0, 1], [0, 1]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)
        >>> scatter_add = ops.ScatterAdd()
        >>> output = scatter_add(input_x, indices, updates)
        >>> print(output)
        [[ 8.  8.  8.]
         [12. 12. 12.]]
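        >>> # Demonstrative check of the in-place Note above (an illustrative sketch): after the call,
        >>> # the Parameter itself holds the accumulated result; Parameter.value() returns its data.
        >>> print(input_x.value())
        [[ 8.  8.  8.]
         [12. 12. 12.]]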
    """
    __mindspore_signature__ = (
        sig.make_sig('x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
        sig.make_sig('updates', dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Initialize ScatterAdd"""
        validator.check_value_type('use_locking', use_locking, [bool], self.name)
        self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])
        self.add_prim_attr('side_effect_mem', True)


class ScatterSub(Primitive):
    r"""
    Updates the value of the input tensor through the subtraction operation.

    Uses the given values and input indices to update the tensor value through the subtraction operation.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    For each `i, ..., j` in `indices.shape`:

    .. math::

        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{-}= \text{updates}[i, ..., j, :]

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, the lower priority data type will be converted to
    the relatively highest priority data type.

    Args:
        use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .

    Inputs:
        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
          The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
        - **indices** (Tensor) - The index to do the subtraction operation, whose data type must be mindspore.int32 or
          mindspore.int64.
        - **updates** (Tensor) - The tensor doing the subtraction operation with `input_x`,
          the data type is the same as `input_x`, the shape is `indices.shape + input_x.shape[1:]`.

    Outputs:
        Tensor, the updated `input_x`, has the same shape and type as `input_x`.

    Raises:
        TypeError: If `use_locking` is not a bool.
        TypeError: If the dtype of `indices` is neither int32 nor int64.
        ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
        RuntimeError: If a data type conversion between `input_x` and `updates` is required
                      but data type conversion of the Parameter is not supported.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops, Parameter
        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]), mindspore.float32), name="x")
        >>> indices = Tensor(np.array([[0, 1]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]]), mindspore.float32)
        >>> scatter_sub = ops.ScatterSub()
        >>> output = scatter_sub(input_x, indices, updates)
        >>> print(output)
        [[-1. -1. -1.]
         [-1. -1. -1.]]
        >>> # input_x is updated in place by the operation, so it needs to be re-initialized.
        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="x")
        >>> # for indices = [[0, 1], [1, 1]]
        >>> # step 1: [0, 1]
        >>> # input_x[0] = [0.0, 0.0, 0.0] - [1.0, 1.0, 1.0] = [-1.0, -1.0, -1.0]
        >>> # input_x[1] = [0.0, 0.0, 0.0] - [3.0, 3.0, 3.0] = [-3.0, -3.0, -3.0]
        >>> # step 2: [1, 1]
        >>> # input_x[1] = [-3.0, -3.0, -3.0] - [7.0, 7.0, 7.0] = [-10.0, -10.0, -10.0]
        >>> # input_x[1] = [-10.0, -10.0, -10.0] - [9.0, 9.0, 9.0] = [-19.0, -19.0, -19.0]
        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)
        >>> scatter_sub = ops.ScatterSub()
        >>> output = scatter_sub(input_x, indices, updates)
        >>> print(output)
        [[ -1.  -1.  -1.]
         [-19. -19. -19.]]
        >>> # input_x is updated in place by the operation, so it needs to be re-initialized.
        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="x")
        >>> # for indices = [[1, 0], [1, 1]]
        >>> # step 1: [1, 0]
        >>> # input_x[0] = [0.0, 0.0, 0.0] - [3.0, 3.0, 3.0] = [-3.0, -3.0, -3.0]
        >>> # input_x[1] = [0.0, 0.0, 0.0] - [1.0, 1.0, 1.0] = [-1.0, -1.0, -1.0]
        >>> # step 2: [1, 1]
        >>> # input_x[1] = [-1.0, -1.0, -1.0] - [7.0, 7.0, 7.0] = [-8.0, -8.0, -8.0]
        >>> # input_x[1] = [-8.0, -8.0, -8.0] - [9.0, 9.0, 9.0] = [-17.0, -17.0, -17.0]
        >>> indices = Tensor(np.array([[1, 0], [1, 1]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)
        >>> scatter_sub = ops.ScatterSub()
        >>> output = scatter_sub(input_x, indices, updates)
        >>> print(output)
        [[ -3.  -3.  -3.]
         [-17. -17. -17.]]
        >>> # input_x is updated in place by the operation, so it needs to be re-initialized.
        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="x")
        >>> # for indices = [[0, 1], [0, 1]]
        >>> # step 1: [0, 1]
        >>> # input_x[0] = [0.0, 0.0, 0.0] - [1.0, 1.0, 1.0] = [-1.0, -1.0, -1.0]
        >>> # input_x[1] = [0.0, 0.0, 0.0] - [3.0, 3.0, 3.0] = [-3.0, -3.0, -3.0]
        >>> # step 2: [0, 1]
        >>> # input_x[0] = [-1.0, -1.0, -1.0] - [7.0, 7.0, 7.0] = [-8.0, -8.0, -8.0]
        >>> # input_x[1] = [-3.0, -3.0, -3.0] - [9.0, 9.0, 9.0] = [-12.0, -12.0, -12.0]
        >>> indices = Tensor(np.array([[0, 1], [0, 1]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)
        >>> scatter_sub = ops.ScatterSub()
        >>> output = scatter_sub(input_x, indices, updates)
        >>> print(output)
        [[ -8.  -8.  -8.]
         [-12. -12. -12.]]
    """
    __mindspore_signature__ = (
        sig.make_sig('input_x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
        sig.make_sig('updates', dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Initialize ScatterSub"""
        validator.check_value_type('use_locking', use_locking, [bool], self.name)
        self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])
        self.add_prim_attr('side_effect_mem', True)


class ScatterMul(_ScatterOpDynamic):
    r"""
    Updates the value of the input tensor through the multiplication operation.

    Uses the given values and input indices to update the tensor value through the multiplication operation.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    For each `i, ..., j` in `indices.shape`:

    .. math::

        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{*}= \text{updates}[i, ..., j, :]

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, the lower priority data type will be converted to
    the relatively highest priority data type.

    Args:
        use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .

    Inputs:
        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
          The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
        - **indices** (Tensor) - The index to do the multiplication operation, whose data type must be mstype.int32 or
          mstype.int64.
        - **updates** (Tensor) - The tensor doing the multiplication operation with `input_x`,
          the data type is the same as `input_x`, the shape is `indices.shape + input_x.shape[1:]`.

    Outputs:
        Tensor, the updated `input_x`, has the same shape and type as `input_x`.

    Raises:
        TypeError: If `use_locking` is not a bool.
        TypeError: If the dtype of `indices` is neither int32 nor int64.
        ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
        RuntimeError: If a data type conversion between `input_x` and `updates` is required
                      but data type conversion of the Parameter is not supported.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import dtype as mstype
        >>> from mindspore import Tensor, ops, Parameter
        >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mstype.float32), name="x")
        >>> indices = Tensor(np.array([0, 1]), mstype.int32)
        >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mstype.float32)
        >>> scatter_mul = ops.ScatterMul()
        >>> output = scatter_mul(input_x, indices, updates)
        >>> print(output)
        [[2. 2. 2.]
         [4. 4. 4.]]
        >>> # input_x is updated in place by the operation, so it needs to be re-initialized.
        >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mstype.float32), name="x")
        >>> # for indices = [[0, 1], [1, 1]]
        >>> # step 1: [0, 1]
        >>> # input_x[0] = [1.0, 1.0, 1.0] * [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]
        >>> # input_x[1] = [2.0, 2.0, 2.0] * [3.0, 3.0, 3.0] = [6.0, 6.0, 6.0]
        >>> # step 2: [1, 1]
        >>> # input_x[1] = [6.0, 6.0, 6.0] * [7.0, 7.0, 7.0] = [42.0, 42.0, 42.0]
        >>> # input_x[1] = [42.0, 42.0, 42.0] * [9.0, 9.0, 9.0] = [378.0, 378.0, 378.0]
        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mstype.int32)
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mstype.float32)
        >>> scatter_mul = ops.ScatterMul()
        >>> output = scatter_mul(input_x, indices, updates)
        >>> print(output)
        [[  1.   1.   1.]
         [378. 378. 378.]]
        >>> # input_x is updated in place by the operation, so it needs to be re-initialized.
        >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mstype.float32), name="x")
        >>> # for indices = [[1, 0], [1, 1]]
        >>> # step 1: [1, 0]
        >>> # input_x[0] = [1.0, 1.0, 1.0] * [3.0, 3.0, 3.0] = [3.0, 3.0, 3.0]
        >>> # input_x[1] = [2.0, 2.0, 2.0] * [1.0, 1.0, 1.0] = [2.0, 2.0, 2.0]
        >>> # step 2: [1, 1]
        >>> # input_x[1] = [2.0, 2.0, 2.0] * [7.0, 7.0, 7.0] = [14.0, 14.0, 14.0]
        >>> # input_x[1] = [14.0, 14.0, 14.0] * [9.0, 9.0, 9.0] = [126.0, 126.0, 126.0]
        >>> indices = Tensor(np.array([[1, 0], [1, 1]]), mstype.int32)
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mstype.float32)
        >>> scatter_mul = ops.ScatterMul()
        >>> output = scatter_mul(input_x, indices, updates)
        >>> print(output)
        [[  3.   3.   3.]
         [126. 126. 126.]]
        >>> # input_x is updated in place by the operation, so it needs to be re-initialized.
        >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mstype.float32), name="x")
        >>> # for indices = [[0, 1], [0, 1]]
        >>> # step 1: [0, 1]
        >>> # input_x[0] = [1.0, 1.0, 1.0] * [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]
        >>> # input_x[1] = [2.0, 2.0, 2.0] * [3.0, 3.0, 3.0] = [6.0, 6.0, 6.0]
        >>> # step 2: [0, 1]
        >>> # input_x[0] = [1.0, 1.0, 1.0] * [7.0, 7.0, 7.0] = [7.0, 7.0, 7.0]
        >>> # input_x[1] = [6.0, 6.0, 6.0] * [9.0, 9.0, 9.0] = [54.0, 54.0, 54.0]
        >>> indices = Tensor(np.array([[0, 1], [0, 1]]), mstype.int32)
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mstype.float32)
        >>> scatter_mul = ops.ScatterMul()
        >>> output = scatter_mul(input_x, indices, updates)
        >>> print(output)
        [[ 7.  7.  7.]
         [54. 54. 54.]]
    """


class ScatterDiv(_ScatterOpDynamic):
    r"""
    Updates the value of the input tensor through the division operation.

    Uses the given values and input indices to update the tensor value through the division operation.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    For each :math:`i, ..., j` in `indices.shape`:

    .. math::

        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{/}= \text{updates}[i, ..., j, :]

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, the lower priority data type will be converted to
    the relatively highest priority data type. A RuntimeError will be reported
    when `updates` does not support conversion to the data type required by `input_x`.

    Args:
        use_locking (bool): Whether to protect the assignment by a lock. Default: ``False`` .

    Inputs:
        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
          The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
        - **indices** (Tensor) - The index to do the division operation, whose data type must be mstype.int32 or
          mstype.int64.
        - **updates** (Tensor) - The tensor doing the division operation with `input_x`,
          the data type is the same as `input_x`, the shape is `indices.shape + input_x.shape[1:]`.

    Outputs:
        Tensor, the updated `input_x`, has the same shape and type as `input_x`.

    Raises:
        TypeError: If `use_locking` is not a bool.
        TypeError: If the dtype of `indices` is neither int32 nor int64.
        ValueError: If the shape of `updates` is not equal to `indices.shape + input_x.shape[1:]`.
        RuntimeError: If a data type conversion between `input_x` and `updates` is required
                      but data type conversion of the Parameter is not supported.
        RuntimeError: On the Ascend platform, if the rank of `input_x`, `indices`
                      or `updates` is greater than 8.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import dtype as mstype
        >>> from mindspore import Tensor, ops, Parameter
        >>> input_x = Parameter(Tensor(np.array([[6.0, 6.0, 6.0], [2.0, 2.0, 2.0]]), mstype.float32), name="x")
        >>> indices = Tensor(np.array([0, 1]), mstype.int32)
        >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mstype.float32)
        >>> scatter_div = ops.ScatterDiv()
        >>> output = scatter_div(input_x, indices, updates)
        >>> print(output)
        [[3. 3. 3.]
         [1. 1. 1.]]
        >>> # input_x is updated in place by the operation, so it needs to be re-initialized.
        >>> input_x = Parameter(Tensor(np.array([[105.0, 105.0, 105.0],
        ...                                      [315.0, 315.0, 315.0]]), mstype.float32), name="x")
        >>> # for indices = [[0, 1], [1, 1]]
        >>> # step 1: [0, 1]
        >>> # input_x[0] = [105.0, 105.0, 105.0] / [1.0, 1.0, 1.0] = [105.0, 105.0, 105.0]
        >>> # input_x[1] = [315.0, 315.0, 315.0] / [3.0, 3.0, 3.0] = [105.0, 105.0, 105.0]
        >>> # step 2: [1, 1]
        >>> # input_x[1] = [105.0, 105.0, 105.0] / [5.0, 5.0, 5.0] = [21.0, 21.0, 21.0]
        >>> # input_x[1] = [21.0, 21.0, 21.0] / [7.0, 7.0, 7.0] = [3.0, 3.0, 3.0]
        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mstype.int32)
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
        ...                            [[5.0, 5.0, 5.0], [7.0, 7.0, 7.0]]]), mstype.float32)
        >>> scatter_div = ops.ScatterDiv()
        >>> output = scatter_div(input_x, indices, updates)
        >>> print(output)
        [[105. 105. 105.]
         [  3.   3.   3.]]
        >>> # input_x is updated in place by the operation, so it needs to be re-initialized.
        >>> input_x = Parameter(Tensor(np.array([[105.0, 105.0, 105.0],
        ...                                      [315.0, 315.0, 315.0]]), mstype.float32), name="x")
        >>> # for indices = [[1, 0], [1, 1]]
        >>> # step 1: [1, 0]
        >>> # input_x[0] = [105.0, 105.0, 105.0] / [3.0, 3.0, 3.0] = [35.0, 35.0, 35.0]
        >>> # input_x[1] = [315.0, 315.0, 315.0] / [1.0, 1.0, 1.0] = [315.0, 315.0, 315.0]
        >>> # step 2: [1, 1]
        >>> # input_x[1] = [315.0, 315.0, 315.0] / [5.0, 5.0, 5.0] = [63.0, 63.0, 63.0]
        >>> # input_x[1] = [63.0, 63.0, 63.0] / [7.0, 7.0, 7.0] = [9.0, 9.0, 9.0]
        >>> indices = Tensor(np.array([[1, 0], [1, 1]]), mstype.int32)
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
        ...                            [[5.0, 5.0, 5.0], [7.0, 7.0, 7.0]]]), mstype.float32)
        >>> scatter_div = ops.ScatterDiv()
        >>> output = scatter_div(input_x, indices, updates)
        >>> print(output)
        [[35. 35. 35.]
         [ 9.  9.  9.]]
        >>> # input_x is updated in place by the operation, so it needs to be re-initialized.
        >>> input_x = Parameter(Tensor(np.array([[105.0, 105.0, 105.0],
        ...                                      [315.0, 315.0, 315.0]]), mstype.float32), name="x")
        >>> # for indices = [[0, 1], [0, 1]]
        >>> # step 1: [0, 1]
        >>> # input_x[0] = [105.0, 105.0, 105.0] / [1.0, 1.0, 1.0] = [105.0, 105.0, 105.0]
        >>> # input_x[1] = [315.0, 315.0, 315.0] / [3.0, 3.0, 3.0] = [105.0, 105.0, 105.0]
        >>> # step 2: [0, 1]
        >>> # input_x[0] = [105.0, 105.0, 105.0] / [5.0, 5.0, 5.0] = [21.0, 21.0, 21.0]
        >>> # input_x[1] = [105.0, 105.0, 105.0] / [7.0, 7.0, 7.0] = [15.0, 15.0, 15.0]
        >>> indices = Tensor(np.array([[0, 1], [0, 1]]), mstype.int32)
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],
        ...                            [[5.0, 5.0, 5.0], [7.0, 7.0, 7.0]]]), mstype.float32)
        >>> scatter_div = ops.ScatterDiv()
        >>> output = scatter_div(input_x, indices, updates)
        >>> print(output)
        [[21. 21. 21.]
         [15. 15. 15.]]
    """


class ScatterNdAdd(Primitive):
    r"""
    Applies sparse addition to individual values or slices in a tensor.

    Uses the given values and input indices to update the tensor value through the addition operation.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    Refer to :func:`mindspore.ops.scatter_nd_add` for more details.

    Args:
        use_locking (bool, optional): Whether to protect the assignment by a lock. Default: ``False`` .

    Inputs:
        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
          The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
        - **indices** (Tensor) - The index to do the addition operation, whose data type must be mindspore.int32.
          The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(input_x.shape)`.
        - **updates** (Tensor) - The tensor doing the addition operation with `input_x`,
          the data type is the same as `input_x`, the shape is
          `indices.shape[:-1] + input_x.shape[indices.shape[-1]:]`.

    Outputs:
        Tensor, the updated `input_x`, has the same shape and type as `input_x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops, Parameter
        >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
        >>> use_locking = False
        >>> scatter_nd_add = ops.ScatterNdAdd(use_locking)
        >>> output = scatter_nd_add(input_x, indices, updates)
        >>> print(output)
        [ 1. 10.  9.  4. 12.  6.  7. 17.]
        >>> input_x = Parameter(Tensor(np.zeros((4, 4, 4)), mindspore.int32))
        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
        >>> use_locking = False
        >>> scatter_nd_add = ops.ScatterNdAdd(use_locking)
        >>> output = scatter_nd_add(input_x, indices, updates)
        >>> print(output)
        [[[1 1 1 1]
          [2 2 2 2]
          [3 3 3 3]
          [4 4 4 4]]
         [[0 0 0 0]
          [0 0 0 0]
          [0 0 0 0]
          [0 0 0 0]]
         [[5 5 5 5]
          [6 6 6 6]
          [7 7 7 7]
          [8 8 8 8]]
         [[0 0 0 0]
          [0 0 0 0]
          [0 0 0 0]
          [0 0 0 0]]]
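        >>> # A further illustrative sketch (hypothetical values): when indices.shape[-1] equals the
        >>> # rank of input_x, individual elements (rather than slices) are updated.
        >>> input_x = Parameter(Tensor(np.zeros((2, 3)), mindspore.float32), name="x")
        >>> indices = Tensor(np.array([[0, 1], [1, 2]]), mindspore.int32)
        >>> updates = Tensor(np.array([1.0, 2.0]), mindspore.float32)
        >>> output = ops.ScatterNdAdd()(input_x, indices, updates)
        >>> print(output)
        [[0. 1. 0.]
         [0. 0. 2.]]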
    """
    __mindspore_signature__ = (
        sig.make_sig('x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
        sig.make_sig('updates', dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Initialize ScatterNdAdd"""
        validator.check_value_type('use_locking', use_locking, [bool], self.name)
        self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])
        self.add_prim_attr('side_effect_mem', True)


class ScatterNdSub(Primitive):
    r"""
    Applies sparse subtraction to individual values or slices in a tensor.

    Uses the given values and input indices to update the tensor value through the subtraction operation.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    Refer to :func:`mindspore.ops.scatter_nd_sub` for more details.

    Args:
        use_locking (bool, optional): Whether to protect the assignment by a lock. Default: ``False`` .

    Inputs:
        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
          The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
        - **indices** (Tensor) - The index to do the subtraction operation, whose data type must be mindspore.int32.
          The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(input_x.shape)`.
        - **updates** (Tensor) - The tensor doing the subtraction operation with `input_x`,
          the data type is the same as `input_x`, the shape is
          `indices.shape[:-1] + input_x.shape[indices.shape[-1]:]`.

    Outputs:
        Tensor, the updated `input_x`, has the same shape and type as `input_x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops, Parameter
        >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
        >>> use_locking = False
        >>> scatter_nd_sub = ops.ScatterNdSub(use_locking)
        >>> output = scatter_nd_sub(input_x, indices, updates)
        >>> print(output)
        [ 1. -6. -3.  4. -2.  6.  7. -1.]
        >>> input_x = Parameter(Tensor(np.zeros((4, 4, 4)), mindspore.int32))
        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
        >>> use_locking = False
        >>> scatter_nd_sub = ops.ScatterNdSub(use_locking)
        >>> output = scatter_nd_sub(input_x, indices, updates)
        >>> print(output)
        [[[-1 -1 -1 -1]
          [-2 -2 -2 -2]
          [-3 -3 -3 -3]
          [-4 -4 -4 -4]]
         [[ 0  0  0  0]
          [ 0  0  0  0]
          [ 0  0  0  0]
          [ 0  0  0  0]]
         [[-5 -5 -5 -5]
          [-6 -6 -6 -6]
          [-7 -7 -7 -7]
          [-8 -8 -8 -8]]
         [[ 0  0  0  0]
          [ 0  0  0  0]
          [ 0  0  0  0]
          [ 0  0  0  0]]]
    """

    __mindspore_signature__ = (
        sig.make_sig('input_x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
        sig.make_sig('updates', dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Initialize ScatterNdSub"""
        validator.check_value_type('use_locking', use_locking, [bool], self.name)
        self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])
        self.add_prim_attr('side_effect_mem', True)


class ScatterNdMul(_ScatterNdOp):
    r"""
    Applies sparse multiplication to individual values or slices in a tensor.

    Uses the given values and input indices to update the parameter value through the multiplication operation.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Refer to :func:`mindspore.ops.scatter_nd_mul` for more details.

    Args:
        use_locking (bool, optional): Whether to protect the assignment by a lock. Default: ``False`` .

    Inputs:
        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
        - **indices** (Tensor) - The index to do the multiplication operation, whose data type must be int32 or int64.
          The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(input_x.shape)`.
        - **updates** (Tensor) - The tensor to do the multiplication operation with `input_x`.
          The data type is the same as `input_x`, and the shape is
          `indices.shape[:-1] + input_x.shape[indices.shape[-1]:]`.

    Outputs:
        Tensor, the updated `input_x`, has the same shape and type as `input_x`.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops, Parameter
        >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
        >>> scatter_nd_mul = ops.ScatterNdMul()
        >>> output = scatter_nd_mul(input_x, indices, updates)
        >>> print(output)
        [ 1. 16. 18.  4. 35.  6.  7. 72.]
        >>> input_x = Parameter(Tensor(np.ones((4, 4, 4)), mindspore.int32))
        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
        >>> scatter_nd_mul = ops.ScatterNdMul()
        >>> output = scatter_nd_mul(input_x, indices, updates)
        >>> print(output)
        [[[1 1 1 1]
          [2 2 2 2]
          [3 3 3 3]
          [4 4 4 4]]
         [[1 1 1 1]
          [1 1 1 1]
          [1 1 1 1]
          [1 1 1 1]]
         [[5 5 5 5]
          [6 6 6 6]
          [7 7 7 7]
          [8 8 8 8]]
         [[1 1 1 1]
          [1 1 1 1]
          [1 1 1 1]
          [1 1 1 1]]]
    """


class ScatterNdDiv(_ScatterNdOp):
    r"""
    Applies sparse division to individual values or slices in a tensor.

    Uses the given values and input indices to update the tensor value through the division operation.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Refer to :func:`mindspore.ops.scatter_nd_div` for more details.

    Args:
        use_locking (bool, optional): Whether to protect the assignment by a lock. Default: ``False`` .

    Inputs:
        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
        - **indices** (Tensor) - The index to do the division operation, whose data type must be int32 or int64.
          The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(input_x.shape)`.
        - **updates** (Tensor) - The tensor to do the division operation with `input_x`.
          The data type is the same as `input_x`, and the shape is
          `indices.shape[:-1] + input_x.shape[indices.shape[-1]:]`.

    Outputs:
        Tensor, the updated `input_x`, has the same shape and type as `input_x`.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops, Parameter
        >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
        >>> use_locking = False
        >>> scatter_nd_div = ops.ScatterNdDiv(use_locking)
        >>> output = scatter_nd_div(input_x, indices, updates)
        >>> print(output)
        [1.         0.25       0.5        4.         0.71428573 6.
         7.         0.8888889 ]
        >>> input_x = Parameter(Tensor(np.ones((4, 4, 4)), mindspore.float32))
        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.float32)
        >>> use_locking = False
        >>> scatter_nd_div = ops.ScatterNdDiv(use_locking)
        >>> output = scatter_nd_div(input_x, indices, updates)
        >>> print(output)
        [[[1.         1.         1.         1.        ]
          [0.5        0.5        0.5        0.5       ]
          [0.33333334 0.33333334 0.33333334 0.33333334]
          [0.25       0.25       0.25       0.25      ]]
         [[1.         1.         1.         1.        ]
          [1.         1.         1.         1.        ]
          [1.         1.         1.         1.        ]
          [1.         1.         1.         1.        ]]
         [[0.2        0.2        0.2        0.2       ]
          [0.16666667 0.16666667 0.16666667 0.16666667]
          [0.14285715 0.14285715 0.14285715 0.14285715]
          [0.125      0.125      0.125      0.125     ]]
         [[1.         1.         1.         1.        ]
          [1.         1.         1.         1.        ]
          [1.         1.         1.         1.        ]
          [1.         1.         1.         1.        ]]]
    """


class ScatterNdMax(_ScatterNdOp):
    r"""
    Applies sparse maximum to individual values or slices in a tensor.

    Uses the given values and input indices to update the parameter value through the maximum operation.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    Refer to :func:`mindspore.ops.scatter_nd_max` for more details.

    Args:
        use_locking (bool, optional): Whether to protect the assignment by a lock. Default: ``False`` .

    Inputs:
        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
        - **indices** (Tensor) - The index to do the maximum operation, whose data type must be int32 or int64.
          The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(input_x.shape)`.
        - **updates** (Tensor) - The tensor to do the maximum operation with `input_x`.
          The data type is the same as `input_x`, and the shape is
          `indices.shape[:-1] + input_x.shape[indices.shape[-1]:]`.

    Outputs:
        Tensor, the updated `input_x`, has the same shape and type as `input_x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops, Parameter
        >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
        >>> scatter_nd_max = ops.ScatterNdMax()
        >>> output = scatter_nd_max(input_x, indices, updates)
        >>> print(output)
        [1. 8. 6. 4. 7. 6. 7. 9.]
        >>> input_x = Parameter(Tensor(np.ones((4, 4, 4)), mindspore.int32))
        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
        >>> scatter_nd_max = ops.ScatterNdMax()
        >>> output = scatter_nd_max(input_x, indices, updates)
        >>> print(output)
        [[[1 1 1 1]
          [2 2 2 2]
          [3 3 3 3]
          [4 4 4 4]]
         [[1 1 1 1]
          [1 1 1 1]
          [1 1 1 1]
          [1 1 1 1]]
         [[5 5 5 5]
          [6 6 6 6]
          [7 7 7 7]
          [8 8 8 8]]
         [[1 1 1 1]
          [1 1 1 1]
          [1 1 1 1]
          [1 1 1 1]]]
    """

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Initialize ScatterNdMax"""
        super().__init__(use_locking)


class ScatterNdMin(_ScatterNdOp):
    r"""
    Applies sparse minimum to individual values or slices in a tensor.

    Uses the given values and input indices to update the tensor value through the minimum operation.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    Refer to :func:`mindspore.ops.scatter_nd_min` for more details.

    Args:
        use_locking (bool, optional): Whether to protect the assignment by a lock. Default: ``False`` .

    Inputs:
        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
        - **indices** (Tensor) - The index to do the minimum operation, whose data type must be int32 or int64.
          The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(input_x.shape)`.
        - **updates** (Tensor) - The tensor to do the minimum operation with `input_x`.
          The data type is the same as `input_x`, and the shape is
          `indices.shape[:-1] + input_x.shape[indices.shape[-1]:]`.

    Outputs:
        Tensor, the updated `input_x`, has the same shape and type as `input_x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops, Parameter
        >>> input_x = Parameter(Tensor(np.ones(8) * 10, mindspore.float32), name="x")
        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
        >>> use_locking = False
        >>> scatter_nd_min = ops.ScatterNdMin(use_locking)
        >>> output = scatter_nd_min(input_x, indices, updates)
        >>> print(output)
        [10.  8.  6. 10.  7. 10. 10.  9.]
        >>> input_x = Parameter(Tensor(np.ones((4, 4, 4)) * 10, mindspore.int32))
        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
        >>> use_locking = False
        >>> scatter_nd_min = ops.ScatterNdMin(use_locking)
        >>> output = scatter_nd_min(input_x, indices, updates)
        >>> print(output)
        [[[ 1  1  1  1]
          [ 2  2  2  2]
          [ 3  3  3  3]
          [ 4  4  4  4]]
         [[10 10 10 10]
          [10 10 10 10]
          [10 10 10 10]
          [10 10 10 10]]
         [[ 5  5  5  5]
          [ 6  6  6  6]
          [ 7  7  7  7]
          [ 8  8  8  8]]
         [[10 10 10 10]
          [10 10 10 10]
          [10 10 10 10]
          [10 10 10 10]]]
    """

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Initialize ScatterNdMin"""
        super().__init__(use_locking)


class SpaceToDepth(Primitive):
    r"""
    Rearranges blocks of spatial data into depth.

    The output tensor's `height` dimension is :math:`height / block\_size`.

    The output tensor's `width` dimension is :math:`width / block\_size`.

    The depth of the output tensor is :math:`block\_size * block\_size * input\_depth`.

    The input tensor's height and width must be divisible by `block_size`.
    The data format is "NCHW".

    Args:
        block_size (int): The block size used to divide spatial data. It must be >= 2.

    Inputs:
        - **x** (Tensor) - The target tensor. The data type is Number. It must be a 4-D tensor.

    Outputs:
        Tensor, the same data type as `x`. It must be a 4-D tensor. Tensor of shape
        :math:`(N, C_{in} * \text{block_size} ^ 2, H_{in} / \text{block_size}, W_{in} / \text{block_size})`.

    Raises:
        TypeError: If `block_size` is not an int.
        ValueError: If `block_size` is less than 2.
        ValueError: If length of shape of `x` is not equal to 4.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.random.rand(1,3,2,2), mindspore.float32)
        >>> block_size = 2
        >>> space_to_depth = ops.SpaceToDepth(block_size)
        >>> output = space_to_depth(x)
        >>> print(output.shape)
        (1, 12, 1, 1)
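        >>> # A further shape-only sketch (hypothetical sizes) of the formulas above: depth grows by
        >>> # block_size ** 2 while height and width shrink by block_size.
        >>> x = Tensor(np.random.rand(2, 3, 4, 6), mindspore.float32)
        >>> output = ops.SpaceToDepth(2)(x)
        >>> print(output.shape)
        (2, 12, 2, 3)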
    """

    @prim_attr_register
    def __init__(self, block_size):
        """Initialize SpaceToDepth"""
        validator.check_value_type('block_size', block_size, [int], self.name)
        validator.check('block_size', block_size, self.name, 2, validator.GE)
        self.block_size = block_size
        self.add_prim_attr("data_format", "NCHW")
        self.init_prim_io_names(inputs=['x'], outputs=['y'])


class DepthToSpace(Primitive):
    r"""
    Rearranges blocks of depth data into spatial dimensions.

    This is the reverse operation of SpaceToDepth.

    The depth of the output tensor is :math:`input\_depth / (block\_size * block\_size)`.

    The output tensor's `height` dimension is :math:`height * block\_size`.

    The output tensor's `width` dimension is :math:`width * block\_size`.

    The input tensor's depth must be divisible by `block_size * block_size`.
    The data format is "NCHW".

    Args:
        block_size (int): The block size used to divide depth data. It must be >= 2.

    Inputs:
        - **x** (Tensor) - The target tensor. It must be a 4-D tensor with shape :math:`(N, C_{in}, H_{in}, W_{in})`.
          The data type is Number.

    Outputs:
        Tensor of shape :math:`(N, C_{in} / \text{block_size} ^ 2, H_{in} * \text{block_size},
        W_{in} * \text{block_size})`.

    Raises:
        TypeError: If `block_size` is not an int.
        ValueError: If `block_size` is less than 2.
        ValueError: If length of shape of `x` is not equal to 4.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.random.rand(1, 12, 1, 1), mindspore.float32)
        >>> block_size = 2
        >>> depth_to_space = ops.DepthToSpace(block_size)
        >>> output = depth_to_space(x)
        >>> print(output.shape)
        (1, 3, 2, 2)
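        >>> # A demonstrative round trip (shape check only): since DepthToSpace reverses SpaceToDepth,
        >>> # chaining the two restores the original shape.
        >>> x = Tensor(np.random.rand(1, 3, 4, 4), mindspore.float32)
        >>> y = ops.DepthToSpace(2)(ops.SpaceToDepth(2)(x))
        >>> print(y.shape == x.shape)
        True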
    """

    @prim_attr_register
    def __init__(self, block_size):
        """Initialize DepthToSpace"""
        validator.check_value_type('block_size', block_size, [int], self.name)
        validator.check('block_size', block_size, '', 2, validator.GE, self.name)
        self.block_size = block_size
        self.add_prim_attr("data_format", "NCHW")
        self.init_prim_io_names(inputs=['x'], outputs=['y'])


class SpaceToBatch(Primitive):
    r"""
    SpaceToBatch is deprecated. Please use :class:`mindspore.ops.SpaceToBatchND` instead.
    Divides spatial dimensions into blocks and combines the block size with the original batch.

    This operation will divide spatial dimensions (H, W) into blocks with `block_size`, the output tensor's H and W
    dimension is the corresponding number of blocks after division. The output tensor's batch dimension is the
    product of the original batch and the square of block_size. Before division, the spatial dimensions
    of the input are zero padded according to paddings if necessary.

    Args:
        block_size (int): The block size of dividing blocks, with a value greater than or equal to 2.
        paddings (Union[tuple, list]): The padding values for the H and W dimensions, containing 2 subtraction lists.
            Each subtraction list contains 2 integer values. All values must be greater than or equal to 0.
            paddings[i] specifies the paddings for the spatial dimension i, which corresponds to the
            input dimension i+2. It is required that input_shape[i+2]+paddings[i][0]+paddings[i][1]
            is divisible by block_size.

    Inputs:
        - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor. The data type is Number.

    Outputs:
        Tensor, the output tensor with the same data type as input. Assume input shape is :math:`(n, c, h, w)` with
        :math:`block\_size` and :math:`paddings`. The shape of the output tensor will be :math:`(n', c', h', w')`,
        where

        :math:`n' = n*(block\_size*block\_size)`

        :math:`c' = c`

        :math:`h' = (h+paddings[0][0]+paddings[0][1])//block\_size`

        :math:`w' = (w+paddings[1][0]+paddings[1][1])//block\_size`

    Raises:
        TypeError: If `block_size` is not an int.
        ValueError: If `block_size` is less than 2.

    Supported Platforms:
        Deprecated

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> block_size = 2
        >>> paddings = [[0, 0], [0, 0]]
        >>> space_to_batch = ops.SpaceToBatch(block_size, paddings)
        >>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)
        >>> output = space_to_batch(input_x)
        >>> print(output)
        [[[[1.]]]
         [[[2.]]]
         [[[3.]]]
         [[[4.]]]]
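        >>> # A minimal sketch of the recommended replacement, assuming the same block/padding layout:
        >>> # ops.SpaceToBatchND takes a per-dimension block_shape instead of a single block_size.
        >>> space_to_batch_nd = ops.SpaceToBatchND([2, 2], [[0, 0], [0, 0]])
        >>> output = space_to_batch_nd(input_x)
        >>> print(output.shape)
        (4, 1, 1, 1)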
    """

    @prim_attr_register
    def __init__(self, block_size, paddings):
        """Initialize SpaceToBatch"""
        logger.warning("WARN_DEPRECATED: The usage of SpaceToBatch is deprecated."
                       " Please use SpaceToBatchND.")
        validator.check_value_type('block_size', block_size, [int], self.name)
        validator.check('block_size', block_size, self.name, 2, validator.GE, self.name)
        self.block_size = block_size
        validator.check('paddings shape', np.array(paddings).shape, self.name, (2, 2), validator.EQ, self.name)
        for elem in itertools.chain(*paddings):
            validator.check_non_negative_int(elem, 'paddings element', self.name)
            validator.check_value_type('paddings element', elem, [int], self.name)
        self.paddings = paddings


class BatchToSpace(PrimitiveWithInfer):
    r"""
    BatchToSpace is deprecated. Please use :class:`mindspore.ops.BatchToSpaceND` instead.
    Divides the batch dimension into blocks and interleaves these blocks back into spatial dimensions.

    This operation will divide the batch dimension N into blocks with `block_size`; the output tensor's N dimension
    is the corresponding number of blocks after division. The output tensor's H and W dimensions are the product of
    the original H and W dimensions and `block_size`, minus the given amounts to crop, respectively.

    Args:
        block_size (int): The block size of division, with a value not less than 2.
        crops (Union[list(int), tuple(int)]): The crop values for the H and W dimensions, containing 2 lists.
            Each list contains 2 integers.
            All values must be not less than 0. crops[i] specifies the crop values for the spatial dimension i, which
            corresponds to the input dimension i+2. It is required that
            :math:`input\_shape[i+2]*block\_size > crops[i][0]+crops[i][1]` .

    Inputs:
        - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor, and dimension 0 must be divisible by
          `block_size * block_size`. The data type is float16 or float32.

    Outputs:
        Tensor, the output tensor with the same type as input. Assume input shape is :math:`(n, c, h, w)` with
        block_size and crops. The output shape will be :math:`(n', c', h', w')`, where

        :math:`n' = n//(block\_size*block\_size)`

        :math:`c' = c`

        :math:`h' = h*block\_size-crops[0][0]-crops[0][1]`

        :math:`w' = w*block\_size-crops[1][0]-crops[1][1]`

    Raises:
        TypeError: If `block_size` or element of `crops` is not an int.
        TypeError: If `crops` is neither list nor tuple.
        ValueError: If `block_size` is less than 2.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> block_size = 2
        >>> crops = [[0, 0], [0, 0]]
        >>> batch_to_space = ops.BatchToSpace(block_size, crops)
        >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
        >>> output = batch_to_space(input_x)
        >>> print(output)
        [[[[1.  2.]
           [3.  4.]]]]
3637
3638    """
3639
3640    @prim_attr_register
3641    def __init__(self, block_size, crops):
3642        """Initialize BatchToSpace"""
3643        logger.warning("WARN_DEPRECATED: The usage of BatchToSpace is deprecated."
3644                       " Please use BatchToSpaceND.")
3645        validator.check_value_type('block_size', block_size, [int], self.name)
3646        validator.check('block_size', block_size, '', 2, validator.GE, self.name)
3647        self.block_size = block_size
3648        validator.check_value_type('crops type', crops, [list, tuple], self.name)
3649        validator.check('crops shape', np.array(crops).shape, self.name, (2, 2))
3650        for elem in itertools.chain(*crops):
3651            validator.check_non_negative_int(elem, 'crops element', self.name)
3652            validator.check_value_type('crops element', elem, [int], self.name)
3653        self.crops = crops
3654
3655    def infer_dtype(self, x_dtype):
3656        validator.check_tensor_dtype_valid('input_x', x_dtype, mstype.number_type, self.name)
3657        return x_dtype
3658
3659    def infer_shape(self, x_shape):
3660        validator.check('rank of input_x', len(x_shape), self.name, 4)
3661        out_shape = copy.deepcopy(x_shape)
3662        for i in range(2):
3663            x_block_prod = out_shape[i + 2] * self.block_size
3664            crops_sum = self.crops[i][0] + self.crops[i][1]
3665            validator.check("x block shape prod", x_block_prod, 'crops sum', crops_sum, validator.GT, self.name)
3666            out_shape[i + 2] = x_block_prod - crops_sum
3667        block_size_prod = self.block_size * self.block_size
3668        if out_shape[0] % block_size_prod != 0:
3669            raise ValueError(f"For '{self.name}', the shape of output with index 0 must be divided exactly "
3670                             f"by block_size_prod, but got the shape of output: {out_shape} and "
3671                             f"block_size_prod: {block_size_prod}.")
3672        out_shape[0] = out_shape[0] // block_size_prod
3673        return out_shape
3674
3675
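
# A hedged, illustrative sketch of the inverse shape rule that infer_shape
# above enforces, for an NCHW input. `_batch_to_space_out_shape` is a
# hypothetical helper for exposition only.
def _batch_to_space_out_shape(x_shape, block_size, crops):
    """Compute the expected NCHW output shape of BatchToSpace."""
    n, c, h, w = x_shape
    # The batch dimension must divide evenly by block_size**2.
    assert n % (block_size * block_size) == 0
    out_h = h * block_size - crops[0][0] - crops[0][1]
    out_w = w * block_size - crops[1][0] - crops[1][1]
    return (n // (block_size * block_size), c, out_h, out_w)
# For the docstring example: _batch_to_space_out_shape((4, 1, 1, 1), 2, [[0, 0], [0, 0]]) == (1, 1, 2, 2).
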
class SpaceToBatchND(Primitive):
    r"""
    Divides spatial dimensions into blocks and combines the block size with the original batch.

    This operation will divide spatial dimensions into blocks with `block_shape`, and then the output tensor's spatial
    dimension is the corresponding number of blocks after division. The output tensor's batch dimension is the
    product of the original batch and all elements in `block_shape`.
    Before division, the spatial dimensions of the input are zero padded according to paddings if necessary.

    Args:
        block_shape (Union[list(int), tuple(int), int]): The block shape of dividing block
            with all elements greater than or equal to 1. If `block_shape` is a list or tuple,
            the length of `block_shape` is the number of spatial dimensions, called M later.
            If `block_shape` is an int, the block size of M dimensions are the same, equal to `block_shape`.
            On Ascend, M must be 2.
        paddings (Union[tuple, list]): The padding values for spatial dimensions, containing M subtraction lists.
            Each contains 2 integer values. All values must be greater than or equal to 0.
            `paddings[i]` specifies the paddings for the spatial dimension i,
            which corresponds to the input dimension i + offset, where offset = N-M,
            and N is the number of input dimensions.
            For each i, input_shape[i + offset]+paddings[i][0]+paddings[i][1]
            should be divisible by block_shape[i].

    Inputs:
        - **input_x** (Tensor) - The input tensor. The input tensor must be a 4-D tensor on Ascend.

    Outputs:
        Tensor, the output tensor with the same data type as the input.
        Assume the input shape is :math:`(n, c_1, ... c_k, w_1, ..., w_M)` with
        :math:`block\_shape` and :math:`paddings`.
        The shape of the output tensor will be :math:`(n', c_1, ... c_k, w'_1, ..., w'_M)`,
        where

        .. math::
            \begin{array}{ll} \\
                n' = n*(block\_shape[0]*...*block\_shape[M-1]) \\
                w'_i = (w_i+paddings[i-1][0]+paddings[i-1][1])//block\_shape[i-1]
            \end{array}

    Raises:
        TypeError: If `block_shape` is not one of list, tuple, int.
        TypeError: If `paddings` is neither list nor tuple.
        ValueError: If `block_shape` is not one dimensional when `block_shape` is a list or tuple.
        ValueError: If the length of `block_shape` is not 2 on Ascend.
        ValueError: If shape of `paddings` is not (M, 2), where M is the length of `block_shape`.
        ValueError: If the element of `block_shape` is not an integer larger than or equal to 1.
        ValueError: If the element of `paddings` is not an integer larger than or equal to 0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, ops
        >>> import numpy as np
        >>> block_shape = [2, 2]
        >>> paddings = [[0, 0], [0, 0]]
        >>> space_to_batch_nd = ops.SpaceToBatchND(block_shape, paddings)
        >>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)
        >>> output = space_to_batch_nd(input_x)
        >>> print(output)
        [[[[1.]]]
         [[[2.]]]
         [[[3.]]]
         [[[4.]]]]
    """

    @prim_attr_register
    def __init__(self, block_shape, paddings):
        """Initialize SpaceToBatchND"""
        validator.check_value_type('paddings type', paddings, [list, tuple], self.name)
        validator.check('paddings length', len(paddings), '', 1, validator.GE, self.name)

        if isinstance(block_shape, int):
            block_shape = (block_shape,) * np.array(paddings).shape[0]

        self.add_prim_attr("block_shape", block_shape)
        validator.check_value_type('block_shape type', block_shape, [list, tuple], self.name)
        validator.check('block_shape shape', len(np.array(block_shape).shape),
                        'default value', 1, validator.EQ, self.name)
        block_rank = len(block_shape)
        if context.get_context("device_target") == "Ascend":
            validator.check('block_shape length', block_rank, 'default value', 2, validator.EQ, self.name)
        for elem in block_shape:
            validator.check('block_shape element', elem, 'min value', 1, validator.GE, self.name)
            validator.check_value_type('block_shape element', elem, [int], self.name)
        self.block_shape = block_shape

        validator.check(
            'paddings shape', np.array(paddings).shape, 'default value', (block_rank, 2), validator.EQ, self.name)
        for elem in itertools.chain(*paddings):
            validator.check_non_negative_int(elem, 'paddings element', self.name)
            validator.check_value_type('paddings element', elem, [int], self.name)
        self.paddings = paddings


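
# A hedged, illustrative sketch of the N-D shape rule documented above: the
# last M dimensions are the spatial ones (offset = rank - M), each is padded
# and divided by its block size, and the batch dimension is multiplied by the
# product of `block_shape`. Hypothetical helper for exposition only.
def _space_to_batch_nd_out_shape(x_shape, block_shape, paddings):
    """Compute the expected output shape of SpaceToBatchND."""
    m = len(block_shape)
    offset = len(x_shape) - m
    out = list(x_shape)
    for i in range(m):
        padded = x_shape[offset + i] + paddings[i][0] + paddings[i][1]
        assert padded % block_shape[i] == 0
        out[offset + i] = padded // block_shape[i]
        out[0] *= block_shape[i]
    return tuple(out)
# For the docstring example: _space_to_batch_nd_out_shape((1, 1, 2, 2), [2, 2], [[0, 0], [0, 0]]) == (4, 1, 1, 1).
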
class BatchToSpaceNDV2(Primitive):
    r"""
    Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.

    Refer to :func:`mindspore.ops.batch_to_space_nd` for more details.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Inputs:
        - **input_x** (Tensor) - The input tensor. It must be at least a 2-D tensor
          (a 4-D tensor on Ascend), and its batch dimension must be divisible by the product of `block_shape`.
        - **block_shape** (Tensor) - The block shape of dividing block with all values greater
          than or equal to 1. If `block_shape` is a tuple or list, the length of `block_shape` is M corresponding
          to the number of spatial dimensions. If `block_shape` is an int, the block size of M dimensions are the
          same, equal to `block_shape`. On Ascend, M must be 2.
        - **crops** (Union[list(int), tuple(int)]) - The crops values for spatial dimensions, containing
          M subtraction lists. Each contains 2 integer values. All values must be >= 0. crops[i] specifies
          the crops values for spatial dimension i, which corresponds to input dimension i + offset,
          where offset = N-M, and N is the number of input dimensions. It is required that
          :math:`input\_shape[i+offset]*block\_shape[i] > crops[i][0]+crops[i][1]`

    Outputs:
        Tensor, contains the result of batch division and rearrangement of the original Tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> block_shape = Tensor(np.array([2, 2]), mindspore.int32)
        >>> crops = [[0, 0], [0, 0]]
        >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
        >>> output = ops.BatchToSpaceNDV2()(input_x, block_shape, crops)
        >>> print(output)
        [[[[1.  2.]
           [3.  4.]]]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize BatchToSpaceNDV2"""
        self.init_prim_io_names(inputs=['input_x', 'block_shape', 'crops'], outputs=['y'])
        self.add_prim_attr('origin_format', 'NHWC')


class Meshgrid(PrimitiveWithInfer):
    """
    Generates coordinate matrices from given coordinate tensors.

    Refer to :func:`mindspore.ops.meshgrid` for more details.

    Args:
        indexing (str, optional): Cartesian ``'xy'`` or
            matrix ``'ij'`` indexing of output. In the 2-D case with
            inputs of length `M` and `N`, the outputs are of shape :math:`(N, M)`
            for ``'xy'`` indexing and :math:`(M, N)` for ``'ij'`` indexing. In the 3-D
            case with inputs of length `M`, `N` and `P`, outputs are of shape
            :math:`(N, M, P)` for ``'xy'`` indexing and :math:`(M, N, P)` for ``'ij'`` indexing.
            Default: ``'xy'``.

    Inputs:
        - **input** (Union[tuple]) - A tuple of N 1-D Tensor objects.
          The length of input should be greater than 1. The data type is Number.

    Outputs:
        Tensors, a tuple of N N-D Tensor objects. The data type is the same as the inputs.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))
        >>> y = Tensor(np.array([5, 6, 7]).astype(np.int32))
        >>> z = Tensor(np.array([8, 9, 0, 1, 2]).astype(np.int32))
        >>> inputs = (x, y, z)
        >>> meshgrid = ops.Meshgrid(indexing='xy')
        >>> output = meshgrid(inputs)
        >>> print(output)
        (Tensor(shape=[3, 4, 5], dtype=Int32, value=
         [[[1, 1, 1, 1, 1],
           [2, 2, 2, 2, 2],
           [3, 3, 3, 3, 3],
           [4, 4, 4, 4, 4]],
          [[1, 1, 1, 1, 1],
           [2, 2, 2, 2, 2],
           [3, 3, 3, 3, 3],
           [4, 4, 4, 4, 4]],
          [[1, 1, 1, 1, 1],
           [2, 2, 2, 2, 2],
           [3, 3, 3, 3, 3],
           [4, 4, 4, 4, 4]]]),
         Tensor(shape=[3, 4, 5], dtype=Int32, value=
         [[[5, 5, 5, 5, 5],
           [5, 5, 5, 5, 5],
           [5, 5, 5, 5, 5],
           [5, 5, 5, 5, 5]],
          [[6, 6, 6, 6, 6],
           [6, 6, 6, 6, 6],
           [6, 6, 6, 6, 6],
           [6, 6, 6, 6, 6]],
          [[7, 7, 7, 7, 7],
           [7, 7, 7, 7, 7],
           [7, 7, 7, 7, 7],
           [7, 7, 7, 7, 7]]]),
         Tensor(shape=[3, 4, 5], dtype=Int32, value=
         [[[8, 9, 0, 1, 2],
           [8, 9, 0, 1, 2],
           [8, 9, 0, 1, 2],
           [8, 9, 0, 1, 2]],
          [[8, 9, 0, 1, 2],
           [8, 9, 0, 1, 2],
           [8, 9, 0, 1, 2],
           [8, 9, 0, 1, 2]],
          [[8, 9, 0, 1, 2],
           [8, 9, 0, 1, 2],
           [8, 9, 0, 1, 2],
           [8, 9, 0, 1, 2]]]))
    """

    @prim_attr_register
    def __init__(self, indexing="xy"):
        """Initialize Meshgrid."""
        validator.check_value_type("indexing", indexing, (str), self.name)
        validator.check_string(indexing.lower(), ["xy", "ij"], "indexing", self.name)
        self.indexing = indexing

    def infer_shape(self, x_shape):
        validator.check_value_type("shape", x_shape, [tuple], self.name)
        validator.check_int(len(x_shape), 2, validator.GE, "len of input", self.name)
        n = len(x_shape)
        shape_0 = []
        for s in x_shape:
            validator.check_int(len(s), 1, validator.EQ, 'each input rank', self.name)
            shape_0.append(s[0])
        if self.indexing == "xy":
            shape_0[0], shape_0[1] = shape_0[1], shape_0[0]
        out_shape = tuple(tuple(shape_0) for _ in range(n))
        return out_shape

    def infer_dtype(self, x_type):
        validator.check_subclass("input[0]", x_type[0], mstype.tensor_type, self.name)
        n = len(x_type)
        for i in range(1, n):
            validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0], validator.EQ, self.name, TypeError)
        return x_type


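
# A hedged, illustrative mirror of Meshgrid.infer_shape above: with
# indexing='xy' the first two dimensions of every output are swapped relative
# to 'ij'. Hypothetical helper for exposition only.
def _meshgrid_out_shapes(lengths, indexing="xy"):
    """Return the shape of each output Meshgrid produces for 1-D inputs of the given lengths."""
    shape = list(lengths)
    if indexing == "xy":
        shape[0], shape[1] = shape[1], shape[0]
    return tuple(tuple(shape) for _ in lengths)
# For the docstring example: _meshgrid_out_shapes((4, 3, 5), "xy") == ((3, 4, 5), (3, 4, 5), (3, 4, 5)).
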
class ReverseSequence(PrimitiveWithInfer):
    r"""
    Reverses variable length slices.

    Args:
        seq_dim (int): The dimension where reversal is performed. Required.
        batch_dim (int): The input is sliced in this dimension. Default: ``0`` .

    Inputs:
        - **x** (Tensor) - The input to reverse, supporting all number types including bool.
        - **seq_lengths** (Tensor) - Must be a 1-D vector with int32 or int64 types.

    Outputs:
        Tensor, with the same shape and data type as `x`.

    Raises:
        TypeError: If `seq_dim` or `batch_dim` is not an int.
        ValueError: If :math:`len(seq\_lengths) != x.shape[batch\_dim]`.
        ValueError: If :math:`batch\_dim == seq\_dim`.
        ValueError: If :math:`seq\_dim < 0` or :math:`seq\_dim >= len(x.shape)`.
        ValueError: If :math:`batch\_dim < 0` or :math:`batch\_dim >= len(x.shape)`.
        RuntimeError: If any value of `seq_lengths` is less than 0.
        RuntimeError: If any value of `seq_lengths` is larger than `x.shape[seq_dim]`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
        >>> seq_lengths = Tensor(np.array([1, 2, 3]))
        >>> reverse_sequence = ops.ReverseSequence(seq_dim=1)
        >>> output = reverse_sequence(x, seq_lengths)
        >>> print(output)
        [[1. 2. 3.]
         [5. 4. 6.]
         [9. 8. 7.]]
        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
        >>> seq_lengths = Tensor(np.array([1, 2, 3]))
        >>> reverse_sequence = ops.ReverseSequence(seq_dim=0, batch_dim=1)
        >>> output = reverse_sequence(x, seq_lengths)
        >>> print(output)
        [[1. 5. 9.]
         [4. 2. 6.]
         [7. 8. 3.]]
        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
        >>> seq_lengths = Tensor(np.array([2, 2, 3]))
        >>> reverse_sequence = ops.ReverseSequence(seq_dim=1)
        >>> output = reverse_sequence(x, seq_lengths)
        >>> print(output)
        [[2. 1. 3.]
         [5. 4. 6.]
         [9. 8. 7.]]
        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
        >>> seq_lengths = Tensor(np.array([3, 2, 3]))
        >>> reverse_sequence = ops.ReverseSequence(seq_dim=1)
        >>> output = reverse_sequence(x, seq_lengths)
        >>> print(output)
        [[3. 2. 1.]
         [5. 4. 6.]
         [9. 8. 7.]]
        >>> x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.float32)
        >>> seq_lengths = Tensor(np.array([4, 4]))
        >>> reverse_sequence = ops.ReverseSequence(seq_dim=1)
        >>> output = reverse_sequence(x, seq_lengths)
        >>> print(output)
        [[4. 3. 2. 1.]
         [8. 7. 6. 5.]]
    """

    @prim_attr_register
    def __init__(self, seq_dim, batch_dim=0):
        """Initialize ReverseSequence"""
        self.init_prim_io_names(inputs=['x', 'seq_lengths'], outputs=['y'])
        validator.check_value_type("seq_dim", seq_dim, [int], self.name)
        self.seq_dim_ = seq_dim
        validator.check_value_type("batch_dim", batch_dim, [int], self.name)
        self.batch_dim_ = batch_dim


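
# A hedged NumPy sketch of the ReverseSequence semantics (for exposition only):
# for batch entry b, the first seq_lengths[b] elements along seq_dim are
# reversed and the remainder is left untouched.
def _reverse_sequence_np(x, seq_lengths, seq_dim, batch_dim=0):
    """Reference behaviour of ReverseSequence on a NumPy array."""
    x = np.asarray(x)
    out = x.copy()
    for b, length in enumerate(seq_lengths):
        idx = [slice(None)] * x.ndim
        idx[batch_dim] = b
        fwd, rev = list(idx), list(idx)
        fwd[seq_dim] = slice(0, int(length))
        rev[seq_dim] = slice(int(length) - 1, None, -1)
        out[tuple(fwd)] = x[tuple(rev)]
    return out
# Matches the first docstring example: seq_lengths [1, 2, 3] with seq_dim=1
# reverses nothing in row 0, the first two entries of row 1, and all of row 2.
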
class EditDistance(Primitive):
    r"""
    Computes the Levenshtein Edit Distance. It is used to measure the similarity of two sequences. The inputs are
    variable-length sequences provided by SparseTensors (hypothesis_indices, hypothesis_values, hypothesis_shape)
    and (truth_indices, truth_values, truth_shape).

    .. math::

        \operatorname{lev}_{a, b}(i, j)=\left\{\begin{array}{ll}
        \max (i, j)  \qquad \qquad \qquad \qquad \qquad \quad \  \text { if } \min (i, j)=0 \\
        \min \left\{\begin{array}{ll}
        \operatorname{lev}_{a, b}(i-1, j)+1 & \\
        \operatorname{lev}_{a, b}(i, j-1)+1 & \text { otherwise. } \\
        \operatorname{lev}_{a, b}(i-1, j-1)+1_{\left(a_{i} \neq b_{j}\right)}
        \end{array}\right. &
        \end{array}\right.

    where :math:`a` indicates the hypothesis and :math:`b` indicates the truth. For ease of understanding,
    :math:`i` and :math:`j` here may be considered as the lengths of :math:`a` and :math:`b`.

    .. warning::
        Unordered `truth_indices` or `hypothesis_indices` might lead to an unexpected result, so it is suggested to
        make sure `truth_indices` and `hypothesis_indices` are both in ascending order before
        calling this API.

    Args:
        normalize (bool): If ``True`` , edit distances are normalized by length of truth. Default: ``True`` .

    Inputs:
        - **hypothesis_indices** (Tensor) - The indices of the hypothesis list SparseTensor. With int64 data type.
          The shape of tensor is :math:`(N, R)`.
        - **hypothesis_values** (Tensor) - The values of the hypothesis list SparseTensor.
          Must be 1-D vector with length of N.
        - **hypothesis_shape** (Tensor) - The shape of the hypothesis list SparseTensor.
          Must be R-length vector with int64 data type. Only constant value is allowed.
        - **truth_indices** (Tensor) - The indices of the truth list SparseTensor. With int64 data type.
          The shape of tensor is :math:`(M, R)`.
        - **truth_values** (Tensor) - The values of the truth list SparseTensor. Must be 1-D vector with length of M.
        - **truth_shape** (Tensor) - The shape of the truth list SparseTensor.
          Must be R-length vector with int64 data type. Only constant value is allowed.

    Outputs:
        Tensor, a dense tensor with rank `R-1` and float32 data type.

    Raises:
        TypeError: If `normalize` is not a bool.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> import mindspore.nn as nn
        >>> from mindspore import ops
        >>> class EditDistance(nn.Cell):
        ...     def __init__(self, hypothesis_shape, truth_shape, normalize=True):
        ...         super(EditDistance, self).__init__()
        ...         self.edit_distance = ops.EditDistance(normalize)
        ...         self.hypothesis_shape = hypothesis_shape
        ...         self.truth_shape = truth_shape
        ...
        ...     def construct(self, hypothesis_indices, hypothesis_values, truth_indices, truth_values):
        ...         return self.edit_distance(hypothesis_indices, hypothesis_values, self.hypothesis_shape,
        ...                                   truth_indices, truth_values, self.truth_shape)
        ...
        >>> hypothesis_indices = Tensor(np.array([[0, 0, 0], [1, 0, 1], [1, 1, 1]]).astype(np.int64))
        >>> hypothesis_values = Tensor(np.array([1, 2, 3]).astype(np.float32))
        >>> hypothesis_shape = Tensor(np.array([1, 1, 2]).astype(np.int64))
        >>> truth_indices = Tensor(np.array([[0, 1, 0], [0, 0, 1], [1, 1, 0], [1, 0, 1]]).astype(np.int64))
        >>> truth_values = Tensor(np.array([1, 3, 2, 1]).astype(np.float32))
        >>> truth_shape = Tensor(np.array([2, 2, 2]).astype(np.int64))
        >>> edit_distance = EditDistance(hypothesis_shape, truth_shape)
        >>> output = edit_distance(hypothesis_indices, hypothesis_values, truth_indices, truth_values)
        >>> print(output)
        [[1. 1.]
         [1. 1.]]
    """

    @prim_attr_register
    def __init__(self, normalize=True):
        """Initialize EditDistance"""
        self.normalize = validator.check_value_type("normalize", normalize, [bool], self.name)


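
# A hedged, pure-Python transcription of the recurrence in the EditDistance
# docstring (for exposition only; the operator itself consumes SparseTensor
# components and can normalize by the truth length).
def _levenshtein(a, b):
    """Dynamic program for lev_{a,b}(len(a), len(b)) from the documented recurrence."""
    rows, cols = len(a) + 1, len(b) + 1
    dist = [[max(i, j) if min(i, j) == 0 else 0 for j in range(cols)] for i in range(rows)]
    for i in range(1, rows):
        for j in range(1, cols):
            dist[i][j] = min(dist[i - 1][j] + 1,           # deletion
                             dist[i][j - 1] + 1,           # insertion
                             dist[i - 1][j - 1] + (a[i - 1] != b[j - 1]))  # substitution
    return dist[-1][-1]
# e.g. _levenshtein("kitten", "sitting") == 3.
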
class TransShape(PrimitiveWithInfer):
    """
    Transforms the shape of input tensor to target shape.

    Inputs:
        - **input_x** (Tensor) - An input tensor.
        - **out_shape** (tuple[int]) - The shape of output data.

    Outputs:
        Tensor, a tensor whose data type is the same as `input_x`, and whose shape is the given `out_shape`.
    """

    @prim_attr_register
    def __init__(self):
        """Initialize TransShape."""
        self.__setattr_flag__ = True

    def __infer__(self, x, shape):
        shp = shape['value']
        dtype = x['dtype']
        validator.check_tensor_dtype_valid('x', dtype, mstype.number_type + (mstype.bool_,), self.name)
        self.add_prim_attr('out_shape', tuple(shp))
        return {'shape': shp,
                'dtype': dtype,
                'value': None}


class Sort(Primitive):
    """
    Sorts the elements of the input tensor along the given dimension in the specified order.

    .. warning::
        Currently, the data types float16, uint8, int8, int16, int32 and int64 are well supported.
        Using float32 may cause loss of accuracy.

    Args:
        axis (int, optional): The dimension to sort along. Default: ``-1``, meaning the last dimension.
            The Ascend backend only supports sorting the last dimension.
        descending (bool, optional): Controls the sort order. If descending is ``True`` then the elements
            are sorted in descending order by value. Default: ``False`` .

    Inputs:
        - **x** (Tensor) - The input tensor.

    Outputs:
        - **y1** (Tensor) - A tensor whose values are the sorted values, with the same shape and data type as input.
        - **y2** (Tensor) - The indices of the elements in the original input tensor. Data type is int32.

    Raises:
        TypeError: If `axis` is not an int.
        TypeError: If `descending` is not a bool.
        ValueError: If `axis` is not in range of [-len(x.shape), len(x.shape)).

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
        >>> sort = ops.Sort()
        >>> output = sort(x)
        >>> # The output below is based on the Ascend platform.
        >>> print(output)
        (Tensor(shape=[3, 3], dtype=Float16, value=
        [[ 1.0000e+00,  2.0000e+00,  8.0000e+00],
         [ 3.0000e+00,  5.0000e+00,  9.0000e+00],
         [ 4.0000e+00,  6.0000e+00,  7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int32, value=
        [[2, 1, 0],
         [2, 0, 1],
         [0, 1, 2]]))
    """

    @prim_attr_register
    def __init__(self, axis=-1, descending=False):
        """Initialize Sort"""
        self.axis = validator.check_value_type("axis", axis, [int], self.name)
        self.descending = validator.check_value_type("descending", descending, [bool], self.name)
        self.init_prim_io_names(inputs=['x'], outputs=['y1', 'y2'])


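
# A hedged NumPy cross-check (for exposition only): Sort returns the same pair
# that np.sort/np.argsort would produce along the chosen axis. Tie ordering on
# the device backends is not guaranteed to match this sketch.
def _sort_np(x, axis=-1, descending=False):
    """Reference behaviour of Sort on a NumPy array."""
    x = np.asarray(x)
    indices = np.argsort(-x if descending else x, axis=axis, kind="stable")
    return np.take_along_axis(x, indices, axis=axis), indices.astype(np.int32)
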
class EmbeddingLookup(Primitive):
    """
    Returns a slice of input tensor based on the specified indices.

    This Primitive has similar functionality as GatherV2 operating on `axis = 0`, but has one more input:
    `offset`.

    Inputs:
        - **input_params** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
          This represents a Tensor slice, instead of the entire Tensor. Currently, the dimension is restricted to be 2.
        - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
          Specifies the indices of elements of the original Tensor. Values can be out of range of `input_params`,
          and the exceeding part will be filled with 0 in the output. Negative values are not supported and the
          result is undefined if they appear. The data type should be int32 or int64.
        - **offset** (int) - Specifies the offset value of this `input_params` slice. Thus the real indices
          are equal to `input_indices` minus `offset`.

    Outputs:
        Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`. The data type is the same with `input_params`.

    Raises:
        TypeError: If dtype of `input_indices` is not int.
        ValueError: If length of shape of `input_params` is greater than 2.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_params = Tensor(np.array([[8, 9], [10, 11], [12, 13], [14, 15]]), mindspore.float32)
        >>> input_indices = Tensor(np.array([[5, 2], [8, 5]]), mindspore.int32)
        >>> offset = 4
        >>> output = ops.EmbeddingLookup()(input_params, input_indices, offset)
        >>> print(output)
        [[[10. 11.]
          [ 0.  0.]]
         [[ 0.  0.]
          [10. 11.]]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize EmbeddingLookup."""
        self.__setattr_flag__ = True
        self.init_prim_io_names(inputs=['params', 'indices', 'offset'],
                                outputs=['output'])
        self.add_prim_attr('bprop_return_sparse', True)


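
# A hedged NumPy sketch of the gather-with-offset semantics described above
# (for exposition only): indices are shifted by `offset`, and any row that
# then falls outside `params` is filled with zeros.
def _embedding_lookup_np(params, indices, offset):
    """Reference behaviour of EmbeddingLookup on NumPy arrays."""
    params = np.asarray(params)
    shifted = np.asarray(indices) - offset
    out = np.zeros(shifted.shape + params.shape[1:], dtype=params.dtype)
    valid = (shifted >= 0) & (shifted < params.shape[0])
    out[valid] = params[shifted[valid]]
    return out
# Reproduces the docstring example: offset 4 maps index 5 to row 1 and zeros out indices 2 and 8.
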
class IdentityN(Primitive):
    """
    Returns a tuple of tensors with the same shapes and contents as the input.

    This op can be used to override the gradient for complicated functions. For
    example, suppose :math:`y = f(x)` and we wish to apply a custom function g for backprop
    such that :math:`dx=g(dy)`.

    Inputs:
        - **x** (Union[tuple[Tensor], list[Tensor]]) - Input, the data type is RealNumber.

    Outputs:
        Tensors - tuple(Tensor), the shape of tensor and the data type are the same as input `x`.

    Raises:
        TypeError: If `x` is not tuple(Tensor) or list(Tensor).
        TypeError: If input `x` type is not RealNumber.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> x = [Tensor(np.array([1, 2, 3, 4]), mstype.int64), Tensor(np.array([4, 3, 1, 1]), mstype.int64)]
        >>> output = ops.IdentityN()(x)
        >>> print(np.allclose(output[0].asnumpy(), x[0].asnumpy()))
        True
        >>> print(np.allclose(output[1].asnumpy(), x[1].asnumpy()))
        True
        >>> print(output)
        (Tensor(shape=[4], dtype=Int64, value= [1, 2, 3, 4]), Tensor(shape=[4], dtype=Int64, value= [4, 3, 1, 1]))
    """

    @prim_attr_register
    def __init__(self):
        """Initialize IdentityN"""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])


class RangeV2(Primitive):
    """
    Creates a sequence of numbers that begins at `start`, ends at `limit` but not including `limit`,
    and extends by increments of `delta`.

    The types of all 3 inputs must be the same. The type of the resulting tensor is
    the same as the type of the inputs.

    Args:
        maxlen (int): Memory that can fit `maxlen` elements
            will be allocated for the output. Optional, must be positive, defaults to 1000000.
            If the output has more than `maxlen` elements, a `ValueError` will occur.

    Inputs:
        - **start** (Tensor) - A scalar Tensor. The first number in the sequence. Must have
          type: int32 or float32 or int64 or float64
        - **limit** (Tensor) - A scalar Tensor. Upper limit of the sequence, exclusive. Must
          have type: int32 or float32 or int64 or float64
        - **delta** (Tensor) - A scalar Tensor. Number that increments `start`. Must have
          type: int32 or float32 or int64 or float64

    Outputs:
       A 1-D Tensor, with the same type as the inputs.

    Raises:
        TypeError: If datatype of `start`, `limit` and `delta` is not supported.
        TypeError: If datatype of `start`, `limit` and `delta` is not the same.
        TypeError: If attr `maxlen` is not int.
        TypeError: If `start` or `limit` or `delta` is not scalar Tensor.
        ValueError: If value of `maxlen` is not positive.
        ValueError: If `delta` >= 0 when `start` > `limit`.
        ValueError: If `delta` <= 0 when `start` < `limit`.
        ValueError: If the output has more than `maxlen` elements.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> start = Tensor(0, mstype.int32)
        >>> limit = Tensor(10, mstype.int32)
        >>> delta = Tensor(4, mstype.int32)
        >>> output = ops.RangeV2()(start, limit, delta)
        >>> print(output)
        [0 4 8]
    """

    @prim_attr_register
    def __init__(self, maxlen=1000000):
        """Initialize RangeV2"""
        self.init_prim_io_names(inputs=['start', 'limit', 'delta'], outputs=['output'])
        validator.check_value_type("maxlen", maxlen, [int], self.name)
        validator.check_positive_int(maxlen, "maxlen", self.name)


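
# A hedged sketch (for exposition only) of the output length that `maxlen`
# bounds: the number of steps of size `delta` in the half-open interval
# [start, limit).
def _range_v2_length(start, limit, delta):
    """Number of elements RangeV2 emits for [start, limit) with step delta."""
    assert delta != 0
    return max(0, int(np.ceil((limit - start) / delta)))
# For the docstring example: _range_v2_length(0, 10, 4) == 3, i.e. [0, 4, 8].
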
class MaskedScatter(Primitive):
    """
    Updates the value in the input with value in `updates` according to the `mask`.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Inputs:
        - **x** (Tensor): The input Tensor to be updated.
        - **mask** (Tensor[bool]): The mask Tensor indicating which elements should be modified or replaced.
          The shapes of `mask` and `x` must be the same or broadcastable.
        - **updates** (Tensor): The values to scatter into the target tensor `x`. It has the same data type as `x`. The
          number of elements must be greater than or equal to the number of True's in `mask`.

    Outputs:
        Tensor, with the same type and shape as `x`.

    Raises:
        TypeError: If `x`, `mask` or `updates` is not a Tensor.
        TypeError: If data type of `x` is not supported.
        TypeError: If dtype of `mask` is not bool.
        TypeError: If the dim of `x` is less than the dim of `mask`.
        ValueError: If `mask` cannot be broadcast to `x`.
        ValueError: If the number of elements in `updates` is less than the number of True's in `mask`.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
        >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
        >>> updates = Tensor(np.array([5., 6., 7.]), mindspore.float32)
        >>> output = ops.MaskedScatter()(input_x, mask, updates)
        >>> print(output)
        [5. 6. 3. 7.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize MaskedScatter"""
        self.init_prim_io_names(inputs=['x', 'mask', 'updates'], outputs=['y'])


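
# A hedged NumPy sketch of MaskedScatter (for exposition only): values from
# `updates` are consumed in order, one per True position of the broadcast mask.
def _masked_scatter_np(x, mask, updates):
    """Reference behaviour of MaskedScatter on NumPy arrays."""
    out = np.array(x, copy=True)
    mask_b = np.broadcast_to(np.asarray(mask, dtype=bool), out.shape)
    out[mask_b] = np.asarray(updates).reshape(-1)[:int(mask_b.sum())]
    return out
# Reproduces the docstring example: [1, 2, 3, 4] with mask [T, T, F, T] and
# updates [5, 6, 7] gives [5, 6, 3, 7].
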
class MaskedSelect(PrimitiveWithCheck):
    """
    Returns a new 1-D Tensor which indexes the `x` tensor according to the boolean `mask`.
    The shapes of the `mask` tensor and the `x` tensor don't need to match, but they must be broadcastable.

    Inputs:
        - **x** (Tensor) - Input Tensor of any dimension.
        - **mask** (Tensor[bool]) - Boolean mask Tensor, whose shape must be broadcastable with the shape of `x`.

    Outputs:
        A 1-D Tensor, with the same type as `x`.

    Raises:
        TypeError: If `x` or `mask` is not a Tensor.
        TypeError: If dtype of `mask` is not bool.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int32)
        >>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
        >>> output = ops.MaskedSelect()(x, mask)
        >>> print(output)
        [1 3]
        >>> x = Tensor(2.1, mindspore.float32)
        >>> mask = Tensor(True, mindspore.bool_)
        >>> output = ops.MaskedSelect()(x, mask)
        >>> print(output)
        [2.1]
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['x', 'mask'], outputs=['output'])

    def check_shape(self, x_shape, mask_shape):
        get_broadcast_shape(x_shape, mask_shape, self.name, arg_name1="x", arg_name2="mask")

    def check_dtype(self, x_dtype, mask_dtype):
        validator.check_tensor_dtype_valid('mask', mask_dtype, [mstype.bool_], self.name)
        validator.check_tensor_dtype_valid('x', x_dtype, (mstype.bool_,) + mstype.number_type, self.name)


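
# A hedged NumPy sketch of MaskedSelect (for exposition only): `x` and `mask`
# are broadcast against each other, then the masked elements are returned as a
# 1-D array.
def _masked_select_np(x, mask):
    """Reference behaviour of MaskedSelect on NumPy arrays."""
    x_b, mask_b = np.broadcast_arrays(np.asarray(x), np.asarray(mask))
    return x_b[mask_b.astype(bool)]
# e.g. _masked_select_np([1, 2, 3, 4], [True, False, True, False]) -> array([1, 3]).
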
class _TensorScatterOp(PrimitiveWithInfer):
    """
    Defines TensorScatter Base Operators
    """

    def infer_shape(self, input_x_shape, indices_shape, updates_shape):
        if indices_shape != [-2] and len(indices_shape) < 2:
            raise ValueError(f"For '{self.name}', the dimension of 'indices' cannot be less than 2,"
                             f" but got {len(indices_shape)}.")
        if indices_shape[-1] > 0:
            if indices_shape[-1] > len(input_x_shape):
                raise ValueError(f"For '{self.name}', the last dimension of 'indices' must be less than or equal to "
                                 f"the dimension of 'input_x', but got the "
                                 f"last dimension of 'indices': {indices_shape[-1]} and the dimension of 'input_x': "
                                 f"{len(input_x_shape)}.")
            updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:]
            if self._check_shape(updates_shape_check, updates_shape) is False:
                raise ValueError(f"For '{self.name}', the shape of 'update' must be equal to updates_shape_check, "
                                 f"where updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:] "
                                 f"but got the shape of 'update': {updates_shape}, "
                                 f"updates_shape_check: {updates_shape_check}, indices_shape: {indices_shape} and "
                                 f"input_x_shape: {input_x_shape}. Please check input_x_shape and indices_shape.")

        return input_x_shape

    def infer_dtype(self, input_x_dtype, indices_dtype, updates_dtype):
        validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32, mstype.int64], self.name)
        args = {"input_x": input_x_dtype, "updates": updates_dtype}
        validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type, self.name)
        return input_x_dtype

    def _check_shape(self, expect, real):
        """check shape"""
        if -2 in expect or -2 in real:
            return True
        if len(expect) != len(real):
            return False
        for a, b in zip(expect, real):
            if a == -1 or b == -1:
                continue
            if a != b:
                return False
        return True


class TensorScatterUpdate(_TensorScatterOp):
    r"""
    Creates a new tensor by updating the positions in `input_x` indicated by
    `indices`, with values from `update`. This operation is almost equivalent to using
    :class:`mindspore.ops.ScatterNdUpdate`, except that the updates are applied on `input_x` instead of a zero tensor.

    `indices` must have rank at least 2, the last axis is the depth of each index
    vector. For each index vector, there must be a corresponding value in `update`. If
    the depth of each index tensor matches the rank of `input_x`, then each index
    vector corresponds to a scalar in `input_x` and each `update` updates a scalar. If
    the depth of each index tensor is less than the rank of `input_x`, then each index
    vector corresponds to a slice in `input_x`, and each `update` updates a slice.

    The order in which updates are applied is nondeterministic, meaning that if there
    are multiple index vectors in `indices` that correspond to the same position, the
    value of that position in the output will be nondeterministic.

    .. math::
        output[indices] = update

    Inputs:
        - **input_x** (Tensor) - The input tensor. The dimension of input_x must be no less than indices.shape[-1].
          The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
          The data type is Number.
        - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
          The rank must be at least 2.
        - **update** (Tensor) - The tensor to update the input tensor, has the same type as `input_x`, and
          :math:`update.shape = indices.shape[:-1]+input\_x.shape[indices.shape[-1]:]`

    Outputs:
        Tensor, has the same shape and type as `input_x`.

    Raises:
        TypeError: If dtype of `indices` is neither int32 nor int64.
        ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
        ValueError: If the shape of `update` does not match
            :math:`indices.shape[:-1]+input\_x.shape[indices.shape[-1]:]`.
        RuntimeError: If a value of `indices` is out of the range of `input_x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
        >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
        >>> update = Tensor(np.array([1.0, 2.2]), mindspore.float32)
        >>> op = ops.TensorScatterUpdate()
        >>> output = op(input_x, indices, update)
        >>> print(output)
        [[ 1.   0.3  3.6]
         [ 0.4  2.2 -3.2]]
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])

    def infer_dtype(self, input_x_dtype, indices_dtype, updates_dtype):
        validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32, mstype.int64], self.name)
        args = {"input_x": input_x_dtype, "updates": updates_dtype}
        validator.check_tensors_dtypes_same_and_valid(args, (mstype.bool_,) + mstype.number_type, self.name)
        return input_x_dtype


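
# A hedged NumPy sketch of the whole TensorScatter* family (for exposition
# only): every row of `indices` addresses a scalar or slice of `input_x`, and
# `op` is the combination rule -- plain assignment here, old + new for
# TensorScatterAdd, min/max for the comparison variants, and so on. Duplicate
# indices are applied sequentially here, which is why overlapping assignment
# is documented as nondeterministic on real backends.
def _tensor_scatter_np(input_x, indices, updates, op=lambda old, new: new):
    """Apply `op` at every position of `input_x` addressed by `indices`."""
    out = np.array(input_x, copy=True)
    indices = np.asarray(indices)
    updates = np.asarray(updates)
    depth = indices.shape[-1]
    flat_updates = updates.reshape((-1,) + out.shape[depth:])
    for idx, upd in zip(indices.reshape(-1, depth), flat_updates):
        key = tuple(idx)
        out[key] = op(out[key], upd)
    return out
# e.g. op=lambda old, new: old + new mirrors TensorScatterAdd, and
# op=np.maximum mirrors TensorScatterMax.
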
class TensorScatterMax(Primitive):
    r"""
    By comparing the value at the position indicated by `indices` in `x` with the value in the `updates`,
    the value at the index will eventually be equal to the largest one to create a new tensor.

    Refer to :func:`mindspore.ops.tensor_scatter_max` for more details.

    Inputs:
        - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
        - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
          The rank must be at least 2.
        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
          and updates.shape should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.

    Outputs:
        Tensor, has the same shape and type as `input_x`.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
        >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
        >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
        >>> # Next, demonstrate the approximate operation process of this operator:
        >>> # 1, indices[0] = [0, 0], indices[1] = [0, 0]
        >>> # 2, And input_x[0, 0] = -0.1
        >>> # 3, So input_x[indices] = [-0.1, -0.1]
        >>> # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)
        >>> op = ops.TensorScatterMax()
        >>> # 5, Perform the max operation for the first time:
        >>> #      first_input_x = Max(input_x[0][0], updates[0]) = [[1.0, 0.3, 3.6], [0.4, 0.5, -3.2]]
        >>> # 6, Perform the max operation for the second time:
        >>> #      second_input_x = Max(first_input_x[0][0], updates[1]) = [[2.2, 0.3, 3.6], [0.4, 0.5, -3.2]]
        >>> output = op(input_x, indices, updates)
        >>> print(output)
        [[ 2.2  0.3  3.6]
         [ 0.4  0.5 -3.2]]
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])


class TensorScatterMin(Primitive):
    r"""
    By comparing the value at the position indicated by `indices` in `input_x` with the value in the `updates`,
    the value at the index will eventually be equal to the smallest one to create a new tensor.

    Refer to :func:`mindspore.ops.tensor_scatter_min` for more details.

    Inputs:
        - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
        - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
          The rank must be at least 2.
        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
          and updates.shape should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.

    Outputs:
        Tensor, has the same shape and type as `input_x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
        >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
        >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
        >>> # Next, demonstrate the approximate operation process of this operator:
        >>> # 1, indices[0] = [0, 0], indices[1] = [0, 0]
        >>> # 2, And input_x[0, 0] = -0.1
        >>> # 3, So input_x[indices] = [-0.1, -0.1]
        >>> # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)
        >>> op = ops.TensorScatterMin()
        >>> # 5, Perform the min operation for the first time:
        >>> #      first_input_x = Min(input_x[0][0], updates[0]) = [[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]
        >>> # 6, Perform the min operation for the second time:
        >>> #      second_input_x = Min(first_input_x[0][0], updates[1]) = [[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]
        >>> output = op(input_x, indices, updates)
        >>> print(output)
        [[-0.1  0.3  3.6]
         [ 0.4  0.5 -3.2]]
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])


class TensorScatterSub(Primitive):
    r"""
    Creates a new tensor by subtracting the values from the positions in `input_x` indicated by
    `indices`, with values from `updates`. When multiple values are provided for the same
    index, the result of the update will be to subtract these values respectively. This operation is almost
    equivalent to using :class:`mindspore.ops.ScatterNdSub` , except that the updates are applied on output `Tensor`
    instead of input `Parameter`.

    .. math::
        output\left [indices  \right ] = input\_x- update

    Refer to :func:`mindspore.ops.tensor_scatter_sub` for more details.

    Inputs:
        - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
        - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
          The rank must be at least 2.
        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as `input_x`,
          and the shape of `updates` should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.

    Outputs:
        Tensor, has the same shape and type as `input_x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
        >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
        >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
        >>> # Next, demonstrate the approximate operation process of this operator:
        >>> # 1, indices[0] = [0, 0], indices[1] = [0, 0]
        >>> # 2, And input_x[0, 0] = -0.1
        >>> # 3, So input_x[indices] = [-0.1, -0.1]
        >>> # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)
        >>> op = ops.TensorScatterSub()
        >>> # 5, Perform the subtract operation for the first time:
        >>> #      first_input_x = input_x[0][0] - updates[0] = [[-1.1, 0.3, 3.6], [0.4, 0.5, -3.2]]
        >>> # 6, Perform the subtract operation for the second time:
        >>> #      second_input_x = first_input_x[0][0] - updates[1] = [[-3.3, 0.3, 3.6], [0.4, 0.5, -3.2]]
        >>> output = op(input_x, indices, updates)
        >>> print(output)
        [[-3.3000002  0.3        3.6      ]
         [ 0.4        0.5       -3.2      ]]
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])


class TensorScatterAdd(Primitive):
    """
    Creates a new tensor by adding the values from the positions in `input_x` indicated by
    `indices`, with values from `updates`. When multiple values are given for the same
    index, the updated result will be the sum of all values. This operation is almost
    equivalent to using :class:`mindspore.ops.ScatterNdAdd`, except that the updates are applied on output `Tensor`
    instead of input `Parameter`.

    Refer to :func:`mindspore.ops.tensor_scatter_add` for more details.

    Inputs:
        - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
        - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
          The rank must be at least 2.
        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
          and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].

    Outputs:
        Tensor, has the same shape and type as `input_x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
        >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
        >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
        >>> # Next, demonstrate the approximate operation process of this operator:
        >>> # 1, indices[0] = [0, 0], indices[1] = [0, 0]
        >>> # 2, And input_x[0, 0] = -0.1
        >>> # 3, So input_x[indices] = [-0.1, -0.1]
        >>> # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)
        >>> op = ops.TensorScatterAdd()
        >>> # 5, Perform the addition operation for the first time:
        >>> #      first_input_x = input_x[0][0] + updates[0] = [[0.9, 0.3, 3.6], [0.4, 0.5, -3.2]]
        >>> # 6, Perform the addition operation for the second time:
        >>> #      second_input_x = first_input_x[0][0] + updates[1] = [[3.1, 0.3, 3.6], [0.4, 0.5, -3.2]]
        >>> output = op(input_x, indices, updates)
        >>> print(output)
        [[ 3.1  0.3  3.6]
         [ 0.4  0.5 -3.2]]
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])


4718class TensorScatterMul(_TensorScatterOp):
4719    r"""
4720    Creates a new tensor by multiplying the values from the positions in `input_x` indicated by
4721    `indices`, with values from `updates`. When multiple values are provided for the same
4722    index, the result of the update will be to multiply these values respectively.
4723    The updates are applied on output `Tensor` instead of input `Parameter`.
4724
4725    .. math::
4726        output\left [indices  \right ] = input\_x\times  update
4727
4728    Refer to :func:`mindspore.ops.tensor_scatter_mul` for more details.
4729
4730    Inputs:
4731        - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
4732        - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
4733          The rank must be at least 2.
4734        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as `input_x`,
4735          and the shape of `updates` should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
4736
4737    Outputs:
4738        Tensor, has the same shape and type as `input_x`.
4739
4740    Supported Platforms:
4741        ``GPU`` ``CPU``
4742
4743    Examples:
4744        >>> import mindspore
4745        >>> import numpy as np
4746        >>> from mindspore import Tensor, ops
4747        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
4748        >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
4749        >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
4750        >>> # Next, demonstrate the approximate operation process of this operator:
4751        >>> # 1, indices[0] = [0, 0], indices[1] = [0, 0]
4752        >>> # 2, And input_x[0, 0] = -0.1
4753        >>> # 3, So input_x[indices] = [-0.1, -0.1]
4754        >>> # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)
4755        >>> op = ops.TensorScatterMul()
4756        >>> # 5, Perform the multiplication operation for the first time:
4757        >>> #      first_input_x[0][0] = input_x[0][0] * updates[0] = -0.1, so first_input_x = [[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]
4758        >>> # 6, Perform the multiplication operation for the second time:
4759        >>> #      second_input_x[0][0] = first_input_x[0][0] * updates[1] = -0.22, so second_input_x = [[-0.22, 0.3, 3.6], [0.4, 0.5, -3.2]]
4760        >>> output = op(input_x, indices, updates)
4761        >>> print(output)
4762        [[-0.22  0.3   3.6  ]
4763         [ 0.4   0.5   -3.2 ]]
4764    """
4765
4766    @prim_attr_register
4767    def __init__(self):
4768        self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])
4769
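
# NOTE: a small helper sketching the `updates` shape rule quoted in the docstring above;
# `_expected_updates_shape` is a hypothetical name used only for illustration.
def _expected_updates_shape(input_shape, indices_shape):
    """updates.shape must equal indices_shape[:-1] + input_shape[indices_shape[-1]:]."""
    return tuple(indices_shape[:-1]) + tuple(input_shape[indices_shape[-1]:])
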
4770
4771class TensorScatterDiv(_TensorScatterOp):
4772    r"""
4773    Creates a new tensor by dividing the values from the positions in `input_x` indicated by
4774    `indices`, by values from `updates`. When multiple values are provided for the same
4775    index, the position is divided by each of these values in turn.
4776    The updates are applied to the output `Tensor` instead of the input `Parameter`.
4777
4778    Refer to :func:`mindspore.ops.tensor_scatter_div` for more details.
4779
4780    Inputs:
4781        - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
4782        - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
4783          The rank must be at least 2.
4784        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
4785          and updates.shape should be equal to :math:`indices.shape[:-1] + input\_x.shape[indices.shape[-1]:]`.
4786
4787    Outputs:
4788        Tensor, has the same shape and type as `input_x`.
4789
4790    Supported Platforms:
4791        ``GPU`` ``CPU``
4792
4793    Examples:
4794        >>> import mindspore
4795        >>> import numpy as np
4796        >>> from mindspore import Tensor, ops
4797        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
4798        >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
4799        >>> updates = Tensor(np.array([1.0, 2.0]), mindspore.float32)
4800        >>> # Next, demonstrate the approximate operation process of this operator:
4801        >>> # 1, indices[0] = [0, 0], indices[1] = [0, 0]
4802        >>> # 2, And input_x[0, 0] = -0.1
4803        >>> # 3, So input_x[indices] = [-0.1, -0.1]
4804        >>> # 4, Satisfy the above formula: input_x[indices].shape = (2,) == updates.shape = (2,)
4805        >>> op = ops.TensorScatterDiv()
4806        >>> # 5, Perform the division operation for the first time:
4807        >>> #      first_input_x[0][0] = input_x[0][0] / updates[0] = -0.1, so first_input_x = [[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]
4808        >>> # 6, Perform the division operation for the second time:
4809        >>> #      second_input_x[0][0] = first_input_x[0][0] / updates[1] = -0.05, so second_input_x = [[-0.05, 0.3, 3.6], [0.4, 0.5, -3.2]]
4810        >>> output = op(input_x, indices, updates)
4811        >>> print(output)
4812        [[-0.05  0.3  3.6  ]
4813         [ 0.4   0.5  -3.2 ]]
4814    """
4815
4816    @prim_attr_register
4817    def __init__(self):
4818        self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])
4819
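
# NOTE: an illustrative NumPy analogue of TensorScatterDiv under the same assumptions
# as the sketch after TensorScatterAdd (2-D input, full index vectors); duplicate
# indices divide successively, as the example above shows.
def _tensor_scatter_div_numpy(input_x, indices, updates):
    out = input_x.copy()
    np.divide.at(out, tuple(indices.T), updates)  # out[idx] /= update, accumulating
    return out
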
4820
4821class ListDiff(Primitive):
4822    r"""
4823    This function calculates the difference between two numerical lists.
4824
4825    It generates a list of all elements that are present in list `x` but not in list `y`.
4826    The output list `out` retains the same order as the original `x` including duplicate elements.
4827
4828    Additionally, this class outputs a list `idx` that identifies the position of each element
4829    in `out` within the original `x`. That is to say:
4830    :code:`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` .
4831
4832    Args:
4833        out_idx (:class:`mindspore.dtype`, optional): The dtype of `idx`,
4834            an optional datatype of ``mstype.int32`` and ``mstype.int64`` .
4835            Default: ``mstype.int32`` .
4836
4837    Inputs:
4838        - **x** - Values to keep. A 1-D `Tensor`.
4839        - **y** - Values to remove. A 1-D `Tensor`. Must have the same type as `x`.
4840
4841    Outputs:
4842        - **out** - The kept values. A 1-D `Tensor`. Has the same type as `x`.
4843        - **idx** - The original index of kept values. A 1-D `Tensor` of type `out_idx`.
4844
4845    Raises:
4846        ValueError: If `x` or `y` shape is not 1D.
4847        TypeError: If `x` or `y` is not a Tensor.
4848        TypeError: If `x` or `y` data type is not int or uint.
4849        TypeError: If `x` has different data type with `y`.
4850        TypeError: If attr `out_idx` not in [mstype.int32, mstype.int64].
4851
4852    Supported Platforms:
4853        ``Ascend`` ``GPU`` ``CPU``
4854
4855    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
4856        >>> x = Tensor(np.arange(1, 7, 1), dtype=mindspore.dtype.int32) # [1, 2, 3, 4, 5, 6]
4857        >>> y = Tensor([1, 3, 5], dtype=mindspore.dtype.int32)
4858        >>> op = ops.ListDiff() # out_idx default is mindspore.dtype.int32
4859        >>> out, idx = op(x, y)
4860        >>> print(out)
4861        [2 4 6]
4862        >>> print(idx)
4863        [1 3 5]
4864    """
4865
4866    @prim_attr_register
4867    def __init__(self, out_idx=mstype.int32):
4868        """Initialize ListDiff"""
4869        self.init_prim_io_names(inputs=['x', 'y'], outputs=['out', 'idx'])
4870        validator.check_value_type("out_idx", out_idx, [mstype.Type], self.name)
4871        validator.check("out_idx", out_idx, "", [mstype.int32, mstype.int64], validator.IN,
4872                        self.name, excp_cls=TypeError)
4873        self.out_idx = out_idx
4874        self.add_prim_attr('out_idx', out_idx)
4875
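
# NOTE: a NumPy sketch of the ListDiff semantics above for 1-D arrays, kept only
# as a readable reference; it is not how the primitive is implemented.
def _list_diff_numpy(x, y, out_idx_dtype=np.int32):
    keep = ~np.isin(x, y)                             # True where x[i] is absent from y
    idx = np.nonzero(keep)[0].astype(out_idx_dtype)   # positions of kept values in x
    return x[keep], idx                               # satisfies out[i] == x[idx[i]]
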
4876
4877class SplitV(Primitive):
4878    r"""
4879    Splits the input tensor into `num_split` tensors along the given dimension.
4880
4881    The `input_x` tensor will be split into sub-tensors with individual shapes given
4882    by `size_splits` along the split dimension. This requires that `input_x.shape[split_dim]`
4883    is equal to the sum of `size_splits`.
4884
4885    The shape of `input_x` is :math:`(x_1, x_2, ..., x_M, ..., x_R)` whose rank
4886    is `R`. Set the given `split_dim` as M, and :math:`-R \le M < R`. Set the given `num_split`
4887    as `N`, the given `size_splits` as :math:`(x_{m_1}, x_{m_2}, ..., x_{m_N})`,
4888    :math:`x_M=\sum_{i=1}^Nx_{m_i}`. The output is a list of tensor objects, for the
4889    :math:`i`-th tensor, it has the shape of :math:`(x_1, x_2, ..., x_{m_i}, ..., x_R)`.
4890    :math:`x_{m_i}` is the :math:`M`-th dimension of the :math:`i`-th tensor.
4891    Then, the shapes of the output tensors are
4892
4893    .. math::
4894
4895        ((x_1, x_2, ..., x_{m_1}, ..., x_R), (x_1, x_2, ..., x_{m_2}, ..., x_R), ...,
4896         (x_1, x_2, ..., x_{m_N}, ..., x_R))
4897
4898    Args:
4899        size_splits (Union[tuple, list]): A tuple or list of sizes of each output tensor along the split
4900            dimension, and the sum of these sizes should equal to the dimension of the
4901            input tensor along `split_dim`. The list may also contain a single instance of
4902            the value -1, which indicates that the size of that dimension should be inferred.
4903        split_dim (int): An int indicates the dimension along which to split.
4904            Must be in the range [-len(input_x.shape), len(input_x.shape)).
4905        num_split (int): The number of output tensors. Must be positive int.
4906
4907    Inputs:
4908        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ...,x_M ..., x_R)`.
4909
4910    Outputs:
4911        Tensor, a list of `num_split` Tensor objects with the shape :math:`((x_1, x_2, ..., x_{m_1}, ..., x_R),
4912        (x_1, x_2, ..., x_{m_2}, ..., x_R), ..., (x_1, x_2, ..., x_{m_N}, ..., x_R))`, :math:`x_M=\sum_{i=1}^Nx_{m_i}`.
4913        The data type is the same with `input_x`.
4914
4915    Raises:
4916        TypeError: If `input_x` is not a Tensor.
4917        TypeError: If `size_splits` is not a tuple or a list.
4918        TypeError: If element of `size_splits` is not an int.
4919        TypeError: If `split_dim` or `num_split` is not an int.
4920        ValueError: If rank of the `size_splits` is not equal to `num_split`.
4921        ValueError: If sum of the `size_splits` is not equal to the dimension of value along `split_dim`.
4922        ValueError: If `split_dim` is out of the range [-len(input_x.shape), len(input_x.shape)).
4923        ValueError: If the `num_split` is less than or equal to 0.
4924
4925    Supported Platforms:
4926        ``Ascend``
4927
4928    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
4929        >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.int32)
4930        >>> op = ops.SplitV(size_splits=[1, -1], split_dim=1, num_split=2)
4931        >>> output = op(input_x)
4932        >>> print(output)
4933        (Tensor(shape=[3, 1], dtype=Int32, value=
4934        [[1],
4935         [4],
4936         [7]]), Tensor(shape=[3, 2], dtype=Int32, value=
4937        [[2, 3],
4938         [5, 6],
4939         [8, 9]]))
4940        >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.int32)
4941        >>> op = ops.SplitV(size_splits=[2, 1], split_dim=0, num_split=2)
4942        >>> output = op(input_x)
4943        >>> print(output)
4944        (Tensor(shape=[2, 3], dtype=Int32, value=
4945        [[1, 2, 3],
4946         [4, 5, 6]]), Tensor(shape=[1, 3], dtype=Int32, value=
4947        [[7, 8, 9]]))
4948    """
4949
4950    @prim_attr_register
4951    def __init__(self, size_splits, split_dim, num_split):
4952        """Initialize SplitV"""
4953        validator.check_value_type("size_splits", size_splits, [tuple, list], self.name)
4954        for elements_of_size_splits in size_splits:
4955            validator.check_value_type("elements of size_splits", elements_of_size_splits, [int], self.name)
4956            if elements_of_size_splits != -1 and elements_of_size_splits < 1:
4957                raise ValueError(f"For \'{self.name}\', all elements of size_splits must be positive (except at most "
4958                                 f"one default value -1), but got: {elements_of_size_splits}.")
4959        validator.check_value_type("split_dim", split_dim, [int], self.name)
4960        validator.check_value_type("num_split", num_split, [int], self.name)
4961        validator.check_positive_int(num_split, "num_split", self.name)
4962        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
4963
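
# NOTE: a NumPy sketch of SplitV that resolves a single -1 entry in `size_splits`
# as described in the docstring above; illustrative only.
def _split_v_numpy(x, size_splits, split_dim):
    sizes = list(size_splits)
    if -1 in sizes:
        # the inferred size is whatever remains after the explicit sizes
        sizes[sizes.index(-1)] = x.shape[split_dim] - (sum(sizes) + 1)
    offsets = np.cumsum(sizes)[:-1]                   # split points along `split_dim`
    return np.split(x, offsets, axis=split_dim)
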
4964
4965class TensorScatterElements(Primitive):
4966    """
4967    Write all elements in `updates` to the index specified by `indices` in `input_x` according to the reduction
4968    operation specified by `reduction`.
4969    `axis` controls the direction of the scatter operation.
4970
4971    Refer to :func:`mindspore.ops.tensor_scatter_elements` for more details.
4972
4973    .. warning::
4974        If there are multiple index vectors in `indices` that correspond to the same position,
4975        the value of that position in the output will be nondeterministic.
4976
4977    .. warning::
4978        This is an experimental API that is subject to change or deletion.
4979
4980    Args:
4981        axis (int, optional): Specify which axis to do scatter operation. Default: ``0`` .
4982        reduction (str, optional): The reduction operation to apply, either ``"none"`` or ``"add"`` . Default: ``"none"`` .
4983
4984    Inputs:
4985        - **data** (Tensor) - The target tensor. Its rank must be at least 1.
4986        - **indices** (Tensor) - The index of `data` to do scatter operation whose data type must be int32 or
4987          int64. It has the same rank as `data`. And accepted range is [-s, s) where s is the size along axis.
4988        - **updates** (Tensor) - The tensor doing the scatter operation with `data`,
4989          it has the same type as `data` and the same shape as `indices`.
4990
4991    Outputs:
4992        Tensor, has the same shape and type as `data`.
4993
4994    Supported Platforms:
4995        ``Ascend`` ``GPU`` ``CPU``
4996
4997    Examples:
4998        >>> import mindspore
        >>> import numpy as np
4999        >>> from mindspore import ops
5000        >>> from mindspore import Tensor
5001        >>> op = ops.TensorScatterElements(0, "none")
5002        >>> data = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
5003        >>> indices = Tensor(np.array([[1, 0, 2], [0, 2, 1]]), mindspore.int32)
5004        >>> updates = Tensor(np.array([[0, 0, 0], [0, 0, 0]]), mindspore.float32)
5005        >>> output = op(data, indices, updates)
5006        >>> print(output)
5007        [[ 0.0  0.0  3.0]
5008         [ 0.0  5.0  0.0]
5009         [ 7.0  0.0  0.0]]
5010        >>> import mindspore
        >>> import numpy as np
5011        >>> from mindspore import ops
5012        >>> from mindspore import Tensor
5013        >>> op = ops.TensorScatterElements(1, "add")
5014        >>> data = Tensor(np.array([[1, 2, 3, 4, 5]]), mindspore.float32)
5015        >>> indices = Tensor(np.array([[2, 4]]), mindspore.int32)
5016        >>> updates = Tensor(np.array([[8, 8]]), mindspore.float32)
5017        >>> output = op(data, indices, updates)
5018        >>> print(output)
5019        [[ 1.  2. 11.  4. 13.]]
5020    """
5021
5022    @prim_attr_register
5023    def __init__(self, axis=0, reduction="none"):
5024        """Initialize TensorScatterElements"""
5025        validator.check_value_type("axis", axis, [int], self.name)
5026        validator.check_value_type("reduction", reduction, [str], self.name)
5027        validator.check_string(reduction, ["none", "add"], "reduction", self.name)
5028        self.init_prim_io_names(inputs=['data', 'indices', 'updates'], outputs=['y'])
5029        target = context.get_context("device_target")
5030        if reduction != 'none' and target.lower() == "ascend":
5031            raise ValueError(f"For '{self.name}', "
5032                             f"Currently Ascend device_target only support `reduction`='none', "
5033                             f"but got {reduction}")
5034
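
# NOTE: a NumPy sketch of the TensorScatterElements axis/reduction semantics above;
# it loops for clarity rather than speed and skips validation. Illustrative only.
def _tensor_scatter_elements_numpy(data, indices, updates, axis=0, reduction="none"):
    out = data.copy()
    for pos in np.ndindex(indices.shape):             # every element of `indices`
        target = list(pos)
        target[axis] = indices[pos]                   # redirect the `axis` coordinate
        if reduction == "add":
            out[tuple(target)] += updates[pos]
        else:                                         # "none": plain overwrite
            out[tuple(target)] = updates[pos]
    return out
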
5035
5036class ExtractVolumePatches(Primitive):
5037    """
5038    `ops.ExtractVolumePatches` is deprecated from version 2.3 and will be removed in a future version.
5039
5040    Supported Platforms:
5041        Deprecated
5042    """
5043    @deprecated("2.3", "ops.ExtractVolumePatches", False)
5044    @prim_attr_register
5045    def __init__(self, kernel_size, strides, padding):
5046        validator.check_value_type("kernel_size", kernel_size, (int, list, tuple), self.name)
5047        validator.check_value_type("strides", strides, (int, list, tuple), self.name)
5048        if isinstance(kernel_size, (list, tuple)):
5049            kernel_size = tuple(kernel_size)
5050            if len(kernel_size) == 5:
5051                validator.check_int(kernel_size[0], 1, validator.EQ, "kernel_size[0]", self.name)
5052                validator.check_int(kernel_size[1], 1, validator.EQ, "kernel_size[1]", self.name)
5053        if isinstance(strides, (list, tuple)):
5054            strides = tuple(strides)
5055            if len(strides) == 5:
5056                validator.check_int(strides[0], 1, validator.EQ, "strides[0]", self.name)
5057                validator.check_int(strides[1], 1, validator.EQ, "strides[1]", self.name)
5058        self.kernel_size = _check_3d_int_or_tuple("kernel_size", kernel_size, self.name,
5059                                                  allow_five=True, ret_five=True, greater_zero=True)
5060        self.strides = _check_3d_int_or_tuple("strides", strides, self.name,
5061                                              allow_five=True, ret_five=True, greater_zero=True)
5062        self.add_prim_attr("kernel_size", self.kernel_size)
5063        self.add_prim_attr("strides", self.strides)
5064        validator.check_value_type("padding_dtype", padding, (str), self.name)
5065        self.padding = validator.check_string(padding.upper(), ['VALID', 'SAME'], 'padding', self.name)
5066        self.add_prim_attr("padding", self.padding)
5067
5068
5069class ScatterAddWithAxis(Primitive):
5070    """
5071    'ops.ScatterAddWithAxis' is deprecated from version 2.0 and will be removed in a future version,
5072    use 'ops.TensorScatterElements' instead.
5073
5074    Supported Platforms:
5075        Deprecated
5076
5077    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
5078        >>> op = ops.ScatterAddWithAxis(0)
5079        >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
5080        >>> indices = Tensor(np.array([[1, 0, 2], [0, 2, 1]]), mindspore.int32)
5081        >>> updates = Tensor(np.array([[1, 1, 1], [1, 1, 1]]), mindspore.float32)
5082        >>> output = op(input_x, indices, updates)
5083        >>> print(output)
5084        [[ 2.  3.  3.]
5085         [ 5.  5.  7.]
5086         [ 7.  9. 10.]]
5087        >>> op = ops.ScatterAddWithAxis(1)
5088        >>> input_x = Tensor(np.array([[1, 2, 3, 4, 5]]), mindspore.int32)
5089        >>> indices = Tensor(np.array([[2, 4]]), mindspore.int32)
5090        >>> updates = Tensor(np.array([[8, 8]]), mindspore.int32)
5091        >>> output = op(input_x, indices, updates)
5092        >>> print(output)
5093        [[ 1  2  11  4  13]]
5094    """
5095    __mindspore_signature__ = (
5096        sig.make_sig('input_x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
5097        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
5098        sig.make_sig('updates', dtype=sig.sig_dtype.T)
5099    )
5100
5101    @deprecated("2.0", "ops.TensorScatterElements", False)
5102    @prim_attr_register
5103    def __init__(self, axis=0):
5104        """Initialize ScatterAddWithAxis"""
5105        validator.check_value_type("axis", axis, [int], self.name)
5106        self.init_prim_io_names(
5107            inputs=['input_x', 'indices', 'updates'], outputs=['y'])
5108
5109
5110class Lstsq(Primitive):
5111    r"""
5112    Computes the solutions of the least squares and minimum norm problems of full-rank
5113    matrix `x` of size :math:`(m \times n)` and matrix `a` of size :math:`(m \times k)`.
5114
5115    If :math:`m \geq n`, `Lstsq` solves the least-squares problem:
5116
5117    .. math::
5118
5119       \begin{array}{ll}
5120       \min_y & \|xy-a\|_2
5121       \end{array}
5122
5123    If :math:`m < n`, `Lstsq` solves the least-norm problem:
5124
5125    .. math::
5126
5127       \begin{array}{llll}
5128       \min_y & \|y\|_2 & \text{subject to} & xy = a
5129       \end{array}
5130
5131    Args:
5132        fast (bool, optional): Solving algorithm. Default: ``True`` .
5133
5134            - If `fast` is True, then the solution is computed by solving
5135              the normal equations using Cholesky decomposition.
5136            - If `fast` is False, an algorithm based on numerically robust
5137              completed orthogonal decomposition is used.
5138
5139        l2_regularizer (float, optional): L2 regularization coefficient. Default: ``0.0`` .
5140
5141    Inputs:
5142        - **x** (Tensor) - :math:`(m \times n)` matrix `x`. The input tensor whose data type is
5143          float16, float32 or float64.
5144        - **a** (Tensor) - :math:`(m \times k)` matrix `a`. The input tensor whose data type is
5145          float16, float32 or float64.
5146
5147    Outputs:
5148        Tensor, the least squares or minimum norm problems solution, which has shape
5149        :math:`(n \times k)`. The data type is the same with `x`.
5150
5151    Raises:
5152        TypeError: If the input `x` or `a` is not a Tensor.
5153        TypeError: If dtype of `x` or `a` is not one of: float16, float32, float64.
5154        TypeError: If the dtypes of `x` and `a` are not the same.
5155        ValueError: If the dimension of `x` is not equal to 2.
5156        ValueError: If the dimension of `a` is not equal to 2 or 1.
5157        ValueError: If the length of x_dims[0] is not equal to the length of a_dims[0].
5158
5159    Supported Platforms:
5160        ``CPU``
5161
5162    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
5163        >>> x = Tensor(np.array([[2, 1, 5], [3, 5, 1], [1, 1, 1]]), mindspore.float32)
5164        >>> a = Tensor(np.array([[10, 5], [15, 8], [7, 4]]), mindspore.float32)
5165        >>> op = ops.Lstsq()
5166        >>> output = op(x, a)
5167        >>> print(output)
5168        [[17.000002  11.000002 ]
5169         [-6.5000005 -4.500001 ]
5170         [-3.500002  -2.5000017]]
5171    """
5172
5173    @prim_attr_register
5174    def __init__(self, fast=True, l2_regularizer=0.0):
5175        """Initialize Lstsq"""
5176        validator.check_type_name("fast", fast, True, self.name)
5177        validator.check_type_name("l2_regularizer", l2_regularizer, 0.0, self.name)
5178        self.fast = fast
5179        self.l2_regularizer = l2_regularizer
5180
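
# NOTE: for intuition, the Lstsq example above can be cross-checked against NumPy's
# least-squares solver; this is a sanity check, not the operator's algorithm.
def _lstsq_reference(x, a):
    solution = np.linalg.lstsq(x, a, rcond=None)[0]   # shape (n, k), like the output
    return solution
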
5181
5182class LowerBound(Primitive):
5183    """
5184    Find the index of the lower bound of `values` in sorted sequence `sorted_x` element-wise.
5185
5186    Args:
5187        out_type (:class:`mindspore.dtype`, optional): An optional data type of
5188            ``mindspore.dtype.int32`` and ``mindspore.dtype.int64`` .
5189            Default: ``mindspore.dtype.int32`` .
5190
5191    Inputs:
5192        - **sorted_x** (Tensor) - The input tensor whose dtype is real number and
5193          the data of each row must be sorted in ascending order. The rank must be 2.
5194        - **values** (Tensor) - The input tensor whose dtype is the same as `sorted_x`
5195          and the first dimension of the shape of `values` must be equal to that of
5196          `sorted_x` . The rank must be 2.
5197
5198    Outputs:
5199        Tensor, whose dtype is determined by `out_type` and whose shape is the same
5200        as that of `values`.
5201
5202    Raises:
5203        TypeError: If `sorted_x` is not a Tensor.
5204        TypeError: If `values` is not a Tensor.
5205        TypeError: If `out_type` is invalid.
5206        TypeError: If the type of `sorted_x` is not the same as that of `values`.
5207        ValueError: If rank of the `sorted_x` is not equal to 2.
5208        ValueError: If rank of the `values` is not equal to 2.
5209        ValueError: If the first dimension of the shape of `sorted_x` is not equal to that of `values`.
5210
5211    Supported Platforms:
5212        ``Ascend`` ``GPU`` ``CPU``
5213
5214    Examples:
5215        >>> import mindspore
5216        >>> import numpy as np
5217        >>> from mindspore import Tensor
5218        >>> from mindspore import ops
5219        >>> lowerbound = ops.LowerBound(out_type = mindspore.int32)
5220        >>> sorted_x = Tensor(np.arange(12).reshape(3, 4).astype(np.int8))
5221        >>> values = Tensor(np.array([[3], [4], [8]]).astype(np.int8))
5222        >>> output = lowerbound(sorted_x, values)
5223        >>> print(output)
5224        [[3]
5225         [0]
5226         [0]]
5227    """
5228
5229    @prim_attr_register
5230    def __init__(self, out_type=mstype.int32):
5231        """Initialize LowerBound"""
5232        valid_values = (mstype.int32, mstype.int64)
5233        validator.check_type_name("out_type", out_type, valid_values, self.name)
5234        self.init_prim_io_names(inputs=['sorted_x', 'values'], outputs=['y'])
5235
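
# NOTE: a per-row NumPy sketch of LowerBound using np.searchsorted(..., side='left');
# the UpperBound primitive below corresponds to side='right'. Illustrative only.
def _lower_bound_numpy(sorted_x, values, out_type=np.int32):
    rows = [np.searchsorted(sorted_x[i], values[i], side='left')
            for i in range(sorted_x.shape[0])]
    return np.stack(rows).astype(out_type)
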
5236
5237class UpperBound(Primitive):
5238    """
5239    Returns a tensor containing, for each element of `values`, the index of the upper
5240    bound of that value in the corresponding row of `sorted_x`.
5241
5242    Args:
5243        out_type (:class:`mindspore.dtype`, optional): Specified output type.
5244            Supported types: ``mindspore.dtype.int32`` and ``mindspore.dtype.int64`` .
5245            Default: ``mindspore.dtype.int32`` .
5246
5247    Inputs:
5248        - **sorted_x** (Tensor) - The input tensor whose dtype is real number. The rank must be 2.
5249          Each row of the `sorted_x` needs to be sorted in ascending order.
5250        - **values** (Tensor) - The input tensor whose dtype is the same as `sorted_x`. The rank must be 2.
5251          The shape[0] of the two inputs must be consistent.
5252
5253    Outputs:
5254        Tensor, whose dtype is determined by `out_type` and whose shape is consistent with `values`.
5255
5256    Raises:
5257        TypeError: If `sorted_x` is not a Tensor.
5258        TypeError: If `values` is not a Tensor.
5259        TypeError: If the type of `sorted_x` is not the same as that of `values`.
5260        ValueError: If rank of the `sorted_x` is not equal to 2.
5261        ValueError: If rank of the `values` is not equal to 2.
5262        ValueError: If the number of rows of `sorted_x` is not consistent with that of `values`.
5263
5264    Supported Platforms:
5265        ``Ascend`` ``GPU`` ``CPU``
5266
5267    Examples:
5268        >>> import mindspore
5269        >>> import numpy as np
5270        >>> from mindspore import Tensor
5271        >>> from mindspore import ops
5272        >>> upperbound = ops.UpperBound(out_type = mindspore.int32)
5273        >>> sorted_x = Tensor(np.arange(12).reshape(3, 4).astype(np.int8))
5274        >>> values = Tensor(np.array([[3], [6], [9]]).astype(np.int8))
5275        >>> output = upperbound(sorted_x, values)
5276        >>> print(output)
5277        [[4]
5278         [3]
5279         [2]]
5280    """
5281
5282    @prim_attr_register
5283    def __init__(self, out_type=mstype.int32):
5284        """Initialize UpperBound"""
5285        valid_values = (mstype.int32, mstype.int64)
5286        validator.check_type_name("out_type", out_type, valid_values, self.name)
5287        self.init_prim_io_names(inputs=['sorted_x', 'values'], outputs=['y'])
5288
5289
5290class LogSpace(Primitive):
5291    r"""
5292    Generates a 1-D Tensor with a length of `steps`. The tensor's
5293    values are uniformly distributed on a logarithmic scale, ranging from
5294    :math:`base^{start}` to :math:`base^{end}`, including both endpoints.
5295    The logarithmic scale is based on the specified `base`.
5296
5297    .. math::
5298        \begin{aligned}
5299        &step = (end - start)/(steps - 1)\\
5300        &output = [base^{start}, base^{start + 1 * step}, ... , base^{start + (steps-2) * step}, base^{end}]
5301        \end{aligned}
5302
5303    .. warning::
5304        This is an experimental API that is subject to change or deletion.
5305
5306    Args:
5307        steps (int, optional): The steps must be a non-negative integer. Default: ``10`` .
5308        base (int, optional): The base must be a non-negative integer. Default: ``10`` .
5309        dtype (mindspore.dtype, optional): The dtype of output, include ``mstype.float16`` ,
5310            ``mstype.float32`` or ``mstype.float64`` . Default: ``mstype.float32`` .
5311
5312    Inputs:
5313        - **start** (Tensor) - Start value of interval, with shape of 0-D,
5314          dtype is float16, float32 or float64.
5315        - **end** (Tensor) - End value of interval, with shape of 0-D,
5316          dtype is float16, float32 or float64.
5317
5318    Outputs:
5319        Tensor has the shape as :math:`(steps, )`. Its datatype is set by the attr 'dtype'.
5320
5321    Raises:
5322        TypeError: If `start` or `end` is not a Tensor.
5323        TypeError: If `steps` is not an int.
5324        TypeError: If `base` is not an int.
5325        TypeError: If `dtype` is not mstype.float16, mstype.float32 or
5326            mstype.float64.
5327        ValueError: If `steps` is not a non-negative integer.
5328        ValueError: If `base` is not a non-negative integer.
5329
5330    Supported Platforms:
5331        ``Ascend`` ``GPU`` ``CPU``
5332
5333    Examples:
5334        >>> from mindspore import Tensor, ops
5335        >>> from mindspore import dtype as mstype
5336        >>> logspace = ops.LogSpace(steps = 10, base = 10, dtype=mstype.float32)
5337        >>> start = Tensor(1, mstype.float32)
5338        >>> end = Tensor(10, mstype.float32)
5339        >>> output = logspace(start, end)
5340        >>> print(output)
5341        [1.e+01 1.e+02 1.e+03 1.e+04 1.e+05 1.e+06 1.e+07 1.e+08 1.e+09 1.e+10]
5342    """
5343
5344    @prim_attr_register
5345    def __init__(self, steps=10, base=10, dtype=mstype.float32):
5346        """Initialize Logspace."""
5347        validator.check_value_type("steps", steps, [int], self.name)
5348        validator.check_value_type("base", base, [int], self.name)
5349        validator.check_non_negative_int(steps, "steps", self.name)
5350        validator.check_non_negative_int(base, "base", self.name)
5351        validator.check_value_type("dtype", dtype, [mstype.Type], self.name)
5352        valid_values = (mstype.float16, mstype.float32, mstype.float64)
5353        validator.check_type_name("dtype", dtype, valid_values, self.name)
5354        self.init_prim_io_names(inputs=['start', 'end'], outputs=['y'])
5355
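
# NOTE: up to dtype handling, LogSpace matches NumPy's logspace; a quick sketch of
# the formula in the docstring above, for reference only.
def _log_space_numpy(start, end, steps=10, base=10, dtype=np.float32):
    return np.logspace(start, end, num=steps, base=base, dtype=dtype)
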
5356
5357class Tril(Primitive):
5358    """
5359    Returns the lower triangular portion of the 2-D matrix or the set of matrices
5360    in a batch. The remaining elements of the resulting Tensor are assigned a value of 0.
5361    The lower triangular section of the matrix comprises the
5362    elements on and below the main diagonal.
5363
5364    .. warning::
5365        This is an experimental API that is subject to change or deletion.
5366
5367    Args:
5368        diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: ``0`` ,
5369            indicating the main diagonal.
5370
5371    Inputs:
5372        - **x** (Tensor) - The input tensor with shape :math:`(M, N, *)`
5373          where :math:`*` means any number of additional dimensions.
5374
5375    Outputs:
5376        Tensor, the same shape and data type as the input `x`.
5377
5378    Raises:
5379        TypeError: If `x` is not a Tensor.
5380        TypeError: If `diagonal` is not an int.
5381        ValueError: If the rank of `x` is less than 2.
5382
5383    Supported Platforms:
5384        ``Ascend`` ``GPU`` ``CPU``
5385
5386    Examples:
5387        >>> import numpy as np
5388        >>> from mindspore import Tensor, ops
5389        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
5390        ...                      [ 5,  6,  7,  8],
5391        ...                      [10, 11, 12, 13],
5392        ...                      [14, 15, 16, 17]]))
5393        >>> tril = ops.Tril()
5394        >>> result = tril(x)
5395        >>> print(result)
5396        [[ 1  0  0  0]
5397         [ 5  6  0  0]
5398         [10 11 12  0]
5399         [14 15 16 17]]
5400        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
5401        ...                      [ 5,  6,  7,  8],
5402        ...                      [10, 11, 12, 13],
5403        ...                      [14, 15, 16, 17]]))
5404        >>> tril = ops.Tril(diagonal=1)
5405        >>> result = tril(x)
5406        >>> print(result)
5407        [[ 1  2  0  0]
5408         [ 5  6  7  0]
5409         [10 11 12 13]
5410         [14 15 16 17]]
5411        >>> x = Tensor(np.array([[ 1,  2,  3,  4],
5412        ...                      [ 5,  6,  7,  8],
5413        ...                      [10, 11, 12, 13],
5414        ...                      [14, 15, 16, 17]]))
5415        >>> tril = ops.Tril(diagonal=-1)
5416        >>> result = tril(x)
5417        >>> print(result)
5418        [[ 0  0  0  0]
5419         [ 5  0  0  0]
5420         [10 11  0  0]
5421         [14 15 16  0]]
5422    """
5423
5424    @prim_attr_register
5425    def __init__(self, diagonal=0):
5426        """Initialize Tril."""
5427        self.init_prim_io_names(inputs=["x"], outputs=["y"])
5428        validator.check_value_type("diagonal", diagonal, [int], self.name)
5429
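
# NOTE: for 2-D inputs, Tril(diagonal=k)(x) agrees with NumPy's np.tril(x, k);
# a one-line reference for the examples above.
def _tril_reference(x, diagonal=0):
    return np.tril(x, k=diagonal)
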
5430
5431class IndexFill(Primitive):
5432    """
5433    Fills the elements under the `dim` dimension of the input Tensor `x` with the input `value`
5434    by selecting the indices in the order given in `index`.
5435
5436    .. warning::
5437        This is an experimental API that is subject to change or deletion.
5438
5439    Refer to :func:`mindspore.ops.index_fill` for more details.
5440
5441    Inputs:
5442        - **x** (Tensor) - Input tensor.
5443        - **dim** (Union[int, Tensor]) - Dimension along which to fill the input tensor. Only supports
5444          a 0-dimensional tensor or an int number.
5445        - **index** (Tensor) - Indices of the input tensor to fill in.
5446        - **value** (Union[bool, int, float, Tensor]) - Value to fill the input tensor.
5447
5448    Outputs:
5449        Tensor, has the same type and shape as input tensor.
5450
5451    Supported Platforms:
5452        ``Ascend`` ``GPU`` ``CPU``
5453
5454    Examples:
5455        >>> import mindspore
5456        >>> import numpy as np
5457        >>> from mindspore import Tensor, ops
5458        >>> index_fill = ops.IndexFill()
5459        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32))
5460        >>> index = Tensor([0, 2], mindspore.int32)
5461        >>> value = Tensor(-2.0, mindspore.float32)
5462        >>> y = index_fill(x, 1, index, value)
5463        >>> print(y)
5464        [[-2. 2. -2.]
5465         [-2. 5. -2.]
5466         [-2. 8. -2.]]
5467    """
5468
5469    @prim_attr_register
5470    def __init__(self):
5471        """Initialize IndexFill"""
5472        self.init_prim_io_names(inputs=['x', 'dim', 'index', 'value'], outputs=['y'])
5473
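
# NOTE: a NumPy sketch of IndexFill using a slice object per dimension; the name
# `_index_fill_numpy` is illustrative and not part of the API.
def _index_fill_numpy(x, dim, index, value):
    out = x.copy()
    slicer = [slice(None)] * out.ndim
    slicer[dim] = index                               # pick the given indices along `dim`
    out[tuple(slicer)] = value
    return out
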
5474
5475class IndexPut(Primitive):
5476    r"""
5477    According to the index numbers in `indices`, replaces the corresponding values in `x1` with the values in `x2`.
5478
5479    Args:
5480        accumulate (int): If `accumulate` is 1, the elements in `x2` are added to `x1`;
5481            otherwise the elements in `x2` replace the corresponding elements in `x1`. Must be 0 or 1. Default: ``0`` .
5482
5483    Inputs:
5484        - **x1** (Tensor) - The assigned target tensor, 1-D or higher dimensional.
5485        - **x2** (Tensor) - 1-D Tensor of the same type as `x1`. If the size of `x2` is 1,
5486          it will broadcast to the same size as `x1`.
5487        - **indices** (tuple[Tensor], list[Tensor]) - the indices of type int32 or int64, used to index into x1.
5488          The rank of tensors in indices should be 1-D, size of indices should <= x1.rank and the tensors in indices
5489          should be broadcastable.
5490
5491    Outputs:
5492        Tensor, has the same dtype and shape as `x1`.
5493
5494    Raises:
5495        TypeError: If the dtype of `x1` is not equal to the dtype of `x2`.
5496        TypeError: If `indices` is not tuple[Tensor] or list[Tensor].
5497        TypeError: If the dtype of tensors in `indices` are not int32 or int64.
5498        TypeError: If the dtype of tensors in `indices` are inconsistent.
5499        TypeError: If `accumulate` is not an int.
5500        ValueError: If `x2` is not a 1-D tensor.
5501        ValueError: If size(x2) is not 1 or the max size of the tensors in `indices` when rank(x1) == size(indices).
5502        ValueError: If size(x2) is not 1 or x1.shape[-1] when rank(x1) > size(indices).
5503        ValueError: If the tensors in `indices` are not 1-D.
5504        ValueError: If the tensors in `indices` are not broadcastable.
5505        ValueError: If size(indices) > rank(x1).
5506        ValueError: If `accumulate` is not equal to 0 or 1.
5507
5508    Supported Platforms:
5509        ``Ascend`` ``CPU``
5510
5511    Examples:
5512        >>> import mindspore
5513        >>> import numpy as np
5514        >>> from mindspore import Tensor, ops
5515        >>> x1 = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
5516        >>> x2 = Tensor(np.array([3]).astype(np.int32))
5517        >>> indices = [Tensor(np.array([0, 0]).astype(np.int32)), Tensor(np.array([0, 1]).astype(np.int32))]
5518        >>> accumulate = 1
5519        >>> op = ops.IndexPut(accumulate = accumulate)
5520        >>> output = op(x1, x2, indices)
5521        >>> print(output)
5522        [[4 5 3]
5523         [4 5 6]]
5524    """
5525
5526    @prim_attr_register
5527    def __init__(self, accumulate=0):
5528        self.accumulate = accumulate
5529        validator.check_value_type('accumulate', accumulate, [int], self.name)
5530        self.init_prim_io_names(inputs=['x1', 'x2', 'indices'], outputs=['y'])
5531
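
# NOTE: a NumPy sketch of IndexPut matching the example above; broadcasting corner
# cases and validation are ignored in this reference.
def _index_put_numpy(x1, x2, indices, accumulate=0):
    out = x1.copy()
    if accumulate == 1:
        np.add.at(out, tuple(indices), x2)            # duplicate indices accumulate
    else:
        out[tuple(indices)] = x2
    return out
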
5532
5533class SegmentMax(Primitive):
5534    r"""
5535    Computes the maximum along segments of a Tensor.
5536
5537    Specifically, it generates a new Tensor `output` such that :math:`output_i=max_j(input\_x_j)`
5538    in which the maximum value is obtained from all elements corresponding
5539    to :math:`j` that meets :math:`segment\_ids[j] == i`.
5540    If a segment contains no elements for a given segment :math:`i`,
5541    then the corresponding element in the output Tensor is set to zero: :math:`output[i] = 0`.
5542
5543    Inputs:
5544        - **input_x** (Tensor) - The input tensor whose dtype is real number and whose rank is not less than 1.
5545        - **segment_ids** (Tensor) - A 1-D tensor whose dtype is int32 or int64. The size of tensor must be equal to
5546          the first dimension of the shape of `input_x`. Values must be sorted in ascending order and need not cover
5547          all values in the full range of valid values, but must be non-negative integers. Only constant values are allowed.
5548
5549    Outputs:
5550        Tensor, whose dtype and the dimension of the shape is the same as `input_x`. The first dimension of the shape
5551        is equal to the value of the last element of `segment_ids` plus one, and the other dimensions are the same as
5552        those of `input_x`.
5553
5554    Raises:
5555        TypeError: If `input_x` is not a Tensor.
5556        TypeError: If `segment_ids` is not a Tensor.
5557        TypeError: If the dtype of `input_x` is invalid.
5558        TypeError: If the dtype of `segment_ids` is invalid.
5559        ValueError: If the rank of `input_x` is less than 1.
5560        ValueError: If the rank of `segment_ids` is not equal to 1.
5561        ValueError: If the size of `segment_ids` is not equal to the first dimension of the shape of `input_x`.
5562        ValueError: If the values of `segment_ids` are negative.
5563        ValueError: If the values of `segment_ids` are not sorted in ascending order.
5564
5565    Supported Platforms:
5566        ``Ascend`` ``GPU`` ``CPU``
5567
5568    Examples:
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
5569        >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mstype.float64)
5570        >>> segment_ids = Tensor([0, 0, 2], mstype.int64)
5571        >>> op = ops.SegmentMax()
5572        >>> output = op(x, segment_ids)
5573        >>> print(output)
5574        [[4. 5. 6.]
5575         [0. 0. 0.]
5576         [7. 8. 9.]]
5577    """
5578
5579    @prim_attr_register
5580    def __init__(self):
5581        """Initialize SegmentMax"""
5582        self.add_prim_attr("max_length", 1000000)
5583        self.init_prim_io_names(inputs=['input_x', 'segment_ids'], outputs=['output'])
5584
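
# NOTE: a NumPy sketch shared by the Segment* primitives in this file: SegmentMax
# uses np.maximum.reduce; SegmentMin below corresponds to np.minimum.reduce, SegmentSum
# to np.add.reduce, SegmentMean to np.mean, and SegmentProd to np.multiply.reduce with
# empty=1. Illustrative reference only, not the kernel implementation.
def _segment_reduce_numpy(x, segment_ids, reduce_fn=np.maximum.reduce, empty=0):
    num_segments = int(segment_ids[-1]) + 1           # last id + 1, as documented
    out = np.full((num_segments,) + x.shape[1:], empty, dtype=x.dtype)
    for i in range(num_segments):
        rows = x[segment_ids == i]
        if rows.size:                                 # empty segments keep `empty`
            out[i] = reduce_fn(rows, axis=0)
    return out
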
5585
5586class SegmentMin(Primitive):
5587    r"""
5588    Computes the minimum along segments of a Tensor.
5589
5590    Specifically, it generates a new Tensor `output` such that :math:`output_i=min_j(input\_x_j)`
5591    in which the minimum value is obtained from all elements corresponding
5592    to :math:`j` that meets :math:`segment\_ids[j] == i`.
5593    If a segment contains no elements for a given segment :math:`i`,
5594    then the corresponding element in the output Tensor is set to zero: :math:`output[i] = 0`.
5595
5596    Inputs:
5597        - **input_x** (Tensor) - The input tensor whose dtype is real number and whose rank is not less than 1.
5598        - **segment_ids** (Tensor) - A 1-D tensor whose dtype is int32 or int64. The size of tensor must be equal to
5599          the first dimension of the shape of `input_x`. Values must be sorted in ascending order and need not cover
5600          all values in the full range of valid values, but must be non-negative integers. Only constant values are allowed.
5601
5602    Outputs:
5603        Tensor, whose dtype and the dimension of the shape is the same as `input_x`. The first dimension of the shape
5604        is equal to the value of the last element of `segment_ids` plus one, and the other dimensions are the same as
5605        those of `input_x`.
5606
5607    Raises:
5608        TypeError: If `input_x` is not a Tensor.
5609        TypeError: If `segment_ids` is not a Tensor.
5610        TypeError: If the dtype of `input_x` is invalid.
5611        TypeError: If the dtype of `segment_ids` is invalid.
5612        ValueError: If the rank of `input_x` is less than 1.
5613        ValueError: If the rank of `segment_ids` is not equal to 1.
5614        ValueError: If the size of `segment_ids` is not equal to the first dimension of the shape of `input_x`.
5615        ValueError: If the values of `segment_ids` are negative.
5616        ValueError: If the values of `segment_ids` are not sorted in ascending order.
5617
5618    Supported Platforms:
5619        ``Ascend`` ``GPU`` ``CPU``
5620
5621    Examples:
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
5622        >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mstype.float64)
5623        >>> segment_ids = Tensor([0, 0, 2], mstype.int64)
5624        >>> op = ops.SegmentMin()
5625        >>> output = op(x, segment_ids)
5626        >>> print(output)
5627        [[1. 2. 3.]
5628         [0. 0. 0.]
5629         [7. 8. 9.]]
5630    """
5631
5632    @prim_attr_register
5633    def __init__(self):
5634        """Initialize SegmentMin"""
5635        self.add_prim_attr("max_length", 1000000)
5636        self.init_prim_io_names(inputs=['input_x', 'segment_ids'], outputs=['output'])
5637
5638
5639class SegmentSum(Primitive):
5640    r"""
5641    Computes the sum along segments of a Tensor.
5642
5643    Specifically, it generates a new Tensor `output` such that :math:`output_i = \sum_j input\_x_j`
5644    in which the sum is obtained from all elements corresponding
5645    to :math:`j` that meets :math:`segment\_ids[j] == i`.
5646    If a segment contains no elements for a given segment :math:`i`,
5647    then the corresponding element in the output Tensor is set to 0: :math:`output[i] = 0`.
5648
5649    .. warning::
5650        If the dtype of `input_x` is complex number, the gradient can not be calculated.
5651
5652    Inputs:
5653        - **input_x** (Tensor) - The input tensor whose dtype is real number or complex number and whose rank is not
5654          less than 1.
5655        - **segment_ids** (Tensor) - A 1-D tensor whose dtype is int32 or int64. The size of tensor must be equal to
5656          the first dimension of the shape of `input_x`. Values must be sorted in ascending order and need not cover
5657          all values in the full range of valid values, but must be non-negative integers. Only constant values are allowed.
5658
5659    Outputs:
5660        Tensor, whose dtype and the dimension of the shape is the same as `input_x`. The first dimension of the shape
5661        is equal to the value of the last element of `segment_ids` plus one, and the other dimensions are the same as
5662        those of `input_x`.
5663
5664    Raises:
5665        TypeError: If `input_x` is not a Tensor.
5666        TypeError: If `segment_ids` is not a Tensor.
5667        TypeError: If the dtype of `input_x` is invalid.
5668        TypeError: If the dtype of `segment_ids` is invalid.
5669        ValueError: If the rank of `input_x` is less than 1.
5670        ValueError: If the rank of `segment_ids` is not equal to 1.
5671        ValueError: If the size of `segment_ids` is not equal to the first dimension of the shape of `input_x`.
5672        ValueError: If the values of `segment_ids` are negative.
5673        ValueError: If the values of `segment_ids` are not sorted in ascending order.
5674
5675    Supported Platforms:
5676        ``Ascend`` ``GPU`` ``CPU``
5677
5678    Examples:
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
5679        >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mstype.float64)
5680        >>> segment_ids = Tensor([0, 0, 2], mstype.int64)
5681        >>> op = ops.SegmentSum()
5682        >>> output = op(x, segment_ids)
5683        >>> print(output)
5684        [[5. 7. 9.]
5685         [0. 0. 0.]
5686         [7. 8. 9.]]
5687    """
5688
5689    @prim_attr_register
5690    def __init__(self):
5691        """Initialize SegmentSum"""
5692        self.add_prim_attr("max_length", 1000000)
5693        self.init_prim_io_names(inputs=['input_x', 'segment_ids'], outputs=['output'])
5694
5695
5696class LeftShift(Primitive):
5697    r"""
5698    Shifts the value at each position of the tensor to the left by several bits.
5699    The inputs are two tensors whose dtypes must be consistent and whose
5700    shapes must be broadcastable.
5701    The operator does not support implicit type conversion.
5702
5703    .. math::
5704
5705        \begin{aligned}
5706        &out_{i} =x_{i} << y_{i}
5707        \end{aligned}
5708
5709    .. warning::
5710        This is an experimental API that is subject to change or deletion.
5711
5712    Inputs:
5713        - **x1** (Tensor) - The target tensor, whose dtype can be any int or uint type;
5714          it will be shifted to the left by `x2` element-wise.
5715        - **x2** (Tensor) - The tensor must have the same dtype as `x1`.
5716          And the tensor must have the same shape as `x1` or could be broadcast with `x1`.
5717
5718    Outputs:
5719        - **output** (Tensor) - The output tensor, has the same dtype as `x1`.
5720          And the shape of the output tensor is the same shape as `x1`, or the same shape
5721          as `x1` and `x2` after broadcasting.
5722
5723    Raises:
5724        TypeError: If `x1` or `x2` has wrong type.
5725        TypeError: If `x1` or `x2` is not tensor.
5726        ValueError: If `x1` and `x2` could not be broadcast.
5727
5728    Supported Platforms:
5729        ``Ascend`` ``GPU`` ``CPU``
5730
5731    Examples:
5732        >>> import numpy as np
5733        >>> from mindspore import Tensor, ops
5734        >>> left_shift = ops.LeftShift()
5735        >>> x1 = Tensor(np.array([1, 2, 3]).astype(np.int8))
5736        >>> x2 = Tensor(np.array([0, 1, -1]).astype(np.int8))
5737        >>> output = left_shift(x1, x2)
5738        >>> print(output)
5739        [1 4 0]
5740    """
5741
5742    @prim_attr_register
5743    def __init__(self):
5744        """Initialize LeftShift"""
5745        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
5746
5747
5748class FillDiagonal(Primitive):
5749    """
5750    Fills the main diagonal of a Tensor in-place with a specified value and returns the result.
5751    The input has at least 2 dimensions, and all dimensions of input must be equal in length
5752    when the dimension of input is greater than 2.
5753
5754    .. warning::
5755        This is an experimental API that is subject to change or deletion.
5756
5757    Args:
5758        fill_value (float): The value to fill the diagonal of `input_x`.
5759        wrap (bool, optional): Controls whether the diagonal elements continue onto the
5760            remaining rows in case of a tall matrix (a matrix with more rows than columns).
5761            The examples below demonstrate how it works on a tall matrix when `wrap` is ``True`` .
5762            Default: ``False`` .
5763
5764    Inputs:
5765        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
5766
5767    Outputs:
5768        - **y** (Tensor) - Tensor, has the same shape and data type as the input `input_x`.
5769
5770    Raises:
5771        ValueError: If the dimension of `input_x` is not greater than 1.
5772        ValueError: If the size of each dimension is not equal, when the dimension is greater than 2.
5773
5774    Supported Platforms:
5775        ``Ascend`` ``GPU`` ``CPU``
5776
5777    Examples:
5778        >>> import numpy as np
5779        >>> from mindspore import Tensor, ops
5780        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32))
5781        >>> fill_value = 9.9
5782        >>> fill_diagonal = ops.FillDiagonal(fill_value)
5783        >>> y = fill_diagonal(x)
5784        >>> print(y)
5785        [[9.9 2.  3. ]
5786         [4.  9.9 6. ]
5787         [7.  8.  9.9]]
5788        >>> x = Tensor(np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4], [5, 5, 5]]).astype(np.int32))
5789        >>> fill_value = 9.0
5790        >>> fill_diagonal = ops.FillDiagonal(fill_value)
5791        >>> y = fill_diagonal(x)
5792        >>> print(y)
5793        [[9 0 0]
5794         [1 9 1]
5795         [2 2 9]
5796         [3 3 3]
5797         [4 4 4]
5798         [5 5 5]]
5799        >>> x = Tensor(np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3],
5800        ...                      [4, 4, 4], [5, 5, 5], [6, 6, 6]]).astype(np.int64))
5801        >>> fill_value = 9.0
5802        >>> wrap = True
5803        >>> fill_diagonal = ops.FillDiagonal(fill_value, wrap)
5804        >>> y = fill_diagonal(x)
5805        >>> print(y)
5806        [[9 0 0]
5807         [1 9 1]
5808         [2 2 9]
5809         [3 3 3]
5810         [9 4 4]
5811         [5 9 5]
5812         [6 6 9]]
5813    """
5814
5815    @prim_attr_register
5816    def __init__(self, fill_value, wrap=False):
5817        """Initialize FillDiagonal"""
5818        validator.check_value_type('fill_value', fill_value, [float], self.name)
5819        self.fill_value = fill_value
5820        validator.check_value_type('wrap', wrap, [bool], self.name)
5821        self.init_prim_io_names(inputs=['input_x'], outputs=['y'])
5822
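
# NOTE: for 2-D inputs, FillDiagonal matches NumPy's np.fill_diagonal, including the
# `wrap` behavior for tall matrices shown above; illustrative only.
def _fill_diagonal_reference(x, fill_value, wrap=False):
    out = x.copy()
    np.fill_diagonal(out, fill_value, wrap=wrap)      # in-place on the copy
    return out
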
5823
5824class HammingWindow(Primitive):
5825    r"""
5826    Computes the hamming window function with input window length.
5827
5828    .. math::
5829
5830        w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
5831
5832    where :math:`N` is the full window size.
5833
5834    .. warning::
5835        This is an experimental API that is subject to change or deletion.
5836
5837    Args:
5838        periodic (bool, optional): a flag that determines whether the returned window trims off
5839            the last duplicate value from the symmetric window. Default: ``True`` .
5840
5841            - If True, returns a window to be used as a periodic function; in the formula above,
5842              :math:`N = \text{length} + 1`.
5843            - If False, returns a symmetric window, :math:`N = \text{length}`.
5844
5845        alpha (float, optional): The coefficient :math:`\alpha` in the equation above. Default: ``0.54`` .
5846        beta (float, optional): The coefficient :math:`\beta` in the equation above. Default: ``0.46`` .
5847        dtype (:class:`mindspore.dtype`, optional): An optional data type of ``mstype.float16`` ,
5848            ``mstype.float32`` and ``mstype.float64`` . Default: ``mstype.float32``.
5849
5850    Inputs:
5851        - **length** (Tensor) - a positive integer tensor controlling the returned window size, must be 1D.
5852
5853    Outputs:
5854        Tensor, a 1-D tensor containing the window, whose shape is :math:`(\text{length},)`.
5855
5856    Raises:
5857        TypeError: If `length` is not a Tensor.
5858        TypeError: If dtype of `length` is not integer data type.
5859        TypeError: If `periodic` is not a bool.
5860        TypeError: If `alpha` is not a float.
5861        TypeError: If `beta` is not a float.
5862        TypeError: If `dtype` is not mindspore.float16, mindspore.float32 or mindspore.float64.
5863        ValueError: If dimension of `length` is not 1.
5864        ValueError: If data of `length` is negative.
5865
5866    Supported Platforms:
5867        ``Ascend`` ``GPU`` ``CPU``
5868
5869    Examples:
5870        >>> import numpy as np
5871        >>> from mindspore import Tensor, ops
5872        >>> # case 1: periodic=True.
5873        >>> length = Tensor(np.array([6]).astype(np.int32))
5874        >>> hamming_window = ops.HammingWindow(periodic=True)
5875        >>> y = hamming_window(length)
5876        >>> print(y)
5877        [0.08000001 0.31       0.77000004 1.         0.77000004 0.31      ]
5878        >>> # case 2: periodic=False.
5879        >>> length = Tensor(np.array([7]).astype(np.int32))
5880        >>> hamming_window = ops.HammingWindow(periodic=False)
5881        >>> y = hamming_window(length)
5882        >>> print(y)
5883        [0.08000001 0.31       0.77000004 1.         0.77000004 0.31       0.08000001]
5884    """
5885
5886    @prim_attr_register
5887    def __init__(self, periodic=True, alpha=0.54, beta=0.46, dtype=mstype.float32):
5888        """Initialize HammingWindow"""
5889        validator.check_value_type("periodic", periodic, [bool], self.name)
5890        validator.check_value_type("alpha", alpha, [float], self.name)
5891        validator.check_value_type("beta", beta, [float], self.name)
5892        validator.check_value_type("dtype", dtype, [mstype.Type], self.name)
5893        valid_values = (mstype.float16, mstype.float32, mstype.float64)
5894        validator.check_type_name("dtype", dtype, valid_values, self.name)
5895        self.init_prim_io_names(inputs=['length'], outputs=['y'])
5896        if dtype == mstype.float16:
5897            self.add_prim_attr('dtype', 1)
5898        elif dtype == mstype.float32:
5899            self.add_prim_attr('dtype', 0)
5900        else:
5901            self.add_prim_attr('dtype', 11)
5902
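
# NOTE: a direct NumPy transcription of the window formula above; `periodic=True`
# uses N = length + 1, which drops the duplicated symmetric endpoint. Illustrative only.
def _hamming_window_numpy(length, periodic=True, alpha=0.54, beta=0.46, dtype=np.float32):
    n_fft = length + 1 if periodic else length
    n = np.arange(length, dtype=np.float64)
    w = alpha - beta * np.cos(2.0 * np.pi * n / (n_fft - 1))
    return w.astype(dtype)
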
5903
5904class AffineGrid(Primitive):
5905    r"""
5906    Creates a 2D or 3D flow field (sampling grid) based on a batch of affine matrices `theta`.
5907
5908    .. warning::
5909        This is an experimental API that is subject to change or deletion.
5910
5911    Refer to :func:`mindspore.ops.affine_grid` for more details.
5912
5913    Args:
5914        align_corners (bool, optional): Geometrically, each pixel of the input is viewed as a square instead of a dot.
5915            If ``True``, the extremum values -1 and 1 refer to the centers of the corner pixels rather than their corners.
5916            If ``False``, -1 and 1 refer to the corners of the pixels, so that sampling is
5917            independent of the resolution of the image. Default: ``False`` .
5918
5919    Inputs:
5920        - **theta** (Tensor) - The input tensor of flow field whose dtype is float16, float32.
5921          Input batch of affine matrices with shape :math:`(N, 2, 3)` for 2D grid or :math:`(N, 3, 4)` for 3D grid.
5922        - **output_size** (tuple[int]) - The target output image size.
5923          The value of target output with format :math:`(N, C, H, W)` for 2D grid
5924          or :math:`(N, C, D, H, W)` for 3D grid.
5925
5926    Outputs:
5927        Tensor, a tensor whose data type is same as 'theta', and the shape is :math:`(N, H, W, 2)` for 2D grid
5928        or :math:`(N, D, H, W, 3)` for 3D grid.
5929
5930    Supported Platforms:
5931        ``Ascend`` ``GPU`` ``CPU``
5932
5933    Examples:
5934        >>> import mindspore
5935        >>> from mindspore import Tensor, ops
5936        >>> affinegrid = ops.AffineGrid(align_corners=False)
5937        >>> theta = Tensor([[[0.8, 0.5, 0],[-0.5, 0.8, 0]]], mindspore.float32)
5938        >>> out_size = (1, 3, 2, 3)
5939        >>> output = affinegrid(theta, out_size)
5940        >>> print(output)
5941        [[[[-0.78333336 -0.06666666]
5942           [-0.25       -0.4       ]
5943           [ 0.28333336 -0.73333335]]
5944          [[-0.28333336  0.73333335]
5945           [ 0.25        0.4       ]
5946           [ 0.78333336  0.06666666]]]]
5947    """
5948
5949    @prim_attr_register
5950    def __init__(self, align_corners=False):
5951        """Initialize AffineGrid."""
5952        validator.check_value_type("align_corners", align_corners, [bool], self.name)
5953        self.init_prim_io_names(inputs=['theta', 'output_size'], outputs=['y'])
5954
5955
5956class SegmentMean(Primitive):
5957    r"""
5958    Computes the mean along segments of a Tensor.
5959
5960    Specifically, it generates a new Tensor `output` such that :math:`output_i=mean_j(input\_x_j)`
5961    in which the mean value is obtained from all elements corresponding
5962    to :math:`j` that meets :math:`segment\_ids[j] == i`.
5963    If a segment contains no elements for a given segment :math:`i`,
5964    then the corresponding element in the output Tensor is set to zero: :math:`output[i] = 0`.
5965
5966    .. warning::
5967        If the dtype of `input_x` is complex number, the gradient can not be calculated.
5968
5969    Inputs:
5970        - **input_x** (Tensor) - The input tensor whose dtype is real number or complex number and whose rank is not
5971          less than 1.
5972        - **segment_ids** (Tensor) - A 1-D tensor whose dtype is int32 or int64. The size of tensor must be equal to
5973          the first dimension of the shape of `input_x`. Values must be sorted in ascending order and need not cover
5974          all values in the full range of valid values, but must be non-negative integers. Only constant values are allowed.
5975
5976    Outputs:
5977        Tensor, whose dtype and the dimension of the shape is the same as `input_x`. The first dimension of the shape
5978        is equal to the value of the last element of `segment_ids` plus one, and the other dimensions are the same as
5979        those of `input_x`.
5980
5981    Raises:
5982        TypeError: If `input_x` is not a Tensor.
5983        TypeError: If `segment_ids` is not a Tensor.
5984        TypeError: If the dtype of `input_x` is invalid.
5985        TypeError: If the dtype of `segment_ids` is invalid.
5986        ValueError: If the rank of `input_x` is less than 1.
5987        ValueError: If the rank of `segment_ids` is not equal to 1.
5988        ValueError: If the size of `segment_ids` is not equal to the first dimension of the shape of `input_x`.
5989        ValueError: If the values of `segment_ids` are negative.
5990        ValueError: If the values of `segment_ids` are not sorted in ascending order.
5991
5992    Supported Platforms:
5993        ``Ascend`` ``GPU`` ``CPU``
5994
5995    Examples:
5996        >>> x = Tensor([[1, 2, 3], [1, 2, 3], [7, 8, 9]], mstype.float64)
5997        >>> segment_ids = Tensor([0, 0, 2], mstype.int64)
5998        >>> op = ops.SegmentMean()
5999        >>> output = op(x, segment_ids)
6000        >>> print(output)
6001        [[1. 2. 3.]
6002         [0. 0. 0.]
6003         [7. 8. 9.]]
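
        A shape-only sketch (assuming the empty-segment semantics above): ending `segment_ids` at 4 leaves
        segments 1-3 empty, so the output should have ``4 + 1`` rows:

        >>> segment_ids = Tensor([0, 0, 4], mstype.int64)  # assumption: segments 1-3 receive no elements
        >>> print(op(x, segment_ids).shape)
        (5, 3)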
    """

    @prim_attr_register
    def __init__(self):
        """Initialize SegmentMean"""
        self.add_prim_attr("max_length", 1000000)
        self.init_prim_io_names(inputs=['input_x', 'segment_ids'], outputs=['output'])


class SegmentProd(Primitive):
    r"""
    Computes the product along segments of a Tensor.

    Specifically, it generates a new Tensor `output` such that :math:`output_i = \prod_j input\_x_j`
    in which the product is obtained from all elements corresponding
    to :math:`j` that meets :math:`segment\_ids[j] == i`.
    If a segment contains no elements for a given segment :math:`i`,
    then the corresponding element in the output Tensor is set to 1: :math:`output[i] = 1`.

    .. warning::
        If the dtype of `input_x` is a complex number, the gradient can not be calculated.

    Inputs:
        - **input_x** (Tensor) - The input tensor whose dtype is a real or complex number type and whose rank is
          not less than 1.
        - **segment_ids** (Tensor) - A 1-D tensor whose dtype is int32 or int64. The size of the tensor must be
          equal to the first dimension of the shape of `input_x`. Values must be non-negative integers sorted in
          ascending order, and need not cover the full range of valid values. Only constant values are allowed.

    Outputs:
        Tensor, whose dtype and rank are the same as those of `input_x`. The first dimension of the shape
        is equal to the value of the last element of `segment_ids` plus one, and the other dimensions are the same as
        those of `input_x`.

    Raises:
        TypeError: If `input_x` is not a Tensor.
        TypeError: If `segment_ids` is not a Tensor.
        TypeError: If the dtype of `input_x` is invalid.
        TypeError: If the dtype of `segment_ids` is invalid.
        ValueError: If the rank of `input_x` is less than 1.
        ValueError: If the rank of `segment_ids` is not equal to 1.
        ValueError: If the size of `segment_ids` is not equal to the first dimension of the shape of `input_x`.
        ValueError: If the values of `segment_ids` are negative.
        ValueError: If the values of `segment_ids` are not sorted in ascending order.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mstype.float64)
        >>> segment_ids = Tensor([0, 0, 2], mstype.int64)
        >>> op = ops.SegmentProd()
        >>> output = op(x, segment_ids)
        >>> print(output)
        [[ 4. 10. 18.]
         [ 1.  1.  1.]
         [ 7.  8.  9.]]
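
        In the example above, rows 0 and 1 share segment 0, so the first output row is the elementwise
        product :math:`[1 \cdot 4, 2 \cdot 5, 3 \cdot 6] = [4, 10, 18]`, and the empty segment 1 is filled
        with ones. A shape-only sketch (assuming the same semantics):

        >>> segment_ids = Tensor([0, 1, 3], mstype.int64)  # assumption: segment 2 receives no elements
        >>> print(op(x, segment_ids).shape)
        (4, 3)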
    """

    @prim_attr_register
    def __init__(self):
        """Initialize SegmentProd"""
        self.add_prim_attr("max_length", 1000000)
        self.init_prim_io_names(inputs=['input_x', 'segment_ids'], outputs=['output'])


class PopulationCount(Primitive):
    r"""
    Computes the element-wise population count (a.k.a. bitsum, bitcount).

    Refer to :func:`mindspore.ops.population_count` for more details.

    Inputs:
        - **input_x** (Tensor) - Tensor of any dimension. The data type must be int16 or uint16 (Ascend).
          The data type must be int8, int16, int32, int64, uint8, uint16, uint32 or uint64 (CPU and GPU).

    Outputs:
        Tensor, with the same shape as the input, and the data type is uint8.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, ops
        >>> input_x = Tensor([0, 1, 3], mindspore.int16)
        >>> output = ops.PopulationCount()(input_x)
        >>> print(output)
        [0 1 2]
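
        Each result is the number of set bits in the binary representation of the element, e.g.
        :math:`3 = 11_2` has two set bits. A further sketch (values chosen only for illustration):

        >>> input_x = Tensor([7, 10, 255], mindspore.int16)  # 0b111, 0b1010, 0b11111111
        >>> print(ops.PopulationCount()(input_x))
        [3 2 8]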
    """

    @prim_attr_register
    def __init__(self):
        """Initialize PopulationCount"""
        self.init_prim_io_names(inputs=['input'], outputs=['output'])


class TopK(Primitive):
    """
    Finds values and indices of the `k` largest entries along the last dimension.

    .. warning::
        - If `sorted` is set to ``False`` , the aicpu operator will be used and the performance may be
          reduced. In addition, due to different memory layouts and traversal methods on different
          platforms, the display order of the calculation results may be inconsistent when `sorted`
          is ``False`` .

    If `input_x` is a one-dimensional Tensor, finds the `k` largest entries in the Tensor and outputs
    their values and indices as Tensors: `values` holds the `k` largest items of `input_x` and
    `indices` holds their positions.

    For a multi-dimensional matrix,
    calculates the first `k` entries in each row (corresponding vector along the last dimension), therefore:

    .. math::

        values.shape = indices.shape = input.shape[:-1] + [k]

    If the two compared elements are the same, the one with the smaller index value is returned first.

    Args:
        sorted (bool, optional): If ``True`` , the obtained elements will be sorted by the values in descending order.
            If ``False`` , the obtained elements will not be sorted. Default: ``True`` .

    Inputs:
        - **input_x** (Tensor) - Input to be computed. 0-D input is supported on GPU, but not on Ascend or CPU.
          Supported dtypes:

          - Ascend: int8, uint8, int32, int64, float16, float32.
          - GPU: float16, float32.
          - CPU: all numeric types.

        - **k** (Union(Tensor, int)) - The number of top elements to be computed along the last dimension.
          If `k` is a Tensor, the supported dtype is int32 and it should be 0-D or 1-D with shape :math:`(1, )` .

    Outputs:
        A tuple consisting of `values` and `indices`.

        - **values** (Tensor) - The `k` largest elements in each slice of the last dimension.
        - **indices** (Tensor) - The indices of the values within the last dimension of the input.

    Raises:
        TypeError: If `sorted` is not a bool.
        TypeError: If `input_x` is not a Tensor.
        TypeError: If `k` is not an int.
        TypeError: If dtype of `input_x` is not supported.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import Tensor
        >>> from mindspore import ops
        >>> import mindspore
        >>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16)
        >>> k = 3
        >>> values, indices = ops.TopK(sorted=True)(input_x, k)
        >>> print((values, indices))
        (Tensor(shape=[3], dtype=Float16, value= [ 5.0000e+00,  4.0000e+00,  3.0000e+00]), Tensor(shape=[3],
          dtype=Int32, value= [4, 3, 2]))
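
        A shape-only sketch of passing `k` as a 0-D int32 Tensor, as described in the Inputs section above:

        >>> k = Tensor(2, mindspore.int32)  # assumption: 0-D int32 k is accepted as documented
        >>> values, indices = ops.TopK(sorted=True)(input_x, k)
        >>> print(values.shape)
        (2,)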
    """

    @prim_attr_register
    def __init__(self, sorted=True):
        """Initialize TopK."""
        self.sorted = validator.check_value_type("sorted", sorted, [bool], self.name)
        self.add_prim_attr("sorted", self.sorted)
        self.init_prim_io_names(inputs=['input', 'k'],
                                outputs=['values', 'indices'])


class Bincount(Primitive):
    """
    Counts the number of occurrences of each value in an integer array.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Inputs:
        - **array** (Tensor) - A Tensor of type int32, whose values can not be less than zero.
        - **size** (Tensor) - A non-negative Tensor of type int32.
        - **weights** (Tensor) - A Tensor with the same shape as `array`, or a length-0 Tensor, in which case all
          weights are treated as 1. Must be one of the following types: int32, int64, float32, float64.

    Outputs:
        A Tensor. Has the same type as `weights`.

    Raises:
        TypeError: If dtype of `array` is not int32.
        TypeError: If dtype of `size` is not int32.
        ValueError: If `size` is negative.
        ValueError: If `weights` are empty.
        ValueError: If the size of `weights` is not zero and the shape of `weights` is different from the shape
            of `array`.
        TypeError: If dtype of `weights` is not one of: int32, int64, float32, float64.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> array = Tensor(np.array([1, 2, 2, 3, 3, 3, 4, 4, 4, 4]), mindspore.int32)
        >>> size = Tensor(5, mindspore.int32)
        >>> weights = Tensor(np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), mindspore.float32)
        >>> bincount = ops.Bincount()
        >>> bins = bincount(array, size, weights)
        >>> print(bins)
        [0. 1. 2. 3. 4.]
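
        In the example above, each bin accumulates the `weights` of the matching entries: value 4 occurs
        four times with weight 1, so ``bins[4]`` is 4. A further sketch with non-uniform weights
        (assuming the same accumulation semantics):

        >>> weights = Tensor(np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 2]), mindspore.float32)
        >>> print(bincount(array, size, weights))
        [0. 1. 2. 3. 5.]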
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Bincount"""
        self.init_prim_io_names(inputs=['array', 'size', 'weights'], outputs=['bins'])


class CountNonZero(Primitive):
    """
    Calculates the total number of non-zero entries in the input tensor along the
    specified dimensions.

    Refer to :func:`mindspore.ops.count_nonzero` for more details.

    Args:
        dims (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
            Default: ``None`` , reduce over all dimensions.

    Inputs:
        - **x** (Tensor) - Input data used to count non-zero numbers, with shape
          :math:`(N, *)` where :math:`*` means any number of additional dimensions.

    Outputs:
        Tensor, the number of non-zero elements along the axes specified by `dims`.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, ops
        >>> x = Tensor([[0, 0, 1], [1, 1, 2], [0, 0, 1]], dtype=mindspore.int64)
        >>> countnonzero = ops.CountNonZero(dims=[1])
        >>> y = countnonzero(x)
        >>> print(y)
        [1 3 1]
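
        With the default ``dims=None`` , the count is taken over all dimensions; for the `x` above this
        should give 5 (a sketch, assuming the reduce-over-all-dimensions default described above):

        >>> print(ops.CountNonZero()(x))
        5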
    """

    @prim_attr_register
    def __init__(self, dims=None):
        """Initialize CountNonZero."""
        dims = [] if dims is None else dims
        self.init_prim_io_names(inputs=['x'], outputs=['y'])
        validator.check_value_type('dims', dims, [int, list, tuple], "CountNonZero")
        if isinstance(dims, (list, tuple)):
            for i, each in enumerate(dims):
                validator.check_value_type(f'dims[{i}]', each, [int], "CountNonZero")
        self.dims = dims
        self.add_prim_attr("dims", self.dims)
6259