• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
2#
3# Copyright 2020-2024 Huawei Technologies Co., Ltd
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16# ============================================================================
17"""standard_method"""
18
19from __future__ import absolute_import
20from mindspore import Tensor, CSRTensor, COOTensor
21from mindspore import dtype as mstype
22from mindspore._c_expression import Tensor as Tensor_
23from mindspore.common import mutable
24import mindspore.common._monad as monad
25from mindspore.common.sparse_tensor import RowTensorInner
26from mindspore.ops.composite.base import _append, _insert, _pop, _list_clear, _reverse, \
27    _extend, _dict_setitem, _dict_clear, _haskey, _update, _fromkeys
28from mindspore.ops.operations._sequence_ops import TensorToTuple
29
30from ... import _checkparam as validator
31from ..._checkparam import check_is_number, check_reshape_shp, check_axis_in_range, \
32    check_axis_valid, check_and_canonicalize_axes
33from ...ops import functional as F
34from ...ops import operations as P
35from ...ops import composite
36from ...ops.operations import array_ops
37from ...ops.composite import MultitypeFuncGraph, env_get, hyper_add, \
38    zeros_like, ones_like, repeat_elements, multitype_ops
39from ...ops.composite.multitype_ops import _constexpr_utils as const_utils
40from ...ops.composite.multitype_ops import _compile_utils as compile_utils
41from ...ops.operations.math_ops import Median
42from ...ops.operations._inner_ops import Format
43from ...ops.operations import _csr_ops
44from ...ops.operations import _map_tensor_ops
45from ...ops.operations._sequence_ops import TensorToScalar
46from ...ops.primitive import constexpr, _primexpr
47from ...common import dtype as mstype
48from ...ops.operations._sequence_ops import ListAppend, ListInsert, SequenceMax, SequenceMin, \
49    SequenceIndex
50
51__all__ = ['MultitypeFuncGraph', 'env_get',
52           'hyper_add', 'zeros_like', 'ones_like']
53
54shape_ = P.Shape()
55dtype_ = P.DType()
56abs_ = P.Abs()
57ndim_ = P.Rank()
58cumsum_ = P.CumSum()
59size_op_ = P.Size()
60_format = Format()
61_reduce_sum_default = P.ReduceSum()
62_reduce_sum_keepdims = P.ReduceSum(True)
63_csr_mm = _csr_ops.CSRMM()
64
65itemsize_map = {mstype.bool_: 1, mstype.int8: 1, mstype.uint8: 1,
66                mstype.float16: 2, mstype.int16: 2, mstype.uint16: 2,
67                mstype.float32: 4, mstype.int32: 4, mstype.uint32: 4,
68                mstype.float64: 8, mstype.int64: 8, mstype.uint64: 8}
69
70nan_tensor = Tensor(float('nan'), dtype=mstype.float32)
71
72
73def mean(x, axis=None, keep_dims=False):
74    """
75    Reduces a dimension of a tensor by averaging all elements in the dimension.
76
77    Args:
78        axis (Union[None, int, tuple(int), list(int)]): Dimensions of reduction,
79            when axis is None or empty tuple, reduce all dimensions. Default: ().
80        keep_dims (bool): Whether to keep the reduced dimensions. Default: False.
81
82    Returns:
83        Tensor, has the same data type as input tensor.
84
85    Supported Platforms:
86        ``Ascend`` ``GPU`` ``CPU``
87
88    Examples:
89        >>> import numpy as np
90        >>> from mindspore import Tensor
91        >>> input_x = Tensor(np.array([1, 2, 3], dtype=np.float32))
92        >>> output = input_x.mean()
93        >>> print(output)
94        2.0
95    """
96    return F.mean(x, axis, keep_dims)
97
98
99def ndimension(x):
100    """Return the number of tensor dimensions."""
101    return len(x.shape)
102
103
104def prod(input, axis=None, keep_dims=False, dtype=None):
105    """
106    Reduces a dimension of a tensor by product all elements in the dimension.
107
108    Args:
109        input (Tensor): Input Tensor.
110        axis (Union[None, int, tuple(int), list(int)]): Dimensions of reduction,
111            when axis is None or empty tuple, reduce all dimensions. Default: ``None``.
112        keep_dims (bool): Whether to keep the reduced dimensions. Default: False.
113        dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
114
115    Returns:
116        Tensor, has the same data type as input tensor.
117
118    Supported Platforms:
119        ``Ascend`` ``GPU`` ``CPU``
120
121    Examples:
122        >>> import numpy as np
123        >>> from mindspore import Tensor
124        >>> input_x = Tensor(np.array([1, 2, 3], dtype=np.float32))
125        >>> output = input_x.prod()
126        >>> print(output)
127        6.0
128    """
129    return F.prod(input, axis, keep_dims, dtype)
130
131
132def addcdiv(input, tensor1, tensor2, value=1):
133    """
134    Performs the element-wise division of tensor tensor1 by tensor tensor2,
135    multiply the result by the scalar value and add it to input_data.
136
137    Args:
138        input (Tensor): The tensor to be added.
139        tensor1 (Tensor): The numerator tensor.
140        tensor1 (Tensor): The denominator tensor.
141        value (Union[Tensor, Number]): The multiplier for tensor1/tensor2. Default: 1.
142
143    Returns:
144        Tensor, has the same shape and dtype as tensor1 / tensor2.
145    """
146    return F.addcdiv(input, tensor1, tensor2, value)
147
148
149def addcmul(input, tensor1, tensor2, value=1):
150    """
151    Performs the element-wise product of tensor tensor1 and tensor tensor2,
152    multiply the result by the scalar value and add it to input_data.
153
154    Args:
155        input (Tensor): The tensor to be added.
156        tensor1 (Tensor): The tensor to be multiplied.
157        tensor2 (Tensor): The tensor to be multiplied.
158        value (Union[Tensor, Number]): The multiplier for tensor1*tensor2. Default: 1.
159
160    Returns:
161        Tensor, has the same shape and dtype as tensor1 * tensor2.
162    """
163    return F.addcmul(input, tensor1, tensor2, value)
164
165
166def all_(x, axis=(), keep_dims=False):
167    """
168    Check all array elements along a given axis evaluate to True.
169
170    Args:
171        x (Tensor): A Tensor to be reduced.
172        axis (Union[None, int, tuple(int)): Dimensions of reduction.
173        keep_dims (bool): Whether to keep the reduced dimensions.
174
175    Returns:
176        Tensor, has the same data type as x.
177    """
178    return F.all(x, axis, keep_dims)
179
180
181def angle(x):
182    r"""
183    For details, please refer to :func:`mindspore.ops.angle`.
184    """
185    return F.angle(x)
186
187
188def any_(x, axis=(), keep_dims=False):
189    """
190    Check any array element along a given axis evaluate to True.
191
192    Args:
193        x (Tensor): A Tensor to be reduced.
194        axis (Union[None, int, tuple(int)): Dimensions of reduction.
195        keep_dims (bool): Whether to keep the reduced dimensions.
196
197    Returns:
198        Tensor, has the same data type as x.
199    """
200    if axis is None:
201        axis = ()
202    reduce_any = P.ReduceAny(keep_dims)
203    return reduce_any(x, axis)
204
205
206def atan2(input, other):
207    r"""
208    Computes the first input tensor multiplied by the logarithm of second input tensor element-wise.
209    Refer to :func:`mindspore.ops.atan2` for more details.
210    """
211    return F.atan2(input, other)
212
213
214def bincount(x, weights=None, minlength=0):
215    r"""
216    For details, please refer to :func:`mindspore.ops.bincount`.
217    """
218    return F.bincount(x, weights, minlength)
219
220
221def H(x):
222    """Returns a view of a matrix (2-D tensor) conjugated and transposed."""
223    output = x.swapaxes(0, 1)
224    if x.dtype in (mstype.complex64, mstype.complex128):
225        return output.conj()
226    return output
227
228
229def histc(x, bins=100, min=0., max=0.):
230    """
231    For details, please refer to :func:`mindspore.ops.histc`.
232    """
233    return F.histc(x, bins, min, max)
234
235
236def geqrf(x):
237    """
238    For details, please refer to :func:`mindspore.ops.geqrf`.
239    """
240    return F.geqrf(x)
241
242
243def size_(x):
244    """
245    Return the number of elements in tensor `x`.
246
247    Note:
248        To strictly follow Numpy's behaviour, return 1 for tensor scalar.
249
250    Args:
251        x (Tensor): Input tensor.
252
253    Returns:
254        size(int).
255    """
256    return size_op_(x)
257
258
259def itemsize_(x):
260    """
261    Return length of one tensor element in bytes.
262
263    Args:
264        x (Tensor): Input tensor.
265
266    Returns:
267        itemsize(int).
268    """
269    return get_itemsize(x.dtype)
270
271
272def nbytes_(x):
273    """
274    Return total number of bytes taken by the tensor.
275
276    Args:
277        x (Tensor): Input tensor.
278
279    Returns:
280        nbytes(int).
281    """
282    return itemsize_(x) * F.shape_mul(shape_(x))
283
284
285def strides_(x):
286    """
287    Return the tuple of bytes to step in each dimension when traversing a tensor.
288
289    Args:
290        x (Tensor): Input tensor.
291
292    Returns:
293        strides (tuple[int]).
294    """
295    strides = ()
296    ndim = P.Rank()(x)
297    tensor_shape = shape_(x)
298    for i in F.make_range(0, ndim):
299        stride = itemsize_(x)
300        for j in F.make_range(i + 1, ndim):
301            stride *= tensor_shape[j]
302        strides += (stride,)
303    return strides
304
305
306def slogdet(x):
307    r"""
308    For details, please refer to :func:`mindspore.ops.slogdet`.
309    """
310    return F.slogdet(x)
311
312
313def cauchy(x, median=0.0, sigma=1.0):
314    r"""
315    Fills the tensor with numbers drawn from the Cauchy distribution. It is
316    defined as follows:
317
318    .. math::
319        f(x)= \frac{1}{\pi} \frac{\sigma}{(x-median)^2 +\sigma^2}
320
321    Args:
322        x (Tensor): Input tensor.
323        median (float, optional): the location parameter, specifying the location
324            of the peak of the distribution. Default: 0.0.
325        sigma (float, optional): the scale parameter which specifies the half-width
326            at half-maximum. Default: 1.0.
327
328    Returns:
329        Tensor. A Tensor with the same type and shape of input.
330    """
331    out = P.Cauchy(list(x.shape), median, sigma)()
332    return F.cast(out, x.dtype)
333
334
335def log_normal(x, mean=1.0, std=2.0):
336    r"""
337    Fills the elements of the input tensor with log normal values initialized by
338    given mean and std:
339
340    .. math::
341        \text{f}(x;1.0,2.0)=\frac{1}{x\delta \sqrt[]{2\pi} }e^{-\frac{(\ln x-\mu )^2}{2\delta ^2} }
342
343    where \mu, \delta is mean and standard deviation of log normal distribution respectively.
344
345    Args:
346        x (Tensor): Input tensor.
347        mean (float, optional): the mean of normal distribution. With float data type.
348            Default: 1.0.
349        std (float, optional): the std of normal distribution. With float data type.
350            Default: 2.0.
351
352    Returns:
353        Tensor. A Tensor with the same type and shape of input.
354    """
355    log_normal = P.LogNormalReverse(mean, std)
356    return log_normal(x)
357
358
359def chunk(x, chunks, axis=0):
360    r"""
361    For details, please refer to :func:`mindspore.ops.chunk`.
362    """
363    return F.chunk(x, chunks, axis)
364
365
366def tril(x, diagonal=0):
367    r"""
368    For details, please refer to :func:`mindspore.ops.tril`.
369    """
370    return F.tril(x, diagonal)
371
372
373def hasattr(x, attr):  # pylint: disable=redefined-builtin
374    """
375    Return whether an object has the attribute.
376
377    Args:
378        x (object): Input object.
379        attr (string): The name of attribute
380
381    Returns:
382        Boolean value, indicates whether the object x has attribute attr.
383    """
384    out = getattr(x, attr, mstype._null)
385    return not isinstance(out, mstype._NullType)
386
387
388def astype(x, dtype, copy=True):  # pylint: disable=redefined-outer-name
389    """
390    Return a copy of the tensor, casted to a specified type.
391
392    Args:
393        dtype (Union[:class:`mindspore.dtype`, str]): Designated tensor dtype, can be in format
394            of :class:`mindspore.dtype.float32` or `float32`.
395            Default: :class:`mindspore.dtype.float32`.
396        copy (bool, optional): By default, astype always returns a newly allocated
397            tensor. If this is set to false, the input tensor is returned instead
398            of a copy if possible. Default: True.
399
400    Returns:
401        Tensor, with the designated dtype.
402
403    Raises:
404        TypeError: If `dtype` has types not specified above, or values cannot be understood.
405
406    Supported Platforms:
407        ``Ascend`` ``GPU`` ``CPU``
408
409    Examples:
410        >>> import numpy as np
411        >>> from mindspore import Tensor
412        >>> x = Tensor(np.ones((1,2,2,1), dtype=np.float32))
413        >>> x = x.astype("int32")
414        >>> print(x.dtype)
415        Int32
416    """
417    dtype = check_astype_dtype_const(dtype)
418    if not copy and dtype == x.dtype:
419        return x
420    return F.cast(x, dtype)
421
422
423def minimum(x, y):
424    r"""
425    Computes the minimum of input tensors element-wise.
426
427    Refer to :func:`mindspore.ops.minimum` for more detail.
428    """
429    return F.minimum(x, y)
430
431
432def multinomial(input, num_samples, replacement=True, seed=None):
433    r"""
434    Returns a tensor sampled from the multinomial probability distribution located in the corresponding
435    row of the input tensor.
436
437    Refer to :func:`mindspore.ops.multinomial` for more detail.
438    """
439    return F.multinomial(input, num_samples, replacement, seed)
440
441
442def tile(x, reps):
443    r"""
444    Replicates an input tensor with given reps times.
445
446    Creates a new tensor by replicating `input_x` `reps` times. The i'th dimension of
447    output tensor has `input_x.shape[i] * reps[i]` elements, and the values of `input_x`
448    are replicated `reps[i]` times along the i'th dimension.
449
450    Note:
451        The length of `reps` must be greater or equal to the length of dimension in `input_x`.
452
453    Args:
454        reps (tuple[int]): The parameter that specifies the number of replications,
455            the parameter type is tuple, and the data type is int, i.e., :math:`(y_1, y_2, ..., y_S)`.
456            The length of `reps` cannot be smaller than the length of the shape of `input_x`.
457            Only constant value is allowed.
458
459    Returns:
460        Tensor, has the same data type as the `input_x`. Suppose the length of `reps` is `d`,
461        the dimension of `input_x` is `input_x.dim`, and the shape of `input_x` is :math:`(x_1, x_2, ..., x_S)`.
462
463        - If `input_x.dim = d`, then the shape of their corresponding positions can be multiplied, and
464          the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_R)`.
465        - If `input_x.dim < d`, fill in multiple 1 in the length of the shape of `input_x` until their
466          lengths are consistent. Such as set the shape of `input_x` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
467          then the shape of their corresponding positions can be multiplied, and the shape of Outputs is
468          :math:`(1*y_1, ..., x_S*y_R)`.
469
470    Raises:
471        TypeError: If `reps` is not a tuple or its elements are not all int.
472        ValueError: If the elements of `reps` are not all greater than 0.
473        ValueError: If the length of `reps` are smaller than the length of dimension in `input_x`.
474
475    Supported Platforms:
476        ``Ascend`` ``GPU`` ``CPU``
477
478    Examples:
479        >>> import mindspore as ms
480        >>> from mindspore import Tensor
481        >>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
482        >>> reps = (2, 3)
483        >>> output = input_x.tile(reps)
484        >>> print(output)
485        [[1.  2.  1.  2.  1.  2.]
486        [3.  4.  3.  4.  3.  4.]
487        [1.  2.  1.  2.  1.  2.]
488        [3.  4.  3.  4.  3.  4.]]
489        >>> reps = (2, 3, 2)
490        >>> output = input_x.tile(reps)
491        >>> print(output)
492        [[[1. 2. 1. 2.]
493        [3. 4. 3. 4.]
494        [1. 2. 1. 2.]
495        [3. 4. 3. 4.]
496        [1. 2. 1. 2.]
497        [3. 4. 3. 4.]]
498        [[1. 2. 1. 2.]
499        [3. 4. 3. 4.]
500        [1. 2. 1. 2.]
501        [3. 4. 3. 4.]
502        [1. 2. 1. 2.]
503        [3. 4. 3. 4.]]]
504    """
505    return F.tile(x, reps)
506
507
508def short(x):
509    """
510    Return a copy of the tensor, cast to int16 type, equivalent to self.astype(ms.int16).
511    """
512    return F.cast(x, mstype.int16)
513
514
515def transpose(x, *axis):
516    r"""
517    Return a view of the tensor with axes transposed.
518
519    For a 1-D tensor this has no effect, as a transposed vector is simply the
520    same vector. For a 2-D tensor, this is a standard matrix transpose. For a
521    n-D tensor, if axes are given, their order indicates how the axes are permuted.
522    If axes are not provided and tensor.shape = (i[0], i[1],...i[n-2], i[n-1]),
523    then tensor.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0]).
524
525    Args:
526        axes(Union[None, tuple(int), list(int), int], optional): If axes is None or
527            blank, tensor.transpose() will reverse the order of the axes. If axes is tuple(int)
528            or list(int), tensor.transpose() will transpose the tensor to the new axes order.
529            If axes is int, this form is simply intended as a convenience alternative to the
530            tuple/list form.
531
532    Returns:
533        Tensor, has the same dimension as input tensor, with axes suitably permuted.
534
535    Raises:
536        TypeError: If input arguments have types not specified above.
537        ValueError: If the number of `axes` is not euqal to a.ndim.
538
539    Supported Platforms:
540        ``Ascend`` ``GPU`` ``CPU``
541
542    Examples:
543        >>> import numpy as np
544        >>> from mindspore import Tensor
545        >>> x = Tensor(np.ones((1,2,3), dtype=np.float32))
546        >>> x = x.transpose()
547        >>> print(x.shape)
548        (3, 2, 1)
549    """
550    ndim = F.rank(x)
551    perm = validator.check_transpose_axis(axis, ndim)
552    return F.transpose(x, perm)
553
554
555def T(x):
556    """
557    Return the transposed tensor.
558    """
559    if x.ndim <= 1:
560        return x
561    return transpose(x)
562
563
564# `tensor.T` is used as a property in graph mode
565T_ = T
566
567
568def reshape(x, *shape):
569    """
570    Give a new shape to a tensor without changing its data.
571
572    Args:
573        shape(Union[int, tuple(int), list(int)]): The new shape should be compatible
574            with the original shape. If an integer, then the result will be a 1-D
575            array of that length. One shape dimension can be -1. In this case, the
576            value is inferred from the length of the array and remaining dimensions.
577
578    Returns:
579        Tensor, with new specified shape.
580
581    Raises:
582        TypeError: If new_shape is not integer, list or tuple, or `x` is not tensor.
583        ValueError: If new_shape is not compatible with the original shape.
584
585    Supported Platforms:
586        ``Ascend`` ``GPU`` ``CPU``
587
588    Examples:
589        >>> from mindspore import Tensor
590        >>> from mindspore import dtype as mstype
591        >>> x = Tensor([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], dtype=mstype.float32)
592        >>> output = x.reshape((3, 2))
593        >>> print(output)
594        [[-0.1  0.3]
595        [ 3.6  0.4]
596        [ 0.5 -3.2]]
597    """
598    new_shape = check_reshape_shp(shape)
599    return F.reshape(x, new_shape)
600
601
602def reshape_as(x, other):
603    """
604    Rearranges the input Tensor based on the `other` shape.
605    """
606    return F.reshape(x, other.shape)
607
608
609def reverse(x, axis):
610    """
611    Reverses specific dimensions of a tensor.
612
613    .. warning::
614        The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "input_x".
615
616    Args:
617        - **x** (Tensor) - The target tensor. The data type is Number except float64.
618        The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
619        - **axis** (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.
620
621    Outputs:
622        Tensor, has the same shape and type as `x`.
623
624    Raises:
625        TypeError: If `axis` is neither list nor tuple.
626        TypeError: If element of `axis` is not an int.
627
628    Supported Platforms:
629        ``Ascend`` ``GPU`` ``CPU``
630
631    Examples:
632        >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
633        >>> output = ops.reverse(input_x, axis=[1])
634        >>> print(output)
635        [[4 3 2 1]
636         [8 7 6 5]]
637        >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
638        >>> output = ops.reverse(input_x, axis=[1, 0])
639        >>> print(output)
640        [[8 7 6 5]
641         [4 3 2 1]]
642    """
643    return F.reverse(x, axis)
644
645
646def reverse_sequence(x, seq_lengths, seq_dim, batch_dim=0):
647    """
648    Reverses variable length slices.
649
650    Args:
651        x (Tensor): The input to reverse, supporting all number types including bool.
652        seq_lengths (Tensor): Must be a 1-D vector with int32 or int64 types.
653        seq_dim (int): The dimension where reversal is performed. Required.
654        batch_dim (int): The input is sliced in this dimension. Default: 0.
655
656    Returns:
657        Reversed tensor with the same shape and data type as input.
658
659    Raises:
660        TypeError: If `seq_dim` or `batch_dim` is not an int.
661        ValueError: If value of `batch_dim` is equal to or greater than length of shape of input.
662
663    Supported Platforms:
664        ``Ascend`` ``GPU`` ``CPU``
665
666    Examples:
667        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
668        >>> seq_lengths = Tensor(np.array([1, 2, 3]))
669        >>> output = x.reverse_sequence(seq_lengths, seq_dim=1)
670        >>> print(output)
671        [[1. 2. 3.]
672         [5. 4. 6.]
673         [9. 8. 7.]]
674        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
675        >>> seq_lengths = Tensor(np.array([1, 2, 3]))
676        >>> output = x.reverse_sequence(seq_lengths, seq_dim=0, batch_dim=1)
677        >>> print(output)
678        [[1. 5. 9.]
679         [4. 2. 6.]
680         [7. 8. 3.]]
681        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
682        >>> seq_lengths = Tensor(np.array([2, 2, 3]))
683        >>> output = x.reverse_sequence(seq_lengths, seq_dim=1)
684        >>> print(output)
685        [[2. 1. 3.]
686         [5. 4. 6.]
687         [9. 8. 7.]]
688        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
689        >>> seq_lengths = Tensor(np.array([3, 2, 3]))
690        >>> output = x.reverse_sequence(seq_lengths, seq_dim=1)
691        >>> print(output)
692        [[3. 2. 1.]
693         [5. 4. 6.]
694         [9. 8. 7.]]
695        >>> x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.float32)
696        >>> seq_lengths = Tensor(np.array([4, 4]))
697        >>> output = x.reverse_sequence(seq_lengths, seq_dim=1)
698        >>> print(output)
699        [[4. 3. 2. 1.]
700         [8. 7. 6. 5.]]
701    """
702    return F.reverse_sequence(x, seq_lengths, seq_dim, batch_dim)
703
704
705def ravel(x):
706    """
707    Return a contiguous flattened tensor.
708
709    Returns:
710        Tensor, a 1-D tensor, containing the same elements of the input.
711
712    Supported Platforms:
713        ``Ascend`` ``GPU`` ``CPU``
714
715    Examples:
716        >>> import numpy as np
717        >>> from mindspore import Tensor
718        >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
719        >>> output = x.ravel()
720        >>> print(output.shape)
721        (24,)
722    """
723    return reshape(x, (-1,))
724
725
726def flatten(x, order='C', *, start_dim=0, end_dim=-1):
727    r"""
728    Flatten a tensor along dimensions from `start_dim` to `start_dim`.
729
730    Args:
731        x (Tensor): Input tensor.
732        order (str, optional): Only 'C' and 'F' are supported. 'C' means to flatten in row-major (C-style) order.
733            'F' means to flatten in column-major (Fortran-style) order. Default: 'C'.
734
735    Keyword Args:
736        start_dim (int, optional): The first dimension to flatten. Default: 0.
737        end_dim (int, optional): The last dimension to flatten. Default: -1.
738
739    Returns:
740        Tensor. If `x` is a 0-dimensional, a 1-dimensional Tensor will be returned.
741
742    Supported Platforms:
743        ``Ascend`` ``GPU`` ``CPU``
744
745    Raises:
746        TypeError: If `order` is not string type.
747        ValueError: If `order` is string type, but not 'C' or 'F'.
748        TypeError: If `start_dim` or `end_dim` is not int.
749        ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
750        ValueError: If `start_dim` or `end_dim` is not in range of [-x.dim, x.dim-1].
751
752    Examples:
753        >>> import numpy as np
754        >>> from mindspore import Tensor
755        >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
756        >>> output = x.flatten()
757        >>> print(output.shape)
758        (24,)
759    """
760    return F.flatten(x, order, start_dim=start_dim, end_dim=end_dim)
761
762
763def scatter(self, axis, index, src):
764    """
765    Update the value in `src` to tensor according to the specified index.
766    """
767    return F.scatter(self, axis, index, src)
768
769
770def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
771    r"""
772    Embeds the src into the input Tensor according to `axis`.
773    """
774    return F.slice_scatter(input, src, axis, start, end, step)
775
776
777def select_scatter(input, src, axis, index):
778    r"""
779    On the specified dimension `axis` of `input` , `src` is scattered into `input` on the specified `index` of `input` .
780    """
781    return F.select_scatter(input, src, axis, index)
782
783
784def swapaxes(input, axis0, axis1):
785    """
786    Interchange two axes of a tensor.
787    """
788    return F.swapaxes(input, axis0, axis1)
789
790
791def swapdims(x, dim0, dim1):
792    """
793    Interchange two dims of a tensor.
794    """
795    return F.swapdims(x, dim0, dim1)
796
797
798def squeeze(x, axis=None):
799    """
800    Remove single-dimensional entries from the shape of a tensor.
801
802    Args:
803        axis (Union[None, int, list(int), tuple(int)], optional): Default is None.
804
805    Returns:
806        Tensor, with all or a subset of the dimensions of length 1 removed.
807
808    Raises:
809        TypeError: If input arguments have types not specified above.
810        ValueError: If specified axis has shape entry :math:`> 1`.
811
812    Supported Platforms:
813        ``Ascend`` ``GPU`` ``CPU``
814
815    Examples:
816        >>> import numpy as np
817        >>> from mindspore import Tensor
818        >>> x = Tensor(np.ones((1,2,2,1), dtype=np.float32))
819        >>> x = x.squeeze()
820        >>> print(x.shape)
821        (2, 2)
822    """
823    return F.squeeze(x, axis)
824
825
826def unbind(input, dim=0):
827    """For details, please refer to :func:`mindspore.ops.unbind`."""
828    return P.Unstack(axis=dim)(input)
829
830
831def argmax(x, axis=None, keepdims=False):
832    """
833    Returns the indices of the maximum values of a tensor across a dimension.
834
835    Args:
836        axis (Union[int, None], optional): The dimension to reduce.
837          If `axis` is None, the indices of the maximum value within the
838          flattened input will be returned. Default: ``None``.
839        keepdims (bool, optional): Whether the output tensor retains the
840          specified dimension. Ignored if `axis` is None. Default: False.
841
842    Returns:
843        Tensor, indices of the maximum values across a dimension.
844
845    Raises:
846        ValueError: if `axis` is out of range.
847
848    Supported Platforms:
849        ``Ascend`` ``GPU`` ``CPU``
850
851    Examples:
852        >>> import numpy as np
853        >>> from mindspore import Tensor
854        >>> a = Tensor(np.arange(10, 16).reshape(2, 3).astype("float32"))
855        >>> print(a.argmax())
856        5
857    """
858    return F.argmax(x, axis, keepdims)
859
860
861def argmin(x, axis=None, keepdims=False):
862    """
863    Returns the indices of the minimum values along an axis.
864
865    Args:
866        a (Union[int, float, bool, list, tuple, Tensor]): Input array.
867        axis (int, optional): By default, the index is into
868            the flattened array, otherwise along the specified axis.
869            Defaults to None.
870        keepdims (boolean, optional): Whether the output tensor retains the specified
871            dimension. Ignored if `axis` is None. Default: False.
872
873    Returns:
874        Tensor, array of indices into the array. It has the same
875        shape as a.shape with the dimension along axis removed.
876
877    Raises:
878        ValueError: if axis is out of range.
879
880    Supported Platforms:
881        ``Ascend`` ``GPU`` ``CPU``
882
883    Examples:
884        >>> import numpy as np
885        >>> from mindspore import Tensor
886        >>> a = Tensor(np.arange(10, 16).reshape(2, 3).astype("float32"))
887        >>> print(a.argmin())
888        0
889    """
890    # P.Argmin only supports float
891    x = x.astype(mstype.float32)
892    is_axis_none = False
893    if axis is None:
894        x = ravel(x)
895        axis = 0
896        is_axis_none = True
897    else:
898        axis = check_axis_in_range(axis, F.rank(x))
899    out = P.Argmin(axis)(x)
900    if keepdims and not is_axis_none:
901        out = expand_dims(out, axis)
902    return out
903
904
905def argmax_with_value(x, axis=0, keep_dims=False):
906    """Calculates the maximum value with corresponding index, and returns indices and values."""
907    return F.max(x, axis, keep_dims)
908
909
910def argmin_with_value(x, axis=0, keep_dims=False):
911    """Calculates the minimum value with corresponding index, and returns indices and values."""
912    return F.min(x, axis, keep_dims)
913
914
915def median(input, global_median, axis=0, keep_dims=False):
916    r"""
917    Computes the median of input tensor.
918
919    .. warning::
920        When attr `global_median` is True, the second output Tensor value is meaningless.
921
922    """
923    check_axis_in_range(axis, input.ndim)
924    median_ = Median(global_median, axis, keep_dims)
925    return median_(input)
926
927
928def msort(x):
929    """
930    For details, please refer to :func:`mindspore.ops.msort`.
931    """
932    return F.msort(x)
933
934
935def mm(mat1, mat2):
936    """
937    For details, please refer to :func:`mindspore.ops.mm`.
938    """
939    return F.mm(mat1, mat2)
940
941
942def mT(x):
943    """
944    Returns a view of this tensor with the last two dimensions transposed.
945    x.mT is equivalent to x.transpose(-2, -1).
946    """
947    return swapaxes(x, -2, -1)
948
949
950def nan_to_num(x, nan=0.0, posinf=None, neginf=None):
951    """
952    For details, please refer to :func:`mindspore.ops.nan_to_num`.
953    """
954    return F.nan_to_num(x, nan, posinf, neginf)
955
956
957def cumsum(x, axis=None, dtype=None):
958    """
959    Returns the cumulative sum of the elements along a given axis.
960
961    Note:
962        If ``x.dtype`` is :class:`int8`, :class:`int16` or :class:`bool`, the result
963        `dtype` will be elevated to :class:`int32`, :class:`int64` is not supported.
964
965    Args:
966        x (Tensor): Input tensor.
967        axis (int, optional): Axis along which the cumulative sum is computed. The
968            default (None) is to compute the cumsum over the flattened array.
969        dtype (:class:`mindspore.dtype`, optional): If not specified, stay the same as original,
970            tensor, unless it has an integer dtype with a precision less than :class:`float32`.
971            In that case, :class:`float32` is used.
972
973    Returns:
974        Tensor.
975
976    Supported Platforms:
977        ``Ascend`` ``GPU`` ``CPU``
978
979    Examples:
980        >>> import numpy as np
981        >>> from mindspore import Tensor
982        >>> a = Tensor(np.ones((3,3)).astype("float32"))
983        >>> output = a.cumsum(axis=0)
984        >>> print(output)
985        [[1. 1. 1.]
986        [2. 2. 2.]
987        [3. 3. 3.]]
988    """
989    original_dtype = x.dtype
990    # If original tensor is int, and has precision less then int32, convert
991    # to int32
992    if x.dtype in (mstype.bool_, mstype.int8, mstype.int16, mstype.uint8, mstype.int16):
993        x = x.astype(mstype.int32)
994    if axis is None:
995        x = x.ravel()
996        axis = 0
997    check_axis_in_range(axis, x.ndim)
998    if dtype is not None:
999        dtype = check_astype_dtype_const(dtype)
1000        if original_dtype != dtype:
1001            return cumsum_(x, axis).astype(dtype, copy=False)
1002    return cumsum_(x, axis)
1003
1004
1005def cummin(x, axis):
1006    """
1007    Returns the cumulative minimum of elements and the index.
1008    """
1009    return F.cummin(x, axis)
1010
1011
1012def cummax(x, axis):
1013    """
1014    Returns the cumulative maximum of elements and the index.
1015    """
1016    return F.cummax(x, axis)
1017
1018
1019def index_fill(x, axis, index, value):
1020    """
1021    Fills the elements under the axis dimension of the input Tensor with the input value
1022    by selecting the indices in the order given in index.
1023    """
1024    return F.index_fill(x, axis, index, value)
1025
1026
1027def index_select(x, axis, index):
1028    """
1029    Returns a new tensor which indexes the `x` tensor along dimension `axis` using the entries in `index` .
1030    """
1031    return F.index_select(x, axis, index)
1032
1033
1034def copy(x):
1035    """
1036    Returns a copy of the tensor.
1037
1038    Note:
1039        The current implementation does not support `order` argument.
1040
1041    Args:
1042        x (Tensor): Input tensor.
1043
1044    Returns:
1045        Copied tensor.
1046
1047    Supported Platforms:
1048        ``Ascend`` ``GPU`` ``CPU``
1049
1050    Examples:
1051        >>> import numpy as np
1052        >>> from mindspore import Tensor
1053        >>> a = Tensor(np.ones((3,3)).astype("float32"))
1054        >>> output = a.copy()
1055        >>> print(output)
1056        [[1. 1. 1.]
1057        [1. 1. 1.]
1058        [1. 1. 1.]]
1059    """
1060    if x.size == 0:
1061        return x
1062    origin_dtype = x.dtype
1063    if origin_dtype == mstype.bool_:
1064        return F.logical_not(F.logical_not(x))
1065    if origin_dtype != mstype.float64:
1066        x = x.astype(mstype.float32)
1067    x = x / 1.0
1068    x = x.astype(origin_dtype)
1069    return x
1070
1071
1072def max(input, axis=None, keepdims=False, *, initial=None,  # pylint: disable=redefined-builtin
1073        where=True, return_indices=False):  # pylint: disable=redefined-outer-name
1074    """
1075    Returns the maximum of a tensor or maximum along an axis.
1076
1077    Args:
1078        x (Tensor): Input Tensor.
1079        axis (None or int or tuple of ints, optional): defaults to None. Axis or
1080            axes along which to operate. By default, flattened input is used. If
1081            this is a tuple of ints, the maximum is selected over multiple axes,
1082            instead of a single axis or all the axes as before.
1083        keepdims (bool, optional): defaults to False.
1084            If this is set to True, the axes which are reduced are left in the
1085            result as dimensions with size one. With this option, the result will
1086            broadcast correctly against the input array.
1087
1088    Keyword Args:
1089        initial (scalar, optional):
1090            The minimum value of an output element. Must be present to allow
1091            computation on empty slice.
1092        where (bool Tensor, optional): defaults to True.
1093            A boolean array which is broadcasted to match the dimensions of array,
1094            and selects elements to include in the reduction. If non-default value
1095            is passed, initial must also be provided.
1096        return_indices (bool, optional): Whether to return the index of the minimum value. Default: False.
1097                If `axis` is a list or tuple of ints, it must be False.
1098
1099    Returns:
1100        Tensor or scalar, maximum of input tensor. If `axis` is None, the result is a scalar
1101        value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``.
1102
1103    Raises:
1104        TypeError: if the input is not a tensor.
1105
1106    Supported Platforms:
1107        ``Ascend`` ``GPU`` ``CPU``
1108
1109    Examples:
1110        >>> import numpy as np
1111        >>> from mindspore import Tensor
1112        >>> import mindspore.numpy as np
1113        >>> a = Tensor(np.arange(4).reshape((2,2)).astype('float32'))
1114        >>> output = a.max()
1115        >>> print(output)
1116        3.0
1117    """
1118    if isinstance(axis, (list, tuple)):
1119        return compile_utils.reduce_(input, P.ReduceMax(keepdims), cmp_fn=F.maximum,
1120                                     axis=axis, keepdims=keepdims, initial=initial, where=where)
1121    values, indices = F.max(input, axis, keepdims, initial=initial, where=where)
1122    if not return_indices:
1123        return values
1124    return values, indices
1125
1126
1127def min(input, axis=None, keepdims=False, *, initial=None,  # pylint: disable=redefined-builtin
1128        where=True, return_indices=False):  # pylint: disable=redefined-outer-name
1129    """
1130    Returns the minimum of a tensor or minimum along an axis.
1131
1132    Args:
1133        a (Tensor): Input data.
1134        axis (None or int or tuple of ints, optional): defaults to None. Axis or
1135            axes along which to operate. By default, flattened input is used. If
1136            this is a tuple of ints, the minimum is selected over multiple axes,
1137            instead of a single axis or all the axes as before.
1138        keepdims (bool, optional): defaults to False.
1139            If this is set to True, the axes which are reduced are left in the
1140            result as dimensions with size one. With this option, the result will
1141            broadcast correctly against the input array.
1142
1143    Keyword Args:
1144        initial (scalar, optional):
1145            The maximum value of an output element. Must be present to allow
1146            computation on empty slice.
1147        where (bool Tensor, optional): defaults to True.
1148            A boolean array which is broadcasted to match the dimensions of array,
1149            and selects elements to include in the reduction. If non-default value
1150            is passed, initial must also be provided.
1151        return_indices (bool, optional): Whether to return the index of the minimum value. Default: False.
1152                If `axis` is a list or tuple of ints, it must be False.
1153
1154    Returns:
1155        Tensor or scalar, minimum of `a`. If axis is None, the result is a scalar
1156        value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``.
1157
1158    Raises:
1159        TypeError: if the input is not a tensor.
1160
1161    Supported Platforms:
1162        ``Ascend`` ``GPU`` ``CPU``
1163
1164    Examples:
1165        >>> import numpy as np
1166        >>> from mindspore import Tensor
1167        >>> import mindspore.numpy as np
1168        >>> a = Tensor(np.arange(4).reshape((2,2)).astype('float32'))
1169        >>> output = a.min()
1170        >>> print(output)
1171        0.0
1172    """
1173    if isinstance(axis, (list, tuple)):
1174        return compile_utils.reduce_(input, P.ReduceMin(keepdims), cmp_fn=F.minimum,
1175                                     axis=axis, keepdims=keepdims, initial=initial, where=where)
1176    values, indices = F.min(input, axis, keepdims, initial=initial, where=where)
1177    if not return_indices:
1178        return values
1179    return values, indices
1180
1181
1182def pow(x, y):  # pylint: disable=redefined-builtin
1183    """
1184    Calculate the power of Tensor.
1185    """
1186    return F.pow(x, y)
1187
1188
1189def log(x):
1190    """
1191    Calculate the logarithm of Tensor.
1192    """
1193    return F.log(x)
1194
1195
1196def log10(input):
1197    """
1198    Calculate the base-10 logarithm of Tensor.
1199    """
1200    return F.log10(input)
1201
1202
1203def log2(input):
1204    """
1205    Calculate the base-2 logarithm of Tensor.
1206    """
1207    return F.log2(input)
1208
1209
1210def logaddexp(input, other):
1211    """
1212    Computes the logarithm of the sum of exponentiations of the inputs.
1213    """
1214    return F.logaddexp(input, other)
1215
1216
1217def logaddexp2(input, other):
1218    """
1219    Computes the logarithm of the sum of exponentiations in base of 2 of the inputs.
1220    """
1221    return F.logaddexp2(input, other)
1222
1223
1224def logcumsumexp(input, axis):
1225    """
1226    Computes the logarithm of the sum of exponentiations of the inputs along specified dimension.
1227    """
1228    return F.logcumsumexp(input, axis)
1229
1230
1231def logsumexp(input, axis, keepdims=False):
1232    """
1233    Reduces a dimension of a tensor by calculating exponential for all elements in the dimension,
1234    then calculate logarithm of the sum.
1235    """
1236    return F.logsumexp(input, axis, keepdims)
1237
1238
1239def round_(x):
1240    """
1241    Returns half to even of a tensor element-wise.
1242    """
1243    return F.round(x)
1244
1245
1246def roll(x, shifts, dims):
1247    """
1248    Rolls the elements of a tensor along an axis.
1249    """
1250    dims = dims if dims is not None else 0
1251    return F.Roll(shifts, dims)(x)
1252
1253
1254def rot90(x, k, dims):
1255    """
1256    Rotate a n-D tensor by 90 degrees in the plane specified by dims axis.
1257    """
1258    return F.rot90(x, k, dims)
1259
1260
1261def rad2deg(x):
1262    """
1263    Returns a new tensor with each of the elements of `x` converted from angles in radians to degrees.
1264    """
1265    return F.rad2deg(x)
1266
1267
1268def deg2rad(x):
1269    """
1270    Calculates a new tensor with each of the elements of `x` converted from angles in degrees to radians.
1271    """
1272    return F.deg2rad(x)
1273
1274
1275def dot(input, other):
1276    r"""
1277    For details, please refer to :func:`mindspore.ops.dot`.
1278    """
1279    return composite.dot(input, other)
1280
1281
1282def copysign(x, other):
1283    """
1284    Create a new floating-point tensor with the magnitude of `x` and the sign of `other`, element-wise.
1285    """
1286    return F.copysign(x, other)
1287
1288
1289def numel(input):
1290    """
1291    Returns a Scalar of type int that represents the total number of elements in the Tensor.
1292    """
1293    return F.numel(input)
1294
1295
1296def permute(input, *axis):
1297    """
1298    Permutes the dimensions of the input tensor according to input permutation.
1299    """
1300    ndim = F.rank(input)
1301    perm = validator.check_transpose_axis(axis, ndim)
1302    return F.permute(input, perm)
1303
1304
1305def positive(input):
1306    """
1307    Return self Tensor.
1308    """
1309    return F.positive(input)
1310
1311
1312def remainder(input, divisor):
1313    """
1314    Returns element-wise remainder of division.
1315    """
1316    return F.remainder(input, divisor)
1317
1318
1319def unique_consecutive(input, return_idx=False, return_counts=False, axis=None):
1320    """
1321    Returns the elements that are unique in each consecutive group of equivalent elements in the input tensor.
1322    """
1323    return F.unique_consecutive(input, return_idx, return_counts, axis)
1324
1325
1326def unique_with_pad(x, pad_num):
1327    """
1328    Returns unique elements and relative indexes in 1-D tensor, filled with padding num.
1329    """
1330    return F.unique_with_pad(x, pad_num)
1331
1332
1333def resize(x, *new_shape):
1334    """
1335    Changes shape and size of array in-place.
1336
1337    Note:
1338        Instead of changing the size of the input array and returns nothing as in numpy,
1339        this method returns a new Tensor with the input size.
1340        Numpy argument `refcheck` is not supported.
1341
1342    Args:
1343        new_shape (Union[ints, tuple of ints]): Shape of resized array.
1344
1345    Returns:
1346        Tensor.
1347
1348    Supported Platforms:
1349        ``Ascend`` ``GPU`` ``CPU``
1350
1351    Examples:
1352        >>> from mindspore import numpy as np
1353        >>> x = np.array([[0, 1], [2, 3]])
1354        >>> x = x.resize(2, 3)
1355        >>> print(x)
1356        [[0 1 2]
1357        [3 0 0]]
1358    """
1359    if not new_shape:
1360        return x
1361    if len(new_shape) == 1:
1362        if isinstance(new_shape[0], tuple):
1363            new_shape = new_shape[0]
1364    flattened = x.ravel()
1365    cur_size = F.shape_mul(x.shape)
1366    new_size = F.shape_mul(new_shape)
1367    diff_size = new_size - cur_size
1368    if diff_size > 0:
1369        pad_val = F.fill(x.dtype, (diff_size,), 0)
1370        res = P.Concat()((flattened, pad_val))
1371    else:
1372        res = flattened[:new_size]
1373    return res.reshape(new_shape)
1374
1375
1376def det(input):
1377    """
1378    Computes the determinant of one or more square matrices.
1379    """
1380    return F.det(input)
1381
1382
1383def diagonal(x, offset=0, axis1=0, axis2=1):
1384    """
1385    Returns specified diagonals.
1386
1387    Args:
1388        offset (int, optional): Offset of the diagonal from the main diagonal.
1389            Can be positive or negative. Defaults to main diagonal.
1390        axis1 (int, optional): Axis to be used as the first axis of the 2-D
1391            sub-arrays from which the diagonals should be taken. Defaults to
1392            first axis (0).
1393        axis2 (int, optional): Axis to be used as the second axis of the 2-D
1394            sub-arrays from which the diagonals should be taken. Defaults to
1395            second axis.
1396
1397    Returns:
1398        Tensor, if `a` is 2-D, then `a` 1-D array containing the diagonal.
1399
1400    Raises:
1401        ValueError: if the input tensor has less than two dimensions.
1402
1403    Supported Platforms:
1404        ``Ascend`` ``GPU`` ``CPU``
1405
1406    Examples:
1407        >>> import mindspore.numpy as np
1408        >>> a = np.arange(4).reshape(2,2)
1409        >>> print(a)
1410        [[0 1]
1411        [2 3]]
1412        >>> output = a.diagonal()
1413        >>> print(output)
1414        [0 3]
1415    """
1416    ndim = x.ndim
1417    if ndim < 2:
1418        const_utils.raise_value_error(
1419            'diagonal requires an array of at least two dimensions')
1420    return F.diagonal(x, offset, axis1, axis2)
1421
1422
1423def diagonal_scatter(input, src, offset, dim1=0, dim2=1):
1424    r"""
1425    Embed `src` into the diagonal of `input` according to the `dim1` and `dim2`.
1426    """
1427    return F.diagonal_scatter(input, src, offset, dim1, dim2)
1428
1429
1430def digamma(input):
1431    """
1432    Computes the logarithmic derivative of the gamma function on input.
1433    """
1434    return F.digamma(input)
1435
1436
1437def lgamma(input):
1438    """
1439    Computes the natural logarithm of the absolute value of the gamma function on input.
1440    """
1441    return F.lgamma(input)
1442
1443
1444def i0(x):
1445    """
1446    For details, please refer to :func:`mindspore.ops.i0`.
1447    """
1448    return F.i0(x)
1449
1450
1451def isclose(x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False):
1452    """
1453    Returns a boolean tensor where two tensors are element-wise equal within a tolerance.
1454    """
1455    return F.isclose(x1, x2, rtol, atol, equal_nan)
1456
1457
1458def isneginf(input):
1459    """
1460    Tests element-wise for negative infinity, returns result as bool array.
1461    """
1462    return F.isneginf(input)
1463
1464
1465def isposinf(input):
1466    """
1467    Tests element-wise for positive infinity, returns result as bool array.
1468    """
1469    return F.isposinf(input)
1470
1471
1472def isreal(input):
1473    """
1474    Tests element-wise for real number.
1475    """
1476    return F.isreal(input)
1477
1478
1479def flip(x, dims):
1480    """
1481    For details, please refer to :func:`mindspore.ops.flip`.
1482    """
1483    return F.flip(x, dims)
1484
1485
1486def fliplr(x):
1487    """
1488    For details, please refer to :func:`mindspore.ops.fliplr`.
1489    """
1490    return F.fliplr(x)
1491
1492
1493def flipud(x):
1494    """
1495    For details, please refer to :func:`mindspore.ops.flipud`.
1496    """
1497    return F.flipud(x)
1498
1499
1500def float_power(x, exponent):
1501    """
1502    For details, please refer to :func:`mindspore.ops.float_power`.
1503    """
1504    return F.float_power(x, exponent)
1505
1506
1507def fmax(input, other):
1508    """
1509    For details, please refer to :func:`mindspore.ops.fmax`.
1510    """
1511    return F.fmax(input, other)
1512
1513
1514def fmin(input, other):
1515    """
1516    For details, please refer to :func:`mindspore.ops.fmin`.
1517    """
1518    return F.fmin(input, other)
1519
1520
1521def fmod(x, other):
1522    """
1523    For details, please refer to :func:`mindspore.ops.fmod`.
1524    """
1525    return F.fmod(x, other)
1526
1527
1528def is_floating_point(x):
1529    """
1530    For details, please refer to :func:`mindspore.ops.is_floating_point`.
1531    """
1532    return F.is_floating_point(x)
1533
1534
1535def is_signed(x):
1536    """
1537    For details, please refer to :func:`mindspore.ops.is_signed`.
1538    """
1539    return x.dtype in mstype.signed_type
1540
1541
1542def is_complex(x):
1543    """
1544    For details, please refer to :func:`mindspore.ops.is_complex`.
1545    """
1546    return F.is_complex(x)
1547
1548
1549def inv(x):
1550    """
1551    Computes Reciprocal of input tensor element-wise.
1552    """
1553    return F.inv(x)
1554
1555
1556def inverse(input):
1557    """
1558    Computes the inverse of a square matrix.
1559    """
1560    return F.inverse(input)
1561
1562
1563def invert(x):
1564    """
1565    Flips all bits of input tensor element-wise.
1566    """
1567    return F.invert(x)
1568
1569
1570def trace(x, offset=0, axis1=0, axis2=1, dtype=None):
1571    """
1572    Returns the sum along diagonals of the array.
1573
1574    Args:
1575        offset (int, optional): Offset of the diagonal from the main diagonal.
1576            Can be positive or negative. Defaults to main diagonal.
1577        axis1 (int, optional): Axis to be used as the first axis of the 2-D
1578            sub-arrays from which the diagonals should be taken. Defaults to
1579            first axis (0).
1580        axis2 (int, optional): Axis to be used as the second axis of the 2-D
1581            sub-arrays from which the diagonals should be taken. Defaults to
1582            second axis.
1583        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
1584            output Tensor.
1585
1586    Returns:
1587        Tensor, sum_along_diagonals.
1588
1589    Raises:
1590        ValueError: if the input tensor has less than two dimensions.
1591
1592    Supported Platforms:
1593        ``Ascend`` ``GPU`` ``CPU``
1594
1595    Examples:
1596        >>> import mindspore.numpy as np
1597        >>> x = np.eye(3)
1598        >>> print(x.trace())
1599        3.0
1600    """
1601    if offset == 0 and axis1 == 0 and axis2 == 1 and dtype is None:
1602        return F.trace(x)
1603    d = x.diagonal(offset, axis1=axis1, axis2=axis2)
1604    shape = d.shape
1605    if dtype is None:
1606        dtype = d.dtype
1607    dtype = check_astype_dtype_const(dtype)
1608    if shape[-1] == 0:
1609        return F.fill(dtype, shape[:-1], 0)
1610    res = F.reduce_sum(d.astype(mstype.float32), -1)
1611    return res.astype(dtype)
1612
1613
1614def take(x, indices, axis=None, mode='clip'):
1615    """
1616    Takes elements from an array along an axis.
1617
1618    Args:
1619        a (Tensor): Source array with shape `(Ni…, M, Nk…)`.
1620        indices (Tensor): The indices with shape `(Nj...)` of the values to extract.
1621        axis (int, optional): The axis over which to select values. By default,
1622            the flattened input array is used. Defaults to None.
1623        mode ('raise', 'wrap', 'clip', optional): Defaults to "clip".
1624
1625            - edge: Pads with the edge values of `arr`.
1626            - raise: Raises an error;
1627            - wrap: Wraps around;
1628            - clip: Clips to the range. 'clip' mode means that all indices that are
1629              too large are replaced by the index that addresses the last element
1630              along that axis. Note that this disables indexing with negative numbers.
1631
1632    Returns:
1633        Tensor, the indexed result.
1634
1635    Raises:
1636        ValueError: if axis is out of range.
1637        TypeError: if the input is not a Tensor.
1638
1639    Supported Platforms:
1640        ``Ascend`` ``GPU`` ``CPU``
1641
1642    Examples:
1643        >>> import mindspore.numpy as np
1644        >>> a = np.array([4, 3, 5, 7, 6, 8])
1645        >>> indices = np.array([0, 1, 4])
1646        >>> output = a.take(indices)
1647        >>> print(output)
1648        [4 3 6]
1649    """
1650    if mode not in ('raise', 'wrap', 'clip'):
1651        const_utils.raise_value_error(
1652            'raise should be one of "raise", "wrap", or "clip"')
1653    if axis is None:
1654        a = x.ravel()
1655        axis = 0
1656    else:
1657        a = x
1658    ndim = a.ndim
1659    axis = check_axis_in_range(axis, ndim)
1660
1661    shape_a = a.shape
1662    shape_indices = indices.shape
1663    size_indices = indices.size
1664    indices = compile_utils.check_indices(shape_a[axis], indices, mode)
1665
1666    # reshapes indices to shape (Ni..., Nj..., Nk)
1667    shape_ni = tuple_slice(shape_a, None, axis)
1668    shape_nk = tuple_slice(shape_a, axis + 1, None)
1669    shape_out = shape_ni + shape_indices + shape_nk
1670    shape_indices = expanded_shape(ndim, size_indices, axis)
1671    indices = indices.reshape(shape_indices)
1672    shape_indices = shape_ni + (indices.size,) + shape_nk
1673    indices = F.broadcast_to(indices, shape_indices)
1674
1675    res = F.gather_d(a, axis, indices)
1676    return res.reshape(shape_out)
1677
1678
1679def ms_type(input, dtype=None):
1680    r"""
1681    Change the dtype of the Tensor to the `dtype` . Return the type if `dtype` is None.
1682    """
1683    if dtype is None:
1684        return str(input.dtype)
1685    return input.astype(dtype)
1686
1687
1688def type_as(input, other):
1689    r"""
1690    Change the dtype of `input` to the dtype of `other`.
1691    """
1692    return input.astype(other.dtype)
1693
1694
1695def _infer_out_shape(*shapes):
1696    """
1697    Returns shape of output after broadcasting. Raises ValueError if shapes cannot be broadcast.
1698    """
1699    shape_out = list()
1700    max_len = ms_max([len(it) for it in shapes])
1701    for i in range(max_len):
1702        items = [it[i - (max_len - len(it))] if i - (max_len - len(it))
1703                                                >= 0 else 1 for it in shapes]
1704        max_size = 0 if 0 in items else ms_max(items)
1705        shape_out.append(max_size)
1706    return tuple(shape_out)
1707
1708
1709def choose(x, choices, mode='clip'):
1710    """
1711    Construct an array from an index array and a list of arrays to choose from.
1712
1713    Args:
1714        choices (sequence of arrays): Choice arrays. `a` and all of the `choices` must
1715            be broadcastable to the same shape. If `choices` is itself an array, then
1716            its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``)
1717            is taken as defining the "sequence".
1718        mode ('raise', 'wrap', 'clip', optional): Specifies how indices outside
1719            ``[0, n-1]`` will be treated:
1720
1721            'raise' – raise an error (default);
1722
1723            'wrap' – wrap around;
1724
1725            'clip' – clip to the range. 'clip' mode means that all indices that are
1726            too large are replaced by the index that addresses the last element
1727            along that axis. Note that this disables indexing with negative numbers.
1728
1729    Returns:
1730        Tensor, the merged result.
1731
1732    Supported Platforms:
1733        ``Ascend`` ``GPU`` ``CPU``
1734
1735    Raises:
1736        ValueError: if ``len(condlist) != len(choicelist)``.
1737
1738    Examples:
1739        >>> import mindspore.numpy as np
1740        >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]]
1741        >>> x = np.array([2, 3, 1, 0])
1742        >>> print(x.choose(choices))
1743        [20 31 12  3]
1744    """
1745    if check_is_tensor(F.typeof(choices)):
1746        shape_choice = _infer_out_shape(x.shape, choices.shape[1:])
1747        choices = F.broadcast_to(choices, (choices.shape[0],) + shape_choice)
1748    else:
1749        # broadcasts choices to the same shape if choices is a sequence
1750        choicelist = []
1751        shapes = ()
1752        for choice in choices:
1753            if not check_is_tensor(F.typeof(choice)):
1754                choice = const_utils.make_tensor(choice)
1755            shapes += (choice.shape,)
1756            choicelist.append(choice)
1757        shape_choice = _infer_out_shape(x.shape, *shapes)
1758        tmp = []
1759        for choice in choicelist:
1760            tmp.append(F.broadcast_to(choice, shape_choice))
1761        choices = F.stack(tmp)
1762
1763    if x.ndim == 0 or choices.ndim == 0:
1764        const_utils.raise_value_error('input cannot be scalars')
1765    a = F.broadcast_to(x, shape_choice)
1766    dtype = choices.dtype
1767    # adjusts dtype for F.tensor_mul and F.gather_nd
1768    a = a.astype(mstype.int32)
1769    choices = choices.astype(mstype.int32)
1770    a = compile_utils.check_indices(
1771        choices.shape[0], a, mode, allow_negative_index=False)
1772
1773    grids = []
1774    ndim = len(a.shape)
1775    for i in range(ndim):
1776        dim_grid = const_utils.make_tensor(
1777            F.make_range(a.shape[i]), mstype.int32)
1778        dim_shape = expanded_shape(ndim, a.shape[i], i)
1779        dim_grid = F.broadcast_to(dim_grid.reshape(dim_shape), a.shape)
1780        grids.append(dim_grid)
1781    grid = P.Stack(-1)(grids)
1782    indices = P.Concat(-1)((a.reshape(a.shape + (1,)), grid))
1783    return F.gather_nd(choices, indices).astype(dtype)
1784
1785
1786def searchsorted(x, v, side='left', sorter=None):
1787    """
1788    Finds indices where elements should be inserted to maintain order.
1789
1790    Args:
1791        v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into `a`.
1792        side ('left', 'right', optional): If 'left', the index of the first suitable
1793            location found is given. If 'right', return the last such index. If there is
1794            no suitable index, return either 0 or N (where N is the length of `a`).
1795        sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional array of
1796            integer indices that sort array `a` into ascending order. They are typically
1797            the result of argsort.
1798
1799    Returns:
1800        Tensor, array of insertion points with the same shape as `v`.
1801
1802    Raises:
1803        ValueError: if argument for `side` or `sorter` is invalid.
1804
1805    Supported Platforms:
1806        ``Ascend`` ``GPU`` ``CPU``
1807
1808    Examples:
1809        >>> from mindspore import numpy as np
1810        >>> x = np.array([1,2,3,4,5])
1811        >>> print(x.searchsorted(3))
1812        2
1813    """
1814
1815    if side not in ('left', 'right'):
1816        raise ValueError(f"For 'Tensor.searchsorted', the argument 'side' should be one of in "
1817                            f"['left', 'right'], but got {side}.")
1818    if not isinstance(v, Tensor):
1819        v = const_utils.make_tensor(v)
1820    if sorter is not None:
1821        if not isinstance(sorter, (int, list, tuple, Tensor)):
1822            raise TypeError("For Tensor.searchsorted, the type of the argument 'sorter' must be one of 'int', "
1823                            "'list', 'tuple', 'Tensor', but got {}.".format(type(sorter)))
1824        if not isinstance(sorter, Tensor):
1825            sorter = const_utils.make_tensor(sorter)
1826        if sorter.size != x.size:
1827            raise ValueError('The size of sorter must be the same as the Tensor')
1828
1829    dtype = mstype.int32
1830    right = (side == 'right')
1831    search_sorted_ = P.SearchSorted(dtype, right)
1832    return search_sorted_(x, v, sorter)
1833
1834
1835def fill(x, value):
1836    """
1837    `Tensor.fill` is deprecated, please use `ops.fill` instead.
1838    """
1839    if value is None:
1840        if x.dtype not in (mstype.float16, mstype.float32, mstype.float64):
1841            const_utils.raise_type_error("If None is used as value, the original Tensor's dtype must be float.")
1842        value = nan_tensor
1843        return F.tile(value, x.shape).astype(x.dtype)
1844    return F.fill(x.dtype, x.shape, value)
1845
1846
1847def fills(x, value):
1848    """
1849    `Tensor.fills` is deprecated, please use `ops.fill` instead.
1850    """
1851    return F.fills(x, value)
1852
1853
1854def fill_diagonal(x, fill_value, wrap=False):
1855    """
1856    Fills the main diagonal of a Tensor with a specified value and returns the result. The input has at least
1857    2 dimensions, and all dimensions of input must be equal in length when the dimension of input is greater than 2.
1858    """
1859
1860    return P.FillDiagonal(fill_value, wrap)(x)
1861
1862
1863def ptp(x, axis=None, keepdims=False):
1864    """
1865    The name of the function comes from the acronym for "peak to peak".
1866
1867    Note:
1868        Numpy arguments `dtype` and `out` are not supported.
1869
1870    Args:
1871        x (Tensor): Input tensor.
1872        axis (Union[None, int, tuple(int)]): Axis or axes along which the range is computed.
1873            The default is to compute the variance of the flattened array. Default: ``None``.
1874        keepdims (bool): Default is False.
1875
1876    Returns:
1877        Tensor.
1878
1879    Raises:
1880        TypeError: if the input is not a tensor.
1881
1882    Supported Platforms:
1883        ``Ascend`` ``GPU`` ``CPU``
1884
1885    Examples:
1886        >>> from mindspore import Tensor
1887        >>> x = Tensor([[4.0, 9.0, 2.0, 10.0], [6.0, 9.0, 7.0, 12.0]]).astype("float32")
1888        >>> print(x.ptp(axis=1))
1889        [8. 6.]
1890        >>> print(x.ptp(axis=0))
1891        [2. 0. 5. 2.]
1892    """
1893    if not isinstance(keepdims, bool):
1894        const_utils.raise_type_error('keepdims should be boolean')
1895    if axis is None:
1896        axis = ()
1897    else:
1898        validator.check_axis_type(axis, True, True, False)
1899        axis = check_axis_valid(axis, x.ndim)
1900
1901    return x.max(axis, keepdims) - x.min(axis, keepdims)
1902
1903
1904def clamp(x, min=None, max=None):
1905    """
1906    Clamps all elements in `x` into the range `[min, max]`.
1907    """
1908    return F.clamp(x, min, max)
1909
1910
1911def clip(x, min=None, max=None):
1912    """
1913    Clamps all elements in `x` into the range `[min, max]`.
1914    """
1915    return F.clamp(x, min, max)
1916
1917
1918def var(x, axis=None, ddof=0, keepdims=False):
1919    """
1920    Compute the variance along the specified axis.
1921    The variance is the average of the squared deviations from the mean, i.e.,
1922    :math:`var = mean(abs(x - x.mean())**2)`.
1923
1924    Return the variance, which is computed for the flattened array by default,
1925    otherwise over the specified axis.
1926
1927    Note:
1928        Numpy arguments `dtype`, `out` and `where` are not supported.
1929
1930    Args:
1931        x (Tensor): A Tensor to be calculated.
1932        axis (Union[None, int, tuple(int)]): Axis or axes along which the variance is computed.
1933            The default is to compute the variance of the flattened array. Default: `None`.
1934        ddof (int): Means Delta Degrees of Freedom. Default: 0.
1935            The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the number of elements.
1936        keepdims (bool): Default: `False`.
1937
1938    Supported Platforms:
1939        ``Ascend`` ``GPU`` ``CPU``
1940
1941    Returns:
1942        Standard deviation tensor.
1943
1944    Examples:
1945        >>> import mindspore.numpy as np
1946        >>> input_x = np.array([1., 2., 3., 4.])
1947        >>> print(input_x.var())
1948        1.25
1949    """
1950    if 0 in x.shape:
1951        return nan_tensor.astype(x.dtype)
1952    if not isinstance(ddof, int) or not isinstance(keepdims, int):
1953        const_utils.raise_type_error("integer argument expected")
1954
1955    if axis is None:
1956        axis = ()
1957    else:
1958        axis = check_and_canonicalize_axes(axis, x.ndim)
1959    x_mean = F.mean(x, axis, True)
1960    x_sub = F.tensor_sub(x, x_mean)
1961    x_pow = F.tensor_pow(x_sub, 2)
1962    if keepdims:
1963        x_sum = _reduce_sum_keepdims(x_pow, axis)
1964    else:
1965        x_sum = _reduce_sum_default(x_pow, axis)
1966
1967    if axis == ():
1968        axis = F.make_range(x.ndim)
1969    nums = 1
1970    for ax in axis:
1971        nums *= x.shape[ax]
1972    return F.tensor_div(x_sum, nums - ddof)
1973
1974
1975def std(x, axis=None, ddof=0, keepdims=False):
1976    """
1977    Compute the standard deviation along the specified axis.
1978    The standard deviation is the square root of the average of the squared deviations
1979    from the mean, i.e., :math:`std = sqrt(mean(abs(x - x.mean())**2))`.
1980
1981    Return the standard deviation, which is computed for the flattened array by default,
1982    otherwise over the specified axis.
1983
1984    Note:
1985        Numpy arguments `dtype`, `out` and `where` are not supported.
1986
1987    Args:
1988        x (Tensor): A Tensor to be calculated.
1989        axis (Union[None, int, tuple(int)]): Axis or axes along which the standard
1990            deviation is computed. Default: `None`.
1991
1992            If `None`, compute the standard deviation of the flattened array.
1993        ddof (int): Means Delta Degrees of Freedom. The divisor used in calculations is :math:`N - ddof`,
1994            where :math:`N` represents the number of elements. Default: 0.
1995        keepdims: Default: `False`.
1996
1997    Returns:
1998        Standard deviation tensor.
1999
2000    Supported Platforms:
2001        ``Ascend`` ``GPU`` ``CPU``
2002
2003    Examples:
2004        >>> import mindspore.numpy as np
2005        >>> input_x = np.array([1., 2., 3., 4.])
2006        >>> print(input_x.std())
2007        1.118034
2008    """
2009    x_var = var(x, axis, ddof, keepdims)
2010    return F.tensor_pow(x_var, 0.5)
2011
2012
2013def gather_elements(input, dim, index):
2014    r"""
2015    Gathers elements along an axis specified by dim.
2016
2017    Refer to :func:`mindspore.ops.gather_elements` for more detail.
2018    """
2019    return F.gather_elements(input, dim, index)
2020
2021
2022def sum(input, axis=None, dtype=None, keepdims=False, initial=None):  # pylint: disable=redefined-builtin
2023    """
2024    Return sum of array elements over a given axis.
2025
2026    Note:
2027        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and
2028        `extobj` are not supported.
2029
2030    Args:
2031        input (Union[int, float, bool, list, tuple, Tensor]): Elements to sum.
2032        axis (Union[None, int, tuple(int)]): Axis or axes along which a sum is performed. Default: ``None``.
2033            If None, sum all of the elements of the input array.
2034            If axis is negative it counts from the last to the first axis.
2035            If axis is a tuple of ints, a sum is performed on all of the axes specified in the tuple
2036            instead of a single axis or all the axes as before.
2037        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
2038            output Tensor.
2039        keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
2040            dimensions with size one. With this option, the result will broadcast correctly against the input array.
2041            If the default value is passed, then keepdims will not be passed through to the sum method of
2042            sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
2043            implement keepdims any exceptions will be raised.
2044        initial (scalar): Starting value for the sum.
2045
2046    Returns:
2047        Tensor. A tensor with the same shape as input, with the specified axis removed.
2048        If input tensor is a 0-d array, or if axis is None, a scalar is returned.
2049
2050    Raises:
2051        TypeError: If input is not array_like or `axis` is not int or tuple of ints or
2052            `keepdims` is not integer or `initial` is not scalar.
2053        ValueError: If any axis is out of range or duplicate axes exist.
2054
2055    Supported Platforms:
2056        ``Ascend`` ``GPU`` ``CPU``
2057
2058    Examples:
2059        >>> import mindspore.numpy as np
2060        >>> input_x = np.array([-1, 0, 1]).astype('int32')
2061        >>> print(input_x.sum())
2062        0
2063        >>> input_x = np.arange(10).reshape(2, 5).astype('float32')
2064        >>> print(input_x.sum(axis=1))
2065        [10. 35.]
2066    """
2067    if initial is not None and not isinstance(initial, (int, float, bool)):
2068        raise TypeError(f"For Tensor.sum, initial must be int, float or bool, but got {type(initial)}.")
2069    res = F.sum(input, axis, keepdims)
2070    if initial is not None:
2071        res += initial
2072    if dtype is not None:
2073        res = res.astype(dtype)
2074    return res
2075
2076
2077@_primexpr
2078def _check_sum_to_size(size, input_dim, shape_input):
2079    """Check the length of size of sum_to_size."""
2080    if len(size) > input_dim:
2081        raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_input}.")
2082
2083
2084@_primexpr
2085def _count_axes(size, input_shape, shape_input):
2086    """Count the sum axes for sum_to_size."""
2087    axes = []
2088    for i in range(len(size)):
2089        element = size[i]
2090        if element != input_shape[i] and element == 1:
2091            axes.append(i)
2092        elif element != input_shape[i]:
2093            raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_input}.")
2094    return axes
2095
2096
2097def sum_to_size(input, *size):
2098    """
2099    Sum `input` to the `size`. `size` must be expandable to the Tensor size.
2100    """
2101    if len(size) == 1 and isinstance(size[0], tuple):
2102        size = size[0]
2103    shape_input = input.shape
2104    _check_sum_to_size(size, input.ndim, shape_input)
2105    if len(size) < input.ndim:
2106        pre_axis = tuple(axis for axis in range(input.ndim - len(size)))
2107        input = input.sum(pre_axis)
2108
2109    axes = _count_axes(size, input.shape, shape_input)
2110    if axes:
2111        return input.sum(tuple(axes), keepdims=True)
2112    return input
2113
2114
2115def nansum(input, axis=None, keepdims=False, *, dtype=None):
2116    """
2117    Computes sum of all elements, treating NaNs as zero.
2118    """
2119    return F.nansum(input, axis=axis, keepdims=keepdims, dtype=dtype)
2120
2121
2122def nanmean(input, axis=None, keepdims=False, *, dtype=None):
2123    r"""
2124    Computes the mean of input tensor, ignoring NaN.
2125    """
2126    return F.nanmean(input, axis, keepdims, dtype=dtype)
2127
2128
2129def nanmedian(input, axis=-1, keepdims=False):
2130    r"""
2131    Computes the median and indices of input tensor, ignoring NaN.
2132    If all elements in the specified dimensions are NaN, the result will be NaN.
2133    """
2134    return F.nanmedian(input, axis, keepdims)
2135
2136
2137def repeat(x, repeats, axis=None):
2138    """
2139    Repeat elements of an array.
2140
2141    Args:
2142        x (Tensor): Input tensor.
2143        repeats (Union[int, tuple, list]): The number of repetitions for each element.
2144            `repeats` is broadcasted to fit the shape of the given axis.
2145        axis (int, optional): The axis along which to repeat values. By default,
2146            use the flattened input tensor, and return a flat output tensor.
2147
2148    Returns:
2149        Tensor, has the same shape as input tensor except along the given axis.
2150
2151    Raises:
2152        ValueError: if axis is out of range.
2153        TypeError: if input is not a Tensor.
2154
2155    Supported Platforms:
2156        ``Ascend`` ``GPU`` ``CPU``
2157
2158    Examples:
2159        >>> import mindspore.numpy as np
2160        >>> x = np.array(3)
2161        >>> print(x.repeat(4))
2162        [3 3 3 3]
2163        >>> x = np.array([[1,2],[3,4]])
2164        >>> print(x.repeat(2))
2165        [1 1 2 2 3 3 4 4]
2166        >>> print(x.repeat(3, axis=1))
2167        [[1 1 1 2 2 2]
2168        [3 3 3 4 4 4]]
2169        >>> print(x.repeat([1,2], axis=0))
2170        [[1 2]
2171        [3 4]
2172        [3 4]]
2173    """
2174    if not isinstance(repeats, (tuple, list)):
2175        repeats = (repeats,)
2176    for element in repeats:
2177        if not isinstance(element, int):
2178            const_utils.raise_type_error("Each element should be integer")
2179    if axis is None:
2180        x = ravel(x)
2181        axis = 0
2182    if not isinstance(axis, int):
2183        const_utils.raise_type_error('axes should be integers')
2184    check_axis_in_range(axis, x.ndim)
2185    axis = axis + x.ndim if axis < 0 else axis
2186
2187    if len(repeats) == 1:
2188        repeats = repeats[0]
2189        if repeats == 0:
2190            return empty_tensor(x.dtype)
2191        return repeat_elements(x, repeats, axis)
2192    size = x.shape[axis]
2193    if len(repeats) != size:
2194        const_utils.raise_value_error(
2195            'operands could not be broadcast together')
2196    subs = P.Split(axis, size)(x)
2197    repeated_subs = []
2198    for sub_item, rep in zip(subs, repeats):
2199        if rep != 0:
2200            repeated_subs.append(repeat_elements(sub_item, rep, axis))
2201    return P.Concat(axis)(repeated_subs)
2202
2203
2204def repeat_interleave(x, repeats, dim=None):
2205    """
2206    For details, please refer to :func:`mindspore.ops.repeat_interleave`.
2207    """
2208    return F.repeat_interleave(x, repeats, dim)
2209
2210
2211def hardshrink(x, lambd=0.5):
2212    r"""
2213    Apply the Hard Shrink function for a tensor. Calculates the output according to the input elements.
2214
2215    The formula is defined as follows:
2216
2217    .. math::
2218        \text{HardShrink}(x) =
2219        \begin{cases}
2220        x, & \text{ if } x > \lambda \\
2221        x, & \text{ if } x < -\lambda \\
2222        0, & \text{ otherwise }
2223        \end{cases}
2224
2225    Args:
2226        x (Tensor): Input tensor.
2227        lambd (float): The threshold :math:`\lambda` defined by the Hard Shrink formula. Default: 0.5.
2228
2229    Returns:
2230        Tensor, has the same shape and data type as input tensor.
2231
2232    Raises:
2233        TypeError: If `lambd` is not a float.
2234        TypeError: If dtype of the input tensor is neither float16 nor float32.
2235
2236    Supported Platforms:
2237        ``Ascend`` ``GPU`` ``CPU``
2238
2239    Examples:
2240        >>> import mindspore.numpy as np
2241        >>> x = np.array([[0.5,  1,  2.0], [0.0533, 0.0776, -2.1233]])
2242        >>> print(x.hardshrink())
2243        [[ 0.      1.      2.    ]
2244        [ 0.      0.     -2.1233]]
2245    """
2246    return P.HShrink(lambd)(x)
2247
2248
2249def heaviside(x, values):
2250    r"""
2251    For details, please refer to :func:`mindspore.ops.heaviside`.
2252    """
2253    return F.heaviside(x, values)
2254
2255
2256def hypot(x, other):
2257    r'''
2258    For details, please refer to :func:`mindspore.ops.hypot`.
2259    '''
2260    return F.hypot(x, other)
2261
2262
2263def softmax(input, axis, dtype=None):
2264    return F.softmax(input, axis, dtype=dtype)
2265
2266
2267def soft_shrink(input, lambd=0.5):
2268    """Apply the soft shrink function for a tensor. Calculates the output according to the input elements."""
2269    return F.soft_shrink(input, lambd)
2270
2271
2272def matrix_determinant(input):
2273    """Computes the determinant of one or more square matrices."""
2274    return F.matrix_determinant(input)
2275
2276
2277def log_matrix_determinant(input):
2278    """Computes the sign and the log of the absolute value of the determinant of one or more square matrices."""
2279    return F.log_matrix_determinant(input)
2280
2281
2282def getitem(data, index):
2283    """Implementation of `getitem`."""
2284    return data.__getitem__(index)
2285
2286
2287def setitem(data, index, value):
2288    """Implementation of `setitem`."""
2289    return data.__setitem__(index, value)
2290
2291
2292def item(data, *args):
2293    """Implementation of `item`."""
2294    return compile_utils.tensor_item(data, *args)
2295
2296
2297def itemset(data, *args):
2298    """Implementation of `itemset`."""
2299    return compile_utils.tensor_itemset(data, *args)
2300
2301
2302@constexpr
2303def cast_to_str(data):
2304    return str(data)
2305
2306
2307def str_func(*data):
2308    """Implementation of `str`."""
2309    data_len = len(data)
2310    if data_len >= 2:
2311        const_utils.raise_type_error("str() requires 0 or 1 arguments.")
2312    if data_len == 0:
2313        return ''
2314    data = data[0]
2315    if F.isconstant(data):
2316        return cast_to_str(data)
2317    return data.__str__()
2318
2319
2320@constexpr
2321def cast_to_bool(data):
2322    return bool(data)
2323
2324
2325def bool_func(*data):
2326    """Implementation of `bool`."""
2327    data_len = len(data)
2328    if data_len >= 2:
2329        const_utils.raise_type_error("bool() requires 0 or 1 arguments.")
2330    if data_len == 0:
2331        return False
2332    data = data[0]
2333    if isinstance(data, (Tensor, Tensor_)):
2334        tensor_shape = F.shape(data)
2335        tensor_shape_len = len(tensor_shape)
2336        if tensor_shape_len == 0 or (tensor_shape_len == 1 and tensor_shape[0] == 1):
2337            data = F.cast(data, mstype.bool_)
2338            return TensorToScalar()(data)
2339        raise ValueError("The truth value of an array with more than one element is ambiguous.")
2340    if not F.isconstant(data):
2341        if hasattr(data, "__bool__"):
2342            return data.__bool__()
2343        if hasattr(data, "__len__"):
2344            return len(data) != 0
2345        return F.scalar_cast(data, mstype.bool_)
2346    if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
2347        raise TypeError("bool() does not support sparse tensor input.")
2348    return cast_to_bool(data)
2349
2350
2351@constexpr
2352def cast_to_int(*data):
2353    target = data[0]
2354    if isinstance(target, (Tensor, Tensor_)):
2355        target = Tensor(target, internal=True)
2356    if len(data) == 1:
2357        return int(target)
2358    return int(target, data[1])
2359
2360
2361def int_func(*data):
2362    """Implementation of `int`."""
2363    data_len = len(data)
2364    if data_len >= 3:
2365        const_utils.raise_type_error("int() requires 0, 1 or 2 arguments.")
2366    if data_len == 0:
2367        return 0
2368    target = data[0]
2369    base = 10
2370    if data_len == 2:
2371        base = data[1]
2372    if not F.isconstant(target):
2373        if base != 10:
2374            const_utils.raise_type_error("int() does not support non-constant input when 'base' is specified.")
2375        if isinstance(target, Tensor):
2376            tensor_shape = F.shape(target)
2377            tensor_shape_len = len(tensor_shape)
2378            if tensor_shape_len == 0 or (tensor_shape_len == 1 and tensor_shape[0] == 1):
2379                target = F.cast(target, mstype.int64)
2380                return TensorToScalar()(target)
2381            raise ValueError(f"Can not convert Tensor with more than one element to Scalar, "
2382                             f"while the data's shape is : {tensor_shape}")
2383        return F.scalar_cast(target, mstype.int64)
2384    if isinstance(target, (CSRTensor, COOTensor, RowTensorInner)):
2385        const_utils.raise_type_error(
2386            "int() does not support sparse tensor input.")
2387    return cast_to_int(*data)
2388
2389
2390@constexpr
2391def cast_to_float(data):
2392    if isinstance(data, (Tensor, Tensor_)):
2393        data = Tensor(data, internal=True)
2394    return float(data)
2395
2396
2397def float_func(*data):
2398    """Implementation of `float`."""
2399    data_len = len(data)
2400    if data_len >= 2:
2401        const_utils.raise_type_error("float() requires 0 or 1 arguments.")
2402    if data_len == 0:
2403        return 0.0
2404    data = data[0]
2405    if not F.isconstant(data):
2406        if isinstance(data, Tensor):
2407            tensor_shape = F.shape(data)
2408            tensor_shape_len = len(tensor_shape)
2409            if tensor_shape_len == 0 or (tensor_shape_len == 1 and tensor_shape[0] == 1):
2410                data = F.cast(data, mstype.float32)
2411                return TensorToScalar()(data)
2412            raise ValueError(f"Can not convert Tensor with more than one element to Scalar, "
2413                             f"while the data's shape is: {tensor_shape}")
2414        return F.scalar_cast(data, mstype.float32)
2415    if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
2416        const_utils.raise_type_error(
2417            "float() does not support sparse tensor input.")
2418    return cast_to_float(data)
2419
2420
2421def list_func(data):
2422    """Implementation of `list`."""
2423    if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
2424        raise TypeError(
2425            "list() does not support single sparse tensor input.")
2426    if isinstance(data, dict):
2427        data = data.keys()
2428    if isinstance(data, (tuple, list)) and F.is_sequence_shape_unknown(data):
2429        ret = mutable([], True)
2430        if F.is_dynamic_sequence_element_unknown(data):
2431            return ret
2432    else:
2433        ret = F.make_list()
2434    for i in data:
2435        ret = ret + F.make_list(i)
2436    return ret
2437
2438def tuple_func(data):
2439    """Implementation of `tuple`."""
2440    if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
2441        raise TypeError("tuple() does not support single sparse tensor input.")
2442    if isinstance(data, dict):
2443        data = data.keys()
2444    if isinstance(data, (tuple, list)) and F.is_sequence_shape_unknown(data):
2445        ret = mutable((), True)
2446        if F.is_dynamic_sequence_element_unknown(data):
2447            return ret
2448    else:
2449        ret = F.make_tuple()
2450    for i in data:
2451        ret = ret + F.make_tuple(i)
2452    return ret
2453
2454
2455def ms_zip(*data):
2456    """Implementation of `tuple`."""
2457    x = ()
2458    for i in data:
2459        if isinstance(i, Tensor):
2460            if len(F.shape(i)) == 0:
2461                raise TypeError("Cannot iterate over a scalar tensor.")
2462            i = tuple(i)
2463        x = x + (i,)
2464    return composite.zip_operation(*x)
2465
2466
2467def max_tensor(*data):
2468    """Get the max of tensor inputs."""
2469    if len(data) == 1:
2470        data = data[0]
2471    max_tensor_data = data[0]
2472    for input_data in data:
2473        max_tensor_data = P.Maximum()(max_tensor_data, input_data)
2474    return max_tensor_data
2475
2476
2477def get_max_min_data_len(*data):
2478    """Get the real length of data."""
2479    len_data = 0
2480    if isinstance(data, tuple) and len(data) == 1 and isinstance(data[0], (dict, list, tuple)):
2481        data = data[0]
2482        if isinstance(data, dict):
2483            data = iter(data)
2484    if isinstance(data, (dict, list, tuple)):
2485        len_data = len(data)
2486    else:
2487        raise TypeError("max() or min() does not support the data type.")
2488    return len_data
2489
2490
2491def get_tensor_num(data):
2492    """Get the number of tensor in data."""
2493    tensor_num = 0
2494    for input_data in data:
2495        if isinstance(input_data, Tensor):
2496            tensor_shape = F.shape(input_data)
2497            tensor_shape_len = len(tensor_shape)
2498            if tensor_shape_len != 0 and not (tensor_shape_len == 1 and tensor_shape[0] == 1):
2499                raise ValueError("The truth value of an array with more than one element is ambiguous.")
2500            tensor_num = tensor_num + 1
2501    return tensor_num
2502
2503
2504def exist_tensor(data):
2505    """Check if tensor exist in sequence."""
2506    for input_data in data:
2507        if isinstance(input_data, Tensor):
2508            return True
2509        if isinstance(input_data, (list, tuple)):
2510            if exist_tensor(input_data):
2511                return True
2512    return False
2513
2514
2515def check_sequence_all_variable_scalar(x, str_info):
2516    """Check whether x can be used in SequenceMax and SequenceMin"""
2517    if F.is_sequence_shape_unknown(x):
2518        if F.is_dynamic_sequence_element_unknown(x):
2519            raise ValueError(str_info + "() arg is an empty sequence.")
2520        if not isinstance(x[0], (int, float)):
2521            raise ValueError(
2522                "When the input to " + str_info + "() is dynamic length sequence, only support scalar type input")
2523        return True
2524    contain_variable_scalar = False
2525    for i in x:
2526        if not isinstance(i, (int, float)):
2527            return False
2528        if not contain_variable_scalar and not F.isconstant(i):
2529            contain_variable_scalar = True
2530    return contain_variable_scalar
2531
2532
2533def ms_max_one_element(x):
2534    """Implementation of `max` which inputs has only one element."""
2535    if isinstance(x, Tensor):
2536        tensor_shape = F.shape(x)
2537        tensor_shape_len = len(tensor_shape)
2538        if tensor_shape_len == 0:
2539            raise TypeError("Cannot iterate over a scalar tensor.")
2540        if tensor_shape_len >= 2:
2541            raise ValueError("The truth value of an array with more than one element is ambiguous.")
2542        return x.max()
2543    # Deal with Tensor in tuple or list
2544    if isinstance(x, (list, tuple)):
2545        if check_sequence_all_variable_scalar(x, "max"):
2546            return SequenceMax()(x)
2547        if len(x) == 0:
2548            raise ValueError("max() arg is an empty sequence.")
2549        tensor_num = get_tensor_num(x)
2550        if F.isconstant(tensor_num) and F.isconstant(len(x)) and tensor_num == len(x):
2551            return max_tensor(x)
2552        if tensor_num != 0:
2553            return F._py_interpret("max(x)", {}, {"x": x})
2554        if exist_tensor(x):
2555            raise TypeError("max() cannot support tensor in list or tuple nested now.")
2556    if not isinstance(x, (int, float, bool)):
2557        return F._py_interpret("max(x)", {}, {"x": x})
2558    raise TypeError("The object is not iterable.")
2559
2560
2561def ms_max(*data):
2562    """Implementation of `max`."""
2563    len_data = get_max_min_data_len(data)
2564    if len_data <= 0: # pylint: disable=no-else-raise
2565        raise TypeError("max() requires 1 argument at least.")
2566    elif len_data == 1:
2567        x = data[0]
2568        return ms_max_one_element(x)
2569    elif len_data >= 2:
2570        tensor_num = get_tensor_num(data)
2571        # All inputs is Tensor
2572        if F.isconstant(tensor_num) and F.isconstant(len_data) and tensor_num == len_data:
2573            return max_tensor(*data)
2574        if tensor_num != 0:
2575            return F._py_interpret("max(data)", {}, {"data": data})
2576        # exist tensor in list/tuple
2577        if exist_tensor(data):
2578            raise ValueError("The truth value of an array with more than one element is ambiguous.")
2579    return F._py_interpret("max(data)", {}, {"data": data})
2580
2581
2582def min_tensor(*data):
2583    """Get the min of tensor inputs."""
2584    if len(data) == 1:
2585        data = data[0]
2586    min_tensor_data = data[0]
2587    for input_data in data:
2588        min_tensor_data = P.Minimum()(min_tensor_data, input_data)
2589    return min_tensor_data
2590
2591
2592def min_list_tuple(seq1, seq2):
2593    """Get the min of two sequence."""
2594    if len(seq1) == 0:
2595        return seq1
2596    if len(seq2) == 0:
2597        return seq2
2598    min_len = min(len(seq1), len(seq2))
2599    for i in min_len:
2600        if seq1[i] == seq2[i]:
2601            continue
2602        iter_min = ms_min([seq1[i], seq2[i]])
2603        if iter_min == seq1[i]:
2604            return seq1
2605        return seq2
2606    return seq1
2607
2608
2609def ms_min_one_element(x):
2610    """Implementation of `min` which inputs has only one element."""
2611    if isinstance(x, Tensor):
2612        tensor_shape = F.shape(x)
2613        tensor_shape_len = len(tensor_shape)
2614        if tensor_shape_len == 0:
2615            raise TypeError("Cannot iterate over a scalar tensor.")
2616        if tensor_shape_len >= 2:
2617            raise ValueError("The truth value of an array with more than one element is ambiguous.")
2618        return x.min()
2619    # Deal with Tensor in tuple or list
2620    if isinstance(x, (list, tuple)):
2621        if check_sequence_all_variable_scalar(x, "min"):
2622            return SequenceMin()(x)
2623        if len(x) == 0:
2624            raise ValueError("min() arg is an empty sequence.")
2625        tensor_num = get_tensor_num(x)
2626        if F.isconstant(tensor_num) and F.isconstant(len(x)) and tensor_num == len(x):
2627            return min_tensor(x)
2628        if tensor_num != 0:
2629            return F._py_interpret("min(x)", {}, {"x": x})
2630        if exist_tensor(x):
2631            raise TypeError("min() cannot support tensor in list or tuple nested now.")
2632    if not isinstance(x, (int, float, bool)):
2633        return F._py_interpret("min(x)", {}, {"x": x})
2634    raise TypeError("The object is not iterable.")
2635
2636
2637def ms_min(*data):
2638    """Implementation of `min`."""
2639    len_data = get_max_min_data_len(data)
2640    if len_data <= 0: # pylint: disable=no-else-raise
2641        raise TypeError("min() requires 1 argument at least.")
2642    elif len_data == 1:
2643        x = data[0]
2644        return ms_min_one_element(x)
2645    elif len_data >= 2:
2646        tensor_num = get_tensor_num(data)
2647        # All inputs is Tensor
2648        if F.isconstant(tensor_num) and F.isconstant(len_data) and tensor_num == len_data:
2649            return min_tensor(*data)
2650        if tensor_num != 0:
2651            return F._py_interpret("min(data)", {}, {"data": data})
2652        # exist tensor in list/tuple
2653        if exist_tensor(data):
2654            raise ValueError("The truth value of an array with more than one element is ambiguous.")
2655    return F._py_interpret("min(data)", {}, {"data": data})
2656
2657
2658def ms_sum(*data):
2659    """Implementation of `sum`."""
2660    len_data = len(data)
2661    if len_data <= 0 or len_data > 2:
2662        raise TypeError("sum() requires 1 or 2 arguments.")
2663    x = data[0]
2664    if isinstance(x, (int, float, bool)):
2665        data_type = F.typeof(x)
2666        raise TypeError(str(data_type) + " object is not iterable.")
2667    if isinstance(x, Tensor):
2668        tensor_shape = F.shape(x)
2669        if len(tensor_shape) == 0:
2670            raise TypeError("Cannot iterate over a scalar tensor.")
2671    if isinstance(x, dict):
2672        x = x.keys()
2673    result = 0
2674    if len_data == 2:
2675        result = data[1]
2676    if isinstance(x, Tensor):
2677        result += x.sum(0)
2678    else:
2679        for element in x:
2680            result += element
2681    return result
2682
2683
2684def ms_len(data):
2685    """Implementation of `len`."""
2686    return data.__len__()
2687
2688
2689def floor(x):
2690    """Rounds a tensor down to the closest integer element-wise."""
2691    return F.floor(x)
2692
2693
2694def floor_divide(input, other):
2695    r"""
2696    Divides the first input tensor by the second input tensor element-wise and round down to the closest integer.
2697    """
2698    return F.floor_divide(input, other)
2699
2700
2701def uadd(x):
2702    """Implementation of `uadd`."""
2703    return x.__pos__()
2704
2705
2706def usub(x):
2707    """Implementation of `usub`."""
2708    return x.__neg__()
2709
2710
2711def scalar_truediv(x, y):
2712    """Implementation of `scalar_truediv`."""
2713    return x.__truediv__(y)
2714
2715
2716def scalar_floordiv(x, y):
2717    """Implementation of `scalar_floordiv`."""
2718    return x.__floordiv__(y)
2719
2720
2721def bool_(x):
2722    """Implementation of `bool`."""
2723    return x.__bool__()
2724
2725
2726def check_len_(x):
2727    """Check length is not 0"""
2728    return x.__len__() != 0
2729
2730
2731def real_bool_(x):
2732    """bool function to get truth value"""
2733    return bool(x)
2734
2735
2736def enumerate_(x, start=0):
2737    """Enumerate list or tuple or tensor."""
2738    x_type = F.typeof(x)
2739    ret = ()
2740    op_name = "enumerate"
2741    if isinstance(x, (int, float, bool)):
2742        raise TypeError(f"For 'enumerate', the 'first input' should be tuple or list or tensor, but got {type(x)}.")
2743    if check_is_const_int(start, op_name, "start"):
2744        if check_is_tensor(x_type):
2745            for i in range(x.shape[0]):
2746                ret += ((start + i, x[i]),)
2747        else:
2748            ret = zip(range(start, start + len(x)), x)
2749    return ret
2750
2751
2752def expand_tensor_as(x, y):
2753    """Expand tensor"""
2754    return F.broadcast_to(x, shape_(y))
2755
2756
2757def broadcast_to(x, shape):
2758    """Broadcasts tensor to a given shape."""
2759    return F.broadcast_to(x, shape)
2760
2761
2762def expand_dims(x, axis):
2763    """
2764    Insert a dimension of shape 1 at the specified axis of Tensor.
2765    """
2766    validator.check_is_int(axis, 'axis')
2767    return P.ExpandDims()(x, axis)
2768
2769
2770def unsqueeze(input, dim):
2771    """For details, please refer to :func:`mindspore.ops.unsqueeze`."""
2772    return P.ExpandDims()(input, dim)
2773
2774
2775def masked_fill(x, mask, value):
2776    """
2777    Fills elements of Tensor with value where mask is True.
2778    """
2779    check_is_tensor(mask)
2780    check_type_name('mask', mask.dtype, [mstype.bool_], "Tensor")
2781    return F.masked_fill(x, mask, value)
2782
2783
2784def col2im(*inputs):
2785    """
2786    inputs: input_x, output_size, kernel_size, dilation, padding_value, stride
2787    Combines an array of sliding local blocks into a large containing tensor.
2788    """
2789    return F.col2im(*inputs)
2790
2791
2792def narrow(input, axis, start, length):
2793    """
2794    Returns a narrowed tensor from input tensor.
2795    The dimension axis is input from start to start + length.
2796    """
2797    return F.narrow(input, axis, start, length)
2798
2799
2800def to_csr(x):
2801    """
2802    Convert a Tensor to CSRTensor.
2803    Please refer to tensor.py Tensor::to_csr(self) for more details.
2804    """
2805    return F.dense_to_sparse_csr(x)
2806
2807
2808def to_coo(x):
2809    """
2810    Convert a Tensor to COOTensor.
2811    Please refer to tensor.py Tensor::to_coo(self) for more details.
2812    """
2813    return F.dense_to_sparse_coo(x)
2814
2815
2816def tolist(x):
2817    """
2818    Convert a Tensor to List, if the input is Tensor scalar, Python scalar will be returned.
2819    """
2820    return x.asnumpy().tolist()
2821
2822
2823@constexpr
2824def check_select_condition(cond_type):
2825    """
2826    Check select input condition.
2827    """
2828    if isinstance(cond_type, mstype.TensorType):
2829        return
2830    raise TypeError(
2831        f"For select, the argument condition should be Tensor, but got {cond_type}.")
2832
2833
2834@constexpr
2835def check_select_input(y, x_dtype):
2836    """
2837    Check select input x and y.
2838    """
2839    if not isinstance(y, (int, float)):
2840        raise TypeError(f"For 'Tensor.select', the argument 'y' should be Tensor, int or float,"
2841                        f" but got {type(y)}.")
2842    if isinstance(y, int) and x_dtype != mstype.int32:
2843        raise TypeError(f"For 'Tensor.select', if the argument 'y' is int,"
2844                        f" then the tensor type should be int32 but got {x_dtype}")
2845    if isinstance(y, float) and x_dtype != mstype.float32:
2846        raise TypeError(f"For 'Tensor.select', if the argument 'y' is float,"
2847                        f" then the tensor type should be float32 but got {x_dtype}")
2848
2849
2850def select(x, condition, y):
2851    """Returns the selected elements for tensor 'x' and input 'y' according to input 'condition'"""
2852    check_select_condition(F.typeof(condition))
2853    if not isinstance(y, Tensor):
2854        check_select_input(y, x.dtype)
2855    input_y = y
2856    if isinstance(y, (int, float)):
2857        input_y = F.zeros_like(x) + y
2858        if isinstance(y, int):
2859            input_y = F.cast(input_y, mstype.int32)
2860        else:
2861            input_y = F.cast(input_y, mstype.float32)
2862    return F.select(condition, x, input_y)
2863
2864
2865def view(x, *shape):
2866    """Reshape tensor, if shape is -1, reshape tensor into one dimension"""
2867    shape = check_view_shape(shape)
2868    return F.reshape(x, shape)
2869
2870
2871def view_as(input, other):
2872    """View self Tensor as the same shape as `other` ."""
2873    if not isinstance(other, (Tensor, Tensor_)):
2874        raise TypeError(f"For view_as, the input other must be a Tensor, but got {type(other)}")
2875    return F.reshape(input, other.shape)
2876
2877
2878def bitwise_and(x, y):
2879    """Returns bitwise `and` of two tensors element-wise."""
2880    return F.bitwise_and(x, y)
2881
2882
2883def bitwise_or(x, y):
2884    """Returns bitwise `or` of two tensors element-wise."""
2885    return F.bitwise_or(x, y)
2886
2887
2888def bitwise_xor(x, y):
2889    """Returns bitwise `xor` of two tensors element-wise."""
2890    return F.bitwise_xor(x, y)
2891
2892
2893def bitwise_left_shift(x, y):
2894    """Returns bitwise left shift of `x` by `other` bits."""
2895    return F.bitwise_left_shift(x, y)
2896
2897
2898def bitwise_right_shift(x, y):
2899    """Returns bitwise right shift of `x` by `other` bits."""
2900    return F.bitwise_right_shift(x, y)
2901
2902
2903def exp(x):
2904    """Returns exponential of a tensor element-wise."""
2905    return F.exp(x)
2906
2907
2908def real(x):
2909    r"""
2910    For details, please refer to :func:`mindspore.ops.real`.
2911    """
2912    return F.real(x)
2913
2914
2915def rsqrt(x):
2916    r"""
2917    For details, please refer to :func:`mindspore.ops.rsqrt`.
2918    """
2919    return F.rsqrt(x)
2920
2921
2922def reciprocal(x):
2923    r"""
2924    For details, please refer to :func:`mindspore.ops.reciprocal`.
2925    """
2926    return F.reciprocal(x)
2927
2928
2929def sqrt(x):
2930    """Returns sqrt of a tensor element-wise."""
2931    return F.sqrt(x)
2932
2933
2934def square(x):
2935    """Returns square of a tensor element-wise."""
2936    return F.square(x)
2937
2938
2939def sub(x, y):
2940    """Returns sub of a tensor element-wise."""
2941    return F.sub(x, y)
2942
2943
2944def t(input):
2945    """Transposes a 2-D tensor."""
2946    return F.t(input)
2947
2948
2949def tan(x):
2950    """Returns tangent of `x`."""
2951    return F.tan(x)
2952
2953
2954def tanh(x):
2955    """Returns hyperbolic tangent of `x`."""
2956    return F.tanh(x)
2957
2958
2959def cosh(x):
2960    """
2961    Computes hyperbolic cosine of `x` element-wise.
2962    """
2963    return F.cosh(x)
2964
2965
2966def ger(input, vec2):
2967    """Ger product of `input` and `vec2`."""
2968    return F.ger(input, vec2)
2969
2970
2971def gt(x, y):
2972    """Compare the value of the input parameters :math:`x > y` element-wise."""
2973    return F.gt(x, y)
2974
2975
2976def ge(x, y):
2977    """Compare the value of the input parameters :math:`x >= y` element-wise."""
2978    return F.ge(x, y)
2979
2980
2981def tensor_scatter_add(x, indices, updates):
2982    """
2983    Creates a new tensor by adding the values from the positions in `x` indicated by
2984    `indices`, with values from `updates`. When multiple values are given for the same
2985    index, the updated result will be the sum of all values.
2986    """
2987    return F.tensor_scatter_add(x, indices, updates)
2988
2989
2990def tensor_scatter_sub(x, indices, updates):
2991    """
2992    Creates a new tensor by subtracting the values from the positions in `x` indicated by
2993    `indices`, with values from `updates`. When multiple values are given for the same
2994    index, the updated result will be the sum of all values.
2995    """
2996    return F.tensor_scatter_sub(x, indices, updates)
2997
2998
2999def tensor_scatter_mul(input_x, indices, updates):
3000    """
3001    Create a new tensor by multiplying the values from the positions in `input_x` indicated by
3002    `indices`, with values from `updates`. When multiple value are given for the same index,
3003    the output result will be the division of values.
3004    """
3005    return F.tensor_sactter_mul(input_x, indices, updates)
3006
3007
3008def tensor_sactter_div(input_x, indices, updates):
3009    """
3010    Create a new tensor by division the values from the positions in `input_x` indicated by
3011    `indices`, with values from `updates`. When multiple value are given for the same index,
3012    the output result will be the division of values.
3013    """
3014    return F.tensor_scatter_div(input_x, indices, updates)
3015
3016
3017def tensor_scatter_max(x, indices, updates):
3018    """
3019    By comparing the value at the position indicated by `indices` in `x` with the value in the `updates`,
3020    the value at the index will eventually be equal to the largest one to create a new tensor.
3021    """
3022    return F.tensor_scatter_max(x, indices, updates)
3023
3024
3025def tensor_scatter_min(x, indices, updates):
3026    """
3027    By comparing the value at the position indicated by `indices` in `x` with the value in the `updates`,
3028    the value at the index will eventually be equal to the smallest one to create a new tensor.
3029    """
3030    return F.tensor_scatter_min(x, indices, updates)
3031
3032
3033def unsorted_segment_min(x, segment_ids, num_segments):
3034    """Apply the unsorted segment min function for a tensor. Calculates the output according to the input elements."""
3035    return F.unsorted_segment_min(x, segment_ids, num_segments)
3036
3037
3038def unsorted_segment_max(x, segment_ids, num_segments):
3039    """Apply the unsorted segment max function for a tensor. Calculates the output according to the input elements."""
3040    return F.unsorted_segment_max(x, segment_ids, num_segments)
3041
3042
3043def unsorted_segment_prod(x, segment_ids, num_segments):
3044    """Apply the unsorted segment prod function for a tensor. Calculates the output according to the input elements."""
3045    return F.unsorted_segment_prod(x, segment_ids, num_segments)
3046
3047
3048def negative(input):
3049    r"""
3050    Return a new tensor with the negative of the elements of input.
3051    """
3052    return F.neg(input)
3053
3054
3055def nonzero(input, as_tuple=False):
3056    """
3057    Return a Tensor of the positions of all non-zero values.
3058    """
3059    return F.nonzero(input, as_tuple)
3060
3061
3062def new_zeros(x, size, *, dtype=None):
3063    r"""
3064    Return a tensor of `size` filled with zeros. By default, the returned tensor has the same dtype as `x`.
3065    """
3066    _dtype = x.dtype if dtype is None else dtype
3067    return F.zeros(size, dtype=_dtype)
3068
3069
3070def new_ones(x, size, *, dtype=None):
3071    r"""
3072    Return a tensor of `size` filled with ones. By default, the returned tensor has the same dtype as `x`.
3073    """
3074    _dtype = x.dtype if dtype is None else dtype
3075    return F.ones(size, dtype=_dtype)
3076
3077
3078def diag(x):
3079    """
3080    Constructs a diagonal tensor with a given diagonal values.
3081    """
3082    return F.diag(x)
3083
3084
3085def diagflat(input, offset=0):
3086    """
3087    Creates a two-dimensional Tensor with the flattened input as a diagonal.
3088    """
3089    return F.diagflat(input, offset)
3090
3091
3092def masked_select(input, mask):
3093    """
3094    Returns a new 1-D Tensor which indexes the input tensor according to the boolean mask.
3095    """
3096    return F.masked_select(input, mask)
3097
3098
3099def inplace_update(x, v, indices):
3100    """
3101    Update specified rows of x with values in v according to indices.
3102    """
3103    return F.inplace_update(x, v, indices)
3104
3105
3106def coo_to_csr(x):
3107    """convert coo to csr."""
3108    row_indices = x.indices[:, 0]
3109    col_indices = x.indices[:, 1]
3110    idx_dtype = x.indices.dtype
3111    row_indices, sort_idx = F.sort(row_indices.astype(mstype.float32))
3112    row_indices = row_indices.astype(idx_dtype)
3113    col_indices = col_indices[sort_idx]
3114    values = x.values[sort_idx]
3115    indptr = F.coo2csr(row_indices, x.shape[0])
3116    return CSRTensor(indptr, col_indices, values, x.shape)
3117
3118
3119def coo_to_dense(x):
3120    """convert coo to dense."""
3121    zeros_tensor = F.zeros(x.shape, dtype=x.values.dtype)
3122    return F.tensor_scatter_update(zeros_tensor, x.indices, x.values)
3123
3124
3125def coo_coalesce(x):
3126    """Returns the coalesced sparse tensor of the input."""
3127    shape = const_utils.make_tensor(x.shape)
3128    res_indices, res_values, _ = P.Coalesce()(
3129        x.indices.transpose(), x.values, shape)
3130    return COOTensor(res_indices.transpose(), res_values, x.shape)
3131
3132
3133def csr_to_coo(x):
3134    """convert csr to coo."""
3135    if x.ndim != 2:
3136        const_utils.raise_value_error(
3137            "Currently only support 2-D CSRTensor when converting to COOTensor.")
3138    row_indices = F.csr2coo(x.indptr, x.values.shape[0])
3139    coo_indices = P.Stack(1)((row_indices, x.indices))
3140    return COOTensor(coo_indices, x.values, x.shape)
3141
3142
3143def csr_to_dense(x):
3144    """convert csr to dense."""
3145    return F.csr_to_dense(x)
3146
3147
3148def random_categorical(x, num_sample, seed=0, dtype=mstype.int64):
3149    r"""
3150    Generates random samples from a given categorical distribution tensor.
3151    Refer to :func:`mindspore.ops.random_categorical` for more detail.
3152    """
3153    validator.check_is_int(num_sample, 'num_sample')
3154    validator.check_is_int(seed, 'seed')
3155    return F.random_categorical(x, num_sample, seed, dtype)
3156
3157
3158@constexpr
3159def empty_tensor(dtype):
3160    """Return empty tensor"""
3161    return Tensor_([], dtype)
3162
3163
3164@constexpr
3165def get_itemsize(x_type):
3166    """get itemsize from tensor's dtype."""
3167    return itemsize_map[x_type]
3168
3169
3170@constexpr(check=False)
3171def check_is_tensor(x):
3172    """check whether x is tensor."""
3173    if isinstance(x, mstype.TensorType):
3174        return True
3175    return False
3176
3177
3178def check_is_const_int(x, op_name, arg_name):
3179    """check whether x is const int."""
3180    if x is None:
3181        raise TypeError(
3182            f"For '{op_name}', the '{arg_name}' should be a const int number, but got not const.")
3183    if not isinstance(x, int):
3184        raise TypeError(
3185            f"For '{op_name}', the '{arg_name}' should be a const int number, but got {x}.")
3186    return True
3187
3188
3189@constexpr
3190def const_tensor_to_bool(x):
3191    """convert bool tensor to bool condition
3192        def const_tensor_to_bool(x):
3193        convert bool tensor to bool condition
3194        if x.shape == (1,):
3195            return bool(x[0])
3196        return bool(x)
3197    """
3198    if x is None:
3199        raise ValueError("Only tensor which shape is () or (1,) can be converted to bool, but got None")
3200    x = x.asnumpy()
3201    if x.shape == ():
3202        return bool(x)
3203    if x.shape == (1,):
3204        return bool(x[0])
3205    raise ValueError(
3206        f"Only tensor which shape is () or (1,) can be converted to bool, but got tensor shape is {x.shape}")
3207
3208
3209@_primexpr
3210def check_view_shape(x):
3211    """Check view function input shape"""
3212    if not x:
3213        raise ValueError("The shape variable should not be empty")
3214    if isinstance(x[0], tuple):
3215        if len(x) != 1:
3216            raise ValueError(f"Only one tuple is needed, but got {x}")
3217        x = x[0]
3218    return x
3219
3220
3221check_astype_dtype_const = constexpr(validator.check_astype_dtype)
3222max_ = constexpr(validator.max_)
3223min_ = constexpr(validator.min_)
3224expanded_shape = validator.expanded_shape
3225tuple_slice = validator.tuple_slice
3226check_type_support = constexpr(validator.check_type_support)
3227check_type_name = constexpr(validator.check_type_name)
3228check_value_type = constexpr(validator.check_value_type)
3229check_is_int = constexpr(validator.check_is_int)
3230check_bool_type = constexpr(validator.check_bool)
3231check_is_int = constexpr(validator.check_is_int)
3232check_bool = constexpr(validator.check_bool)
3233
3234
3235@constexpr
3236def empty_compile(dtype, shape):
3237    """Returns an empty Tensor."""
3238    return Tensor_(dtype, shape)
3239
3240
3241def tensor_bool(x):
3242    """tensor as condition, if is constant, return immediate bool value"""
3243    is_cond = F.is_tensor_bool_cond(x)
3244    if is_cond and F.isconstant(x):
3245        return const_tensor_to_bool(x)
3246    return F.cast(x, mstype.bool_)
3247
3248
3249def and_(x, y):
3250    """Implementation of `and` (`&`)."""
3251    return x.__and__(y)
3252
3253
3254def or_(x, y):
3255    """Implementation of `or` (`|`)."""
3256    return x.__or__(y)
3257
3258
3259def matmul(x, y):
3260    """Implementation of `matmul` (`@`)."""
3261    return F.matmul(x, y)
3262
3263
3264def inner(x, other):
3265    """Computes the inner product of 2 tensors."""
3266    return F.inner(x, other)
3267
3268
3269def float_bool(x):
3270    """Implementation of `float_bool`."""
3271    return x != 0.0
3272
3273
3274def xdivy(x, y):
3275    r"""
3276    Divides the first input tensor by the second input tensor element-wise. Returns zero when `x` is zero.
3277    """
3278    return F.xdivy(x, y)
3279
3280
3281def int_bool(x):
3282    """Implementation of `int_bool`."""
3283    return x != 0
3284
3285
3286def str_bool(x):
3287    """Implementation of `str_bool`."""
3288    if x == "":
3289        return False
3290    return True
3291
3292
3293def matrix_power(input, n):
3294    """
3295    Raises a square matrix to the (integer) power `n` .
3296    """
3297    return F.matrix_power(input, n)
3298
3299
3300def log1p(x):
3301    r"""
3302    Returns the natural logarithm of one plus the input tensor element-wise.
3303    Refer to :func:`mindspore.ops.log1p` for more detail.
3304    """
3305    return F.log1p(x)
3306
3307
3308def logit(x, eps=None):
3309    r"""
3310    Calculate the logit of a tensor element-wise. When eps is not None, element in 'x' is clamped to [eps, 1-eps].
3311    When eps is None, input 'x' is not clamped.
3312
3313    `x` refer to self tensor.
3314
3315    .. math::
3316        \begin{align}
3317        y_{i} & = \ln(\frac{z_{i}}{1 - z_{i}}) \\
3318        z_{i} & = \begin{cases}
3319        x_{i} & \text{if eps is None} \\
3320        \text{eps} & \text{if } x_{i} \lt \text{eps} \\
3321        x_{i} & \text{if } \text{eps} \leq x_{i} \leq 1 - \text{eps} \\
3322        1 - \text{eps} & \text{if } x_{i} \gt 1 - \text{eps}
3323        \end{cases}
3324        \end{align}
3325    """
3326
3327    if eps is None:
3328        eps = -1.0
3329    check_value_type('eps', eps, (float,), 'Tensor.logit')
3330    return F.logit(x, eps)
3331
3332
3333def logdet(x):
3334    """Returns the log determinant of one or batches of square matrices."""
3335    return F.logdet(x)
3336
3337
3338def lerp(start, end, weight):
3339    """Does a linear interpolation of two tensors start and end based on a float or tensor weight."""
3340    return F.lerp(start, end, weight)
3341
3342
3343# pylint: disable=redefined-builtin
3344def norm(A, ord=None, dim=None, keepdim=False, *, dtype=None):
3345    """Returns the matrix norm or vector norm of a given tensor."""
3346    return F.norm(A, ord, dim, keepdim, dtype=dtype)
3347
3348
3349def renorm(input_x, p, dim, maxnorm):
3350    """
3351    Renormalizes the sub-tensors along dimension `dim`, and each sub-tensor's p-norm should not exceed the
3352    'maxnorm'. The values of current sub-tensor don't need change if the p-norm of the sub-tensor is less than
3353    `maxnorm`. Otherwise the sub-tensor needs to be modified to the original value of the corresponding position
3354    divided by the p-norm of the substensor and then multiplied by `maxnorm`.
3355    """
3356    return F.renorm(input_x, p, dim, maxnorm)
3357
3358
3359def sequence_index(sequence, target, start=None, end=None):
3360    """Implementation of `tuple_index`."""
3361    if start is None:
3362        start = 0
3363    if end is None:
3364        end = len(sequence)
3365    return SequenceIndex()(sequence, target, start, end)
3366
3367
3368def none_bool(x):
3369    """Implementation of `none_bool`."""
3370    return False
3371
3372
3373def func_bool(x):
3374    """Implementation of `func_bool`."""
3375    return True
3376
3377
3378def float_floordiv(x, y):
3379    """Implementation of `float_floordiv`."""
3380    return floor(x / y)
3381
3382
3383def ceil(x):
3384    """
3385    Rounds a tensor up to the closest integer element-wise.
3386    """
3387    return F.ceil(x)
3388
3389
3390def top_k(input_x, k, sorted=True):
3391    """
3392    `Tensor.top_k` is deprecated, please use `Tensor.topk` instead.
3393    """
3394    check_is_int(k, 'k')
3395    check_bool(sorted, 'sorted')
3396    return F.top_k(input_x, k, sorted)
3397
3398
3399def topk(input_x, k, dim=None, largest=True, sorted=True):
3400    r"""
3401    For details, please refer to :func:`mindspore.ops.topk`.
3402    """
3403    check_is_int(k, 'k')
3404    check_bool_type(sorted, 'sorted')
3405    return F.topk(input_x, k, dim, largest=largest, sorted=sorted)
3406
3407
3408def subtract(x, other, *, alpha=1):
3409    r"""
3410    Computes the element-wise subtraction of input tensors.
3411    """
3412    return F.sub(x, other * alpha)
3413
3414
3415def true_divide(divident, divisor):
3416    r"""
3417    Computes the element-wise division of input tensors.
3418    """
3419    return F.div(divident, divisor, rounding_mode=None)
3420
3421
3422# pylint: disable=redefined-outer-name
3423def triu(input, diagonal=0):
3424    r"""
3425    Returns the triangular matrix based on the diagonal.
3426    """
3427    return F.triu(input, diagonal)
3428
3429
3430#############
3431# Iteration #
3432#############
3433
3434
3435def ms_hasnext(xs):
3436    """Whether the input has next element"""
3437    return len(xs) > 0
3438
3439
3440def ms_next(xs):
3441    """Get next element and res elements"""
3442    return xs[0], xs[1:]
3443
3444
3445def dict_next(xs):
3446    """Next array."""
3447    keys = xs.keys()
3448    new_keys = F.make_list()
3449    new_values = F.make_list()
3450    for i in range(1, len(keys)):
3451        new_keys.append(keys[i])
3452        new_values.append(xs[keys[i]])
3453    return keys[0], F.make_dict(new_keys, new_values)
3454
3455
3456def list_append(self_, list_item):
3457    """Append into list"""
3458    if F.is_sequence_shape_unknown(self_):
3459        return ListAppend()(self_, list_item)
3460    return _append(self_, list_item)
3461
3462
3463def list_insert(self_, index, obj):
3464    """Insert into list"""
3465    if F.is_sequence_shape_unknown(self_) or not F.isconstant(index):
3466        return ListInsert()(self_, index, obj)
3467    return _insert(self_, index, obj)
3468
3469
3470def list_pop(self_, index=-1):
3471    """Pop from list"""
3472    self_, pop_val = _pop(self_, index)
3473    return self_, pop_val
3474
3475
3476def list_clear(self_):
3477    """Clear the list"""
3478    return _list_clear(self_)
3479
3480
3481def list_reverse(self_):
3482    """Reverse the obj in list"""
3483    return _reverse(self_)
3484
3485
3486def list_extend(self_, obj):
3487    """Append obj to list"""
3488    return _extend(self_, obj)
3489
3490
3491def dict_get(self_, key_index, default_value=None):
3492    """Get value by key from dict"""
3493    if not _haskey(self_, key_index):
3494        return default_value
3495    return F.dict_getitem(self_, key_index)
3496
3497
3498def dict_setitem(self_, key, target):
3499    """Dictionary setitem"""
3500    return _dict_setitem(self_, key, target)
3501
3502
3503def dict_clear(self_):
3504    """Clear the dict"""
3505    return _dict_clear(self_)
3506
3507
3508def dict_haskey(self_, key_index):
3509    """Check if key is in dict"""
3510    return _haskey(self_, key_index)
3511
3512
3513def dict_update(self_, dict_obj):
3514    """Update the dict"""
3515    return _update(self_, dict_obj)
3516
3517
3518def dict_fromkeys(self_, seq, value=None):
3519    """Check if key is in dict"""
3520    return _fromkeys(self_, seq, value)
3521
3522
3523#################
3524# Array methods #
3525#################
3526
3527
3528def filter_(fun, iter_):
3529    """Support the use of built-in function filter."""
3530    result = []
3531    for elem in iter_:
3532        if fun(elem):
3533            result.append(elem)
3534    return result
3535
3536
3537##################
3538# Sparse methods #
3539##################
3540
3541
3542def csr_softmax(logits, dtype):
3543    """Implementation of `sum` for CSRTensor."""
3544    return F.sparse_matrix_softmax(logits, dtype)
3545
3546
3547def csr_add(a, b, alpha, beta):
3548    """Implementation of "csr_add" for CSRTensor."""
3549    return F.csr_add(a, b, alpha, beta)
3550
3551
3552def csr_astype(x, dtype):
3553    """Implementation of `astype` for CSRTensor."""
3554    data = x.values.astype(dtype)
3555    return F.make_csr_tensor(x.indptr, x.indices, data, x.shape)
3556
3557
3558def csr_sum(x, axis):
3559    """Implementation of `sum` for CSRTensor."""
3560    return F.csr_reduce_sum(x, axis)
3561
3562
3563def csr_abs(x):
3564    """Implementation of `abs` for CSRTensor."""
3565    data = F.absolute(x.values)
3566    return F.make_csr_tensor(x.indptr, x.indices, data, x.shape)
3567
3568
3569def csr_mv(x, dense_vector):
3570    """Implementation of `mv` for CSRTensor."""
3571    return F.csr_mv(x, dense_vector)
3572
3573
3574def csr_mm(x, matrix):
3575    """Implementation of `mm` for CSRTensor."""
3576    if isinstance(matrix, CSRTensor):
3577        return F.csr_mm(x, matrix)
3578    return _csr_mm(x.indptr, x.indices, x.values, x.shape, matrix)
3579
3580
3581def csr_to_tuple(x):
3582    """Implementation of `to_tuple` for CSRTensor."""
3583    res = (x.indptr, x.indices, x.values, x.shape)
3584    return res
3585
3586
3587def coo_astype(x, dtype):
3588    """Implementation of `astype` for COOTensor."""
3589    data = x.values.astype(dtype)
3590    return F.make_coo_tensor(x.indices, data, x.shape)
3591
3592
3593def coo_to_tuple(x):
3594    """Implementation of `to_tuple` for COOTensor."""
3595    return x.indices, x.values, x.shape
3596
3597
3598def coo_abs(x):
3599    """Implementation of `abs` for COOTensor."""
3600    data = F.absolute(x.values)
3601    return F.make_coo_tensor(x.indices, data, x.shape)
3602
3603
3604def coo_add(x, y, thresh):
3605    """Implementation of `add` for COOTensor."""
3606    return F.coo_add(x, y, thresh)
3607
3608
3609################
3610# Sparse Attrs #
3611################
3612
3613
3614def sparse_size_(x):
3615    """
3616    Return the size of SparseTensor.values. That is the number of non-zero values in SparseTensor.
3617    """
3618    return size_(x.values)
3619
3620
3621def sparse_ndim_(x):
3622    """
3623    Return the ndim of SparseTensor, according to its dense shape.
3624    """
3625    return F.tuple_len(x.shape)
3626
3627
3628def bernoulli(input, p=0.5, seed=None):
3629    """
3630    Randomly draws binary numbers from a Bernoulli distribution.
3631    """
3632    return F.bernoulli(input, p, seed)
3633
3634
3635def gather_nd(input_x, indices):
3636    r"""
3637    Gathers slices from a tensor by indices.
3638    Refer to :func:`mindspore.ops.gather_nd` for more detail.
3639    """
3640    return F.gather_nd(input_x, indices)
3641
3642
3643def gather(input_x, input_indices, axis, batch_dims=0):
3644    r"""
3645    Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.
3646    Refer to :func:`mindspore.ops.gather` for more detail.
3647    """
3648    return F.gather(input_x, input_indices, axis, batch_dims)
3649
3650
3651def split(tensor, split_size_or_sections, axis=0):
3652    """
3653    Splits the Tensor into chunks along the given axis.
3654    Refer to :func:`mindspore.ops.split` for more detail.
3655    """
3656    return F.split(tensor, split_size_or_sections, axis)
3657
3658
3659def tensor_split(input, indices_or_sections, axis=0):
3660    """
3661    Splits a tensor into multiple sub-tensors along the given axis.
3662    Refer to :func:`mindspore.ops.tensor_split` for more detail.
3663    """
3664    return F.tensor_split(input, indices_or_sections, axis=axis)
3665
3666
3667def vsplit(input, indices_or_sections):
3668    """
3669    Splits a tensor into multiple sub-tensors vertically. It is equivalent to `ops.tensor_split` with :math:`axis=0` .
3670    Refer to :func:`mindspore.ops.vsplit` for more detail.
3671    """
3672    return F.vsplit(input, indices_or_sections)
3673
3674
3675def hsplit(input, indices_or_sections):
3676    """
3677    Splits a tensor into multiple sub-tensors horizontally. It is equivalent to `ops.tensor_split` with :math:`axis=1` .
3678    Refer to :func:`mindspore.ops.hsplit` for more detail.
3679    """
3680    return F.hsplit(input, indices_or_sections)
3681
3682
3683def dsplit(input, indices_or_sections):
3684    """
3685    Splits a tensor into multiple sub-tensors along the 3rd axis.
3686    It is equivalent to `ops.tensor_split` with :math:`axis=2` .
3687    Refer to :func:`mindspore.ops.tensor_split` for more detail.
3688    """
3689    return F.dsplit(input, indices_or_sections)
3690
3691
3692def xlogy(x, y):
3693    r"""
3694    Computes the first input tensor multiplied by the logarithm of second input tensor element-wise.
3695    Refer to :func:`mindspore.ops.xlogy` for more details.
3696    """
3697    return F.xlogy(x, y)
3698
3699
3700def eigvals(x):
3701    r"""
3702    Computes the eigenvalues of a square matrix(batch square matrices).
3703    Refer to :func:`mindspore.ops.eigvals` for more detail.
3704    """
3705    return F.eigvals(x)
3706
3707
3708def erf(x):
3709    r"""
3710    Computes the Gauss error function of `x` element-wise.
3711    Refer to :func:`mindspore.ops.erf` for more detail.
3712    """
3713    return F.erf(x)
3714
3715
3716def erfc(x):
3717    r"""
3718    Computes the complementary error function of `x` element-wise.
3719    Refer to :func:`mindspore.ops.erfc` for more details.
3720    """
3721    return F.erfc(x)
3722
3723
3724def isfinite(x):
3725    r"""
3726    Determines which elements are finite for each position.
3727    Refer to :func:`mindspore.ops.isfinite` for more details.
3728    """
3729    return F.isfinite(x)
3730
3731
3732def sin(x):
3733    r"""
3734    For details, please refer to :func:`mindspore.ops.sin`.
3735    """
3736    return F.sin(x)
3737
3738
3739def sinc(x):
3740    r"""
3741    For details, please refer to :func:`mindspore.ops.sinc`.
3742    """
3743    return F.sinc(x)
3744
3745
3746def cos(x):
3747    r"""
3748    Computes cosine of input element-wise.
3749    """
3750    return F.cos(x)
3751
3752
3753def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
3754    r"""
3755    For details, please refer to :func:`mindspore.ops.count_nonzero`.
3756    """
3757    return F.count_nonzero(x, axis, keep_dims, dtype)
3758
3759
3760def cov(x, *, correction=1, fweights=None, aweights=None):
3761    r"""
3762    For details, please refer to :func:`mindspore.ops.cov`.
3763    """
3764    return F.cov(x, correction=correction, fweights=fweights, aweights=aweights)
3765
3766
3767def acos(x):
3768    r"""
3769    Computes arccosine of input tensors element-wise.
3770    """
3771    return F.acos(x)
3772
3773
3774def asin(x):
3775    r"""
3776    Computes arcsine of input tensors element-wise.
3777    """
3778    return F.asin(x)
3779
3780
3781def acosh(input):
3782    r"""
3783    Computes inverse hyperbolic cosine of the inputs element-wise.
3784    """
3785    return F.acosh(input)
3786
3787
3788def add(input, other):
3789    r"""
3790    Computes the element-wise addition of input tensors.
3791    """
3792    return F.add(input, other)
3793
3794
3795def addr(x, vec1, vec2, beta=1, alpha=1):
3796    r"""
3797    Computes the outer-product of `vec1` and `vec2` and adds it to `x`.
3798    """
3799    return F.addr(x, vec1, vec2, beta=beta, alpha=alpha)
3800
3801
3802def addbmm(x, batch1, batch2, *, beta=1, alpha=1):
3803    r"""
3804    Performs matrix multiplication with a reduced sum, and add `x` to the result.
3805    """
3806    return F.addbmm(x, batch1, batch2, beta=beta, alpha=alpha)
3807
3808
3809def addmm(x, mat1, mat2, *, beta=1, alpha=1):
3810    r"""
3811    Performs matrix multiplication, and add `x` to the result.
3812    """
3813    return F.addmm(x, mat1, mat2, beta=beta, alpha=alpha)
3814
3815
3816def addmv(x, mat, vec, beta=1, alpha=1):
3817    r"""
3818    Multiplies matrix `mat` and vector `vec`. The vector `x` is added to the final result.
3819    """
3820    return F.addmv(x, mat, vec, beta=beta, alpha=beta)
3821
3822
3823def adjoint(x):
3824    r"""
3825    Computes the conjucated matrix with the last 2 dimensions transposed.
3826    """
3827    return F.adjoint(x)
3828
3829
3830def asinh(x):
3831    r"""
3832    Computes inverse hyperbolic sine of the input element-wise.
3833    """
3834    return F.asinh(x)
3835
3836
3837def atan(input):
3838    r"""
3839    Computes inverse tangent of the input element-wise.
3840    """
3841    return F.atan(input)
3842
3843
3844def atanh(x):
3845    r"""
3846    Computes inverse hyperbolic tangent of the input element-wise.
3847    """
3848    return F.atanh(x)
3849
3850
3851def baddbmm(x, batch1, batch2, beta=1, alpha=1):
3852    r"""
3853    For details, please refer to :func:`mindspore.ops.baddbmm`.
3854    """
3855    return F.baddbmm(x, batch1, batch2, beta=beta, alpha=alpha)
3856
3857
3858def bmm(input_x, mat2):
3859    r"""
3860    Computes matrix multiplication between two tensors by batch.
3861    """
3862    return F.bmm(input_x, mat2)
3863
3864
3865def value_(x):
3866    r"""
3867    Get the value of Parameter or Tensor x. If x is Parameter, will change the type from RefTensor to Tensor.
3868    """
3869    return P.Load()(x, monad.U)
3870
3871
3872def to(input_x, dtype):
3873    r"""
3874    Performs tensor dtype conversion.
3875    """
3876    return P.Cast()(input_x, dtype)
3877
3878
3879def to_bool(input_x):
3880    r"""
3881    Converts input tensor dtype to bool.
3882    """
3883    return P.Cast()(input_x, mstype.bool_)
3884
3885
3886def to_float(input_x):
3887    r"""
3888    Converts input tensor dtype to float32.
3889    """
3890    return P.Cast()(input_x, mstype.float32)
3891
3892
3893def to_half(input_x):
3894    r"""
3895    Converts input tensor dtype to float16.
3896    """
3897    return P.Cast()(input_x, mstype.float16)
3898
3899
3900def to_int(input_x):
3901    r"""
3902    Converts input tensor dtype to int32.
3903    """
3904    return P.Cast()(input_x, mstype.int32)
3905
3906
3907def to_long(input_x):
3908    r"""
3909    Converts input tensor dtype to int64.
3910    """
3911    return P.Cast()(input_x, mstype.int64)
3912
3913
3914def cholesky(input_x, upper=False):
3915    r"""
3916    Computes the Cholesky decomposition of a symmetric positive-definite matrix
3917    """
3918    return F.cholesky(input_x, upper=upper)
3919
3920
3921def cholesky_inverse(input_x, upper=False):
3922    r"""
3923    Computes the inverse of the positive definite matrix using cholesky matrix factorization.
3924    """
3925    return F.cholesky_inverse(input_x, upper=upper)
3926
3927
3928def cholesky_solve(input, input2, upper=False):
3929    r"""
3930    Computes the solution of a set of linear equations with a positive definite matrix,
3931    according to its Cholesky decomposition factor `input2` .
3932    """
3933    return F.cholesky_solve(input, input2, upper=upper)
3934
3935
3936def map_tensor_get(map_tensor, key_tensor, insert_default_value=True):
3937    r"""
3938    Get or create value according the key tensor from a map tensor.
3939    """
3940    return _map_tensor_ops.MapTensorGet(insert_default_value)(map_tensor, key_tensor)
3941
3942
3943def map_tensor_put(map_tensor, key_tensor, value_tensor):
3944    r"""
3945    Insert or update key value tensor pairs to a map tensor.
3946    """
3947    return _map_tensor_ops.put(map_tensor, key_tensor, value_tensor)
3948
3949
3950def map_tensor_erase(map_tensor, key_tensor):
3951    r"""
3952    Remove records according the key tensor from a map tensor.
3953    """
3954    return _map_tensor_ops.erase(map_tensor, key_tensor)
3955
3956
3957def map_tensor_get_keys(map_tensor):
3958    r"""
3959    Get all keys as a tensor.
3960    """
3961    return _map_tensor_ops.get_keys(map_tensor)
3962
3963
3964def map_tensor_get_values(map_tensor):
3965    r"""
3966    Get all values as a tensor.
3967    """
3968    return _map_tensor_ops.get_values(map_tensor)
3969
3970
3971def map_tensor_get_data(map_tensor):
3972    r"""
3973    Get all keys and values as a tensor.
3974    """
3975    return _map_tensor_ops.get_data(map_tensor)
3976
3977
3978def conj(input):
3979    r"""
3980    Computes complex conjugate of the input element-wise.
3981    """
3982    return F.conj(input)
3983
3984
3985def cross(input, other, dim=None):
3986    r"""
3987    Computes the cross product of input vectors in specified dimension.
3988    """
3989    return F.cross(input, other, dim)
3990
3991
3992def erfinv(input):
3993    r"""
3994    Computes the inverse error function of input tensor.
3995    """
3996    return F.erfinv(input)
3997
3998
3999def less_equal(input, other):
4000    r"""
4001    Computes the boolean value of :math:`input\_x <= other` element-wise.
4002    """
4003    return F.less_equal(input, other)
4004
4005
4006def lcm(x, other):
4007    r"""
4008    Computes least common multiplier of input tensors element-wise.
4009    """
4010    return F.lcm(x, other)
4011
4012
4013def ldexp(x, other):
4014    r"""
4015    Multiplies input by 2**:attr:other.
4016    """
4017    return F.ldexp(x, other)
4018
4019
4020def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
4021    r"""
4022    Combines an array of sliding local blocks into a large containing tensor.
4023    """
4024    return F.fold(input, output_size, kernel_size, dilation, padding, stride)
4025
4026
4027def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
4028    r"""
4029    Extracts sliding local blocks from a batched input tensor.
4030    """
4031    return F.unfold(input, kernel_size, dilation, padding, stride)
4032
4033
4034def expand(input, size):
4035    r"""
4036    Returns a new view of the self tensor with singleton dimensions expanded to a larger size.
4037    """
4038    size = TensorToTuple()(size)
4039    return F.broadcast_to(input, size)
4040
4041
4042def cumprod(input, dim, dtype=None):
4043    r"""
4044    Computes the cumulative product of the `input` tensor along dimension `dim`.
4045    """
4046    return F.cumprod(input, dim, dtype)
4047
4048
4049def multiply(input, other):
4050    """For details, please refer to :func:`mindspore.ops.multiply`."""
4051    return F.multiply(input, other)
4052
4053
4054def div(input, value, *, rounding_mode=None):
4055    r"""
4056    Divides the tensor `input` by the given input tensor `value` in floating-point type element-wise.
4057    """
4058    return F.div(input, value, rounding_mode=rounding_mode)
4059
4060
4061def eq(input, other):
4062    r"""
4063    Computes the equivalence between the tensor `input` and the given input tensor `other` element-wise.
4064    """
4065    return F.equal(input, other)
4066
4067
4068def equal(x, y):
4069    r"""
4070    Computes the equivalence between the tensor `x` and the given input tensor `y` element-wise.
4071    """
4072    return F.equal(x, y)
4073
4074
4075def expm1(input_x):
4076    r"""
4077    Computes exponential then minus 1 of a tensor element-wise.
4078    """
4079    return F.expm1(input_x)
4080
4081
4082@constexpr
4083def _check_index_add_alpha(alpha):
4084    check_is_number(alpha, (int, float))
4085
4086
4087def index_add(input, dim, index, source, *, alpha=1):
4088    r"""
4089    Adds tensor `alpha` times `source`  to specified `dim` and `index` of input tensor.
4090    """
4091    _check_index_add_alpha(alpha)
4092    source = source * alpha
4093    return F.index_add(input, indices=index, y=source, axis=dim)
4094
4095
4096def greater(input, other):
4097    r"""
4098    Computes the boolean value of :math:`input > other` element-wise.
4099    """
4100    return F.greater(input, other)
4101
4102
4103def greater_equal(input, other):
4104    r"""
4105    Computes the boolean value of :math:`input >= other` element-wise.
4106    """
4107    return F.greater_equal(input, other)
4108
4109
4110def igamma(input, other):
4111    r"""
4112    Computes lower regularized incomplete Gamma function.
4113    """
4114    return F.igamma(input, other)
4115
4116
4117def igammac(input, other):
4118    r"""
4119    Computes upper regularized incomplete Gamma function.
4120    """
4121    return F.igammac(input, other)
4122
4123
4124def isinf(input):
4125    r"""
4126    Determines which elements are inf or -inf for each position.
4127    """
4128    return F.isinf(input)
4129
4130
4131def isnan(input):
4132    r"""
4133    Determines which elements are NaN for each position.
4134    """
4135    return F.isnan(input)
4136
4137
4138def le(input, other):
4139    r"""
4140    Computes the boolean value of :math:`input <= other` element-wise.
4141    """
4142    return F.le(input, other)
4143
4144
4145def less(input, other):
4146    r"""
4147    Computes the boolean value of :math:`input < other` element-wise.
4148    """
4149    return F.less(input, other)
4150
4151
4152def logical_and(input, other):
4153    r"""
4154    Computes the "logical AND" of two tensors element-wise.
4155    """
4156    return F.logical_and(input, other)
4157
4158
4159def logical_not(input):
4160    r"""
4161    Computes the "logical NOT" of input tensor element-wise.
4162    """
4163    return F.logical_not(input)
4164
4165
4166def logical_or(input, other):
4167    r"""
4168    Computes the "logical OR" of two tensors element-wise.
4169    """
4170    return F.logical_or(input, other)
4171
4172
4173def logical_xor(input, other):
4174    r"""
4175    Computes the "logical XOR" of two tensors element-wise.
4176    """
4177    return F.logical_xor(input, other)
4178
4179
4180def lstsq(input, A):
4181    r"""
4182    Computes the solutions of the least squares and minimum norm problems of full-rank
4183    matrix `input` of size :math:`(m \times n)` and matrix `A` of size :math:`(m \times k)`.
4184    """
4185    return F.lstsq(input, A)
4186
4187
4188def mvlgamma(input, p):
4189    r"""
4190    Computes the multivariate log-gamma function with dimension p element-wise.
4191    """
4192    return F.mvlgamma(input, p)
4193
4194
4195def maximum(input, other):
4196    r"""
4197    Computes the maximum of input tensors element-wise.
4198    """
4199    return F.maximum(input, other)
4200
4201
4202def mul(input, other):
4203    r"""
4204    Multiplies two tensors element-wise.
4205    """
4206    return F.mul(input, other)
4207
4208
4209def neg(input):
4210    r"""
4211    Returns a tensor with negative values of the input tensor element-wise.
4212    """
4213    return F.neg(input)
4214
4215
4216def ne(input, other):
4217    r"""
4218    Computes the non-equivalence of two tensors element-wise.
4219    """
4220    return F.ne(input, other)
4221
4222
4223def not_equal(x, other):
4224    r"""
4225    Computes the non-equivalence of two tensors element-wise.
4226    """
4227    return F.not_equal(x, other)
4228
4229
4230def sign(x):
4231    r"""
4232    For details, please refer to :func:`mindspore.ops.sign`.
4233    """
4234    return F.sign(x)
4235
4236
4237def signbit(x):
4238    """
4239    For details, please refer to :func:`mindspore.ops.signbit`.
4240    """
4241    return F.signbit(x)
4242
4243
4244def sgn(x):
4245    """
4246    For details, please refer to :func:`mindspore.ops.sgn`.
4247    """
4248    return F.sgn(x)
4249
4250
4251def sinh(input):
4252    r"""
4253    Computes hyperbolic sine of the input element-wise.
4254    """
4255    return F.sinh(input)
4256
4257
4258def sort(input, axis=-1, descending=False):
4259    r"""
4260    Sorts the elements of the input tensor along a given dimension in ascending order by value.
4261    """
4262    return F.sort(input, axis=axis, descending=descending)
4263
4264
4265def argsort(input, axis=-1, descending=False):
4266    """For details, please refer to :func:`mindspore.ops.argsort`."""
4267    return F.argsort(input, axis, descending)
4268
4269
4270def trunc(input):
4271    r"""
4272    Returns a new tensor with the truncated integer values of the elements of input.
4273    """
4274    return F.trunc(input)
4275
4276
4277def where(x, condition, y):
4278    r"""
4279    Returns a tensor whose elements are selected from either `x` or `y` depending on `condition`.
4280    Please refer to :func:`mindspore.ops.where`.
4281    """
4282    return F.where(condition, x, y)
4283
4284
4285def imag(input):
4286    r"""
4287    Returns a new tensor containing imaginary value of the input.
4288    """
4289    return F.imag(input)
4290
4291
4292def diff(x, n=1, axis=-1, prepend=None, append=None):
4293    r"""
4294    For details, please refer to :func:`mindspore.ops.diff`.
4295    """
4296    return F.diff(x, n, axis, prepend, append)
4297
4298
4299def frac(x):
4300    r"""
4301    For details, please refer to :func:`mindspore.ops.frac`.
4302    """
4303    return F.frac(x)
4304
4305
4306def argwhere(input):
4307    r"""
4308    For details, please refer to :func:`mindspore.ops.argwhere`.
4309    """
4310    return F.argwhere(input)
4311
4312
4313def moveaxis(input, source, destination):
4314    r"""
4315    For details, please refer to :func:`mindspore.ops.moveaxis`.
4316    """
4317    return F.moveaxis(input, source, destination)
4318
4319
4320def movedim(input, source, destination):
4321    r"""
4322    For details, please refer to :func:`mindspore.ops.movedim`.
4323    """
4324    return F.movedim(input, source, destination)
4325
4326
4327def nextafter(input, other):
4328    r"""
4329    For details, please refer to :func:`mindspore.ops.nextafter`.
4330    """
4331    return F.nextafter(input, other)
4332
4333
4334def qr(input, some=True):
4335    r"""
4336    For details, please refer to :func:`mindspore.ops.qr`.
4337    """
4338    check_bool_type(some, 'some', 'Tensor.qr')
4339    return F.qr(input, 'reduced' if some else 'complete')
4340
4341
4342def ormqr(input, input2, input3, left=True, transpose=False):
4343    r"""
4344    For details, please refer to :func:`mindspore.ops.ormqr`.
4345    """
4346    return F.ormqr(input, input2, input3, left, transpose)
4347
4348
4349def amax(input, axis=None, keep_dims=False):
4350    r"""
4351    For details, please refer to :func:`mindspore.ops.amax`.
4352    """
4353    return F.amax(input, axis, keep_dims)
4354
4355
4356def uniform(input, from_=0., to=1., generator=None):
4357    r"""
4358    Generates random numbers in the half-open interval [from_, to).
4359    """
4360    return F.uniform_ext(input, from_, to, generator)
4361
4362
4363def amin(input, axis=None, keep_dims=False):
4364    r"""
4365    For details, please refer to :func:`mindspore.ops.amin`.
4366    """
4367    return F.amin(input, axis, keep_dims)
4368
4369
4370def lu_solve(b, LU_data, LU_pivots):
4371    r"""
4372    For details, please refer to :func:`mindspore.Tensor.lu_solve`
4373    """
4374    return F.lu_solve(b, LU_data, LU_pivots)
4375
4376
4377def masked_scatter(input, mask, tensor):
4378    r"""
4379    For details, please refer to :func:`mindspore.Tensor.masked_scatter`
4380    """
4381    return array_ops.MaskedScatter()(input, mask, tensor)
4382
4383
4384def index_put(input, indices, values, accumulate=False):
4385    r"""
4386    For details, please refer to :func:`mindspore.Tensor.index_put`
4387    """
4388    check_bool_type(accumulate, 'accumulate', 'Tensor.index_put')
4389    _index_put = array_ops.IndexPut(0 if accumulate is False else 1)
4390    return _index_put(input, values, indices)
4391
4392
4393def aminmax(input, *, axis=0, keepdims=False):
4394    r"""
4395    For details, please refer to :func:`mindspore.ops.aminmax`.
4396    """
4397    return F.aminmax(input, axis=axis, keepdims=keepdims)
4398
4399
4400def quantile(input, q, axis=None, keepdims=False):
4401    r"""
4402    For details, please refer to :func:`mindspore.ops.quantile`.
4403    """
4404    return F.quantile(input, q, axis, keepdims)
4405
4406
4407def nanquantile(input, q, axis=None, keepdims=False):
4408    r"""
4409    For details, please refer to :func:`mindspore.ops.nanquantile`.
4410    """
4411    return F.nanquantile(input, q, axis, keepdims)
4412
4413
4414def orgqr(input, input2):
4415    r"""
4416    For details, please refer to :func:`mindspore.ops.orgqr`.
4417    """
4418    return F.orgqr(input, input2)
4419
4420
4421def outer(input, vec2):
4422    r"""
4423    For details, please refer to :func:`mindspore.ops.vec2`.
4424    """
4425    return F.outer(input, vec2)
4426
4427def sigmoid(input):
4428    r"""
4429    For details, please refer to :func:`mindspore.ops.sigmoid`.
4430    """
4431    return F.sigmoid(input)
4432
4433def _getitem(data, index):
4434    return multitype_ops.getitem(data, index)
4435
4436def _setitem(data, index, value):
4437    return multitype_ops.setitem(data, index, value)
4438