• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2022 Huawei Technologies Co., Ltd
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ============================================================================
15
16# pylint: disable=unused-import
17"""Defines math operators with functional form."""
18
19import collections
20from functools import cmp_to_key
21import math
22import numbers
23import numpy as np
24
25import mindspore as ms
26from mindspore import log as logger
27import mindspore.ops as ops
28from mindspore.common import dtype as mstype
29from mindspore.ops import operations as P
30from mindspore.ops import composite as C
31from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
32from mindspore.ops.primitive import constexpr, _primexpr
33from mindspore.ops.operations._inner_ops import TileSize
34from mindspore.ops.auto_generate import Cummin, BatchMatMul, LinSpaceExt, Norm
35from mindspore.ops import auto_generate
36from mindspore.ops.operations.math_ops import STFT
37from mindspore.ops.operations.math_ops import LuUnpack
38from mindspore.ops.operations.math_ops import Roll
39from mindspore.ops.operations.math_ops import Ormqr
40from mindspore.ops.operations.math_ops import DivMod
41from mindspore.ops.operations.array_ops import MatrixSetDiagV3, Transpose
42from mindspore.ops.auto_generate import (minimum, maximum, mul, sin, sinc, sinh, cummax, real, conj, add, sub, cos, cosh,
43                                         matrix_exp, sqrt, rsqrt, square, trace, nextafter, abs, acos, acosh, angle,
44                                         asin, asinh, atan, atan2, atanh, ceil, equal, erf, erfc, erfinv, exp, expm1,
45                                         floor, floor_divide, floor_mod, gcd, greater, greater_equal, less, less_equal,
46                                         log, log1p, neg, not_equal, pow, round, isfinite, argmax_ext, mean_ext_op,
47                                         sum_ext_op, prod_ext_op, all, matrix_inverse_ext, atan2_ext, sign)
48from mindspore.ops.auto_generate import tanh
49from mindspore.nn import layer
50from mindspore._checkparam import check_is_number
51from mindspore import _checkparam as validator
52from mindspore.ops.operations.math_ops import (
53    Bernoulli,
54    BesselI0,
55    BesselI1,
56    BesselJ0,
57    BesselJ1,
58    BesselK0,
59    BesselK0e,
60    BesselY0,
61    BesselY1,
62    BesselK1,
63    BesselK1e,
64    CumulativeLogsumexp,
65    LuSolve,
66    MatrixExp,
67    MatrixSolve,
68    Median,
69    Fmax,
70    Orgqr,
71    Fmin,
72    Renorm,
73    Hypot,
74    Heaviside,
75    Lcm,
76    Gcd,
77    Quantile,
78    NanToNum,
79    SparseSegmentMean,
80    TrilIndices,
81    TriuIndices,
82    InplaceIndexAdd,
83    InplaceUpdateV2,
84    Igamma,
85    Igammac,
86    Polar,
87    Angle,
88    FFTWithSize,
89)
90from mindspore.common.tensor import Tensor
91from mindspore.ops._primitive_cache import _get_cache_prim
92from mindspore._c_expression import Tensor as Tensor_
93import mindspore.ops.function as F
94from mindspore.ops.operations._sequence_ops import TupleToTensor
95
96
97@constexpr
98def _make_tensor(val, dtype):
99    """Returns the tensor with value `val` and dtype `dtype`."""
100    return Tensor(val, dtype)
101
102
103def get_x_shape(x_shape):
104    s = 1
105    for i in x_shape:
106        s = s * i
107    return (s,)
108
109
110#####################################
111# Public Operation Functions.
112#####################################
113absolute_ = P.Abs()
114cast_ = P.Cast()
115tensor_add = P.Add()
116tensor_ceil = P.Ceil()
117tensor_div = P.RealDiv()
118tensor_exp = P.Exp()
119tensor_expm1 = P.Expm1()
120tensor_floordiv = P.FloorDiv()
121floordiv = tensor_floordiv
122tensor_ge = P.GreaterEqual()
123tensor_gt = greater
124tensor_le = P.LessEqual()
125tensor_lt = P.Less()
126tensor_mod = P.FloorMod()
127floormod = tensor_mod
128tensor_mul = P.Mul()
129tensor_pow = P.Pow()
130pows = tensor_pow
131tensor_sub = P.Sub()
132transpose_ = P.Transpose()
133xdivy_ = P.Xdivy()
134tensor_div_ = P.Div()
135tensor_divmod_ = DivMod()
136
137#####################################
138# Private Operation Functions.
139#####################################
140accumulate_ = P.AccumulateNV2()
141acos_ = P.ACos()
142acosh_ = P.Acosh()
143addcdiv_ = P.Addcdiv()
144addcuml_ = P.Addcmul()
145addn_ = P.AddN()
146angle_ = Angle()
147asin_ = P.Asin()
148asinh_ = P.Asinh()
149atan2_ = P.Atan2()
150atan_ = P.Atan()
151atanh_ = P.Atanh()
152batch_matmul_ = BatchMatMul()
153bessel_i0_ = BesselI0()
154bessel_i0e_ = P.BesselI0e()
155bessel_i1_ = BesselI1()
156bessel_i1e_ = P.BesselI1e()
157bessel_j0_ = BesselJ0()
158bessel_j1_ = BesselJ1()
159bessel_k0_ = BesselK0()
160bessel_k0e_ = BesselK0e()
161bessel_k1_ = BesselK1()
162bessel_k1e_ = BesselK1e()
163bessel_y0_ = BesselY0()
164bessel_y1_ = BesselY1()
165bitwise_and_ = P.BitwiseAnd()
166bitwise_or_ = P.BitwiseOr()
167bitwise_xor_ = P.BitwiseXor()
168conj_ = P.Conj()
169cumprod_ = P.CumProd()
170cumsum_ = P.CumSum()
171cumulative_logsumexp_ = CumulativeLogsumexp()
172digamma_ = P.Digamma()
173dtype_ = P.DType()
174eps_ = P.Eps()
175erf_ = P.Erf()
176erfc_ = P.Erfc()
177erfinv_ = P.Erfinv()
178exp2_ = P.Pow()
179expand_dims_ = P.ExpandDims()
180fill_v2_ = P.FillV2()
181floor_ = P.Floor()
182gcd_ = Gcd()
183igamma_ = Igamma()
184igammac_ = Igammac()
185imag_ = P.Imag()
186inv_ = P.math_ops.Inv()
187invert_ = P.Invert()
188isinf_ = P.IsInf()
189isnan_ = P.IsNan()
190lcm_ = Lcm()
191lerp_ = P.Lerp()
192lgamma_ = P.Lgamma()
193linspace_ = P.LinSpace()
194log1p_ = P.Log1p()
195log_ = P.Log()
196log_matrix_determinant_ = P.LogMatrixDeterminant()
197logical_and_ = P.LogicalAnd()
198logical_not_ = P.LogicalNot()
199logical_or_ = P.LogicalOr()
200logical_xor_ = P.LogicalXor()
201lu_solve_ = LuSolve()
202lu_unpack_ = LuUnpack()
203matmul_ = P.MatMul()
204matrix_determinant_ = P.MatrixDeterminant()
205matrix_inverse_ = P.MatrixInverse()
206mod_ = P.Mod()
207nextafter_ = P.NextAfter()
208ones_ = P.Ones()
209polar_ = Polar()
210poly_gamma_ = P.Polygamma()
211rank_ = P.Rank()
212reciprocal_ = P.Reciprocal()
213reduce_sum_ = P.ReduceSum()
214reshape_ = P.Reshape()
215select_ = P.Select()
216slice_ = P.Slice()
217size_ = P.Size()
218scalar_to_tensor_ = P.ScalarToTensor()
219shape_ = P.Shape()
220sign_ = P.Sign()
221sparse_segment_mean_ = SparseSegmentMean()
222tan_ = P.Tan()
223tanh_ = P.Tanh()
224tensor_round_ = P.Round()
225tile_ = P.Tile()
226tile_size_ = TileSize()
227trunc_ = P.Trunc()
228truncate_div_ = P.TruncateDiv()
229truncate_mod_ = P.TruncateMod()
230xlogy_ = P.Xlogy()
231zeros_ = P.Zeros()
232zeta_ = P.Zeta()
233
234
235#####################################
236# Element-wise Operation Functions.
237#####################################
238
239
240def addn(x):
241    """
242    Computes addition of all input tensors element-wise.
243
244    All input tensors must have the same shape.
245
246    Args:
247        x (Union(tuple[Tensor], list[Tensor])): A tuple or list composed of Tensor.
248
249    Returns:
250        Tensor, has the same shape and dtype as each Tensor of `x`.
251
252    Raises:
253        TypeError: If `x` is neither tuple nor list.
254        ValueError: If there are Tensors with different shapes in `x`.
255
256    Supported Platforms:
257        ``Ascend`` ``GPU`` ``CPU``
258
259    Examples:
260        >>> import mindspore
261        >>> import numpy as np
262        >>> from mindspore import Tensor, ops
263        >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
264        >>> y = Tensor(np.array([4, 5, 6]), mindspore.float32)
265        >>> output = ops.addn([x, y, x, y])
266        >>> print(output)
267        [10. 14. 18.]
268    """
269    return addn_(x)
270
271
272def absolute(input):
273    """
274    Alias for :func:`mindspore.ops.abs` .
275
276    Supported Platforms:
277        ``Ascend`` ``GPU`` ``CPU``
278    """
279    return abs(input)
280
281
282def addcdiv(input, tensor1, tensor2, value=1):
283    r"""
284    Performs the element-wise division of tensor tensor1 by tensor tensor2,
285    multiply the result by the scalar value and add it to input data.
286
287    .. math::
288        y[i] = input[i] + value[i] * (tensor1[i] / tensor2[i])
289
290    Args:
291        input (Tensor): The tensor to be added.
292        tensor1 (Tensor): The numerator tensor.
293        tensor2 (Tensor): The denominator tensor.
294        value (Union[Tensor, Number]): The multiplier for tensor1/tensor2. Default: ``1`` .
295
296    Returns:
297        Tensor, has the same shape and dtype as tensor1/tensor2.
298
299    Raises:
300        TypeError: If dtype of `tensor1`, `tensor2`, `input` is not tensor.
301        ValueError: If `tensor1` could not be broadcast to a tensor with shape of `tensor2`.
302        ValueError: If `value` could not be broadcast to tensors with shapes of `tensor1/tensor2`.
303        ValueError: If `input` could not be broadcast to tensors with shapes of `value*(tensor1/tensor2)`.
304
305    Supported Platforms:
306        ``Ascend`` ``GPU`` ``CPU``
307
308    Examples:
309        >>> import mindspore
310        >>> import numpy as np
311        >>> from mindspore import Tensor, ops
312        >>> input_data = Tensor(np.array([1, 1, 1, 1]), mindspore.float32)
313        >>> x1 = Tensor(np.array([1, 2, 3, 4]), mindspore.float32)
314        >>> x2 = Tensor(np.array([4, 3, 2, 1]), mindspore.float32)
315        >>> value = Tensor([1], mindspore.float32)
316        >>> y = ops.addcdiv(input_data, x1, x2, value)
317        >>> print(y)
318        [1.25      1.6666667 2.5       5.       ]
319    """
320    return addcdiv_(input, tensor1, tensor2, Tensor(value))
321
322
323def addcmul(input, tensor1, tensor2, value=1):
324    r"""
325    Performs the element-wise product of tensor tensor1 and tensor tensor2,
326    multiply the result by the scalar value and add it to input data.
327
328    .. math::
329        output[i] = input[i] + value[i] * (tensor1[i] * tensor2[i])
330
331    Args:
332        input (Tensor): The tensor to be added.
333        tensor1 (Tensor): The tensor to be multiplied.
334        tensor2 (Tensor): The tensor to be multiplied.
335        value (Union[Tensor, Number]): The multiplier for tensor1*tensor2. Default: ``1`` .
336
337    Returns:
338        Tensor, has the same shape and dtype as tensor1*tensor2.
339
340    Raises:
341        TypeError: If dtype of `tensor1`, `tensor2`, `input` is not Tensor.
342        TypeError: If dtype of `input` is not one of: float32, float16, int32.
343        TypeError: If dtype of `tensor1` or `tensor2` is not one of: float32, float16, int32.
344        TypeError: If dtype of `value` is not one of: float32, float16, int32.
345        ValueError: If `tensor1` could not be broadcast to a tensor with shape of `tensor2`.
346        ValueError: If `value` could not be broadcast to tensors with shapes of `tensor1` * `tensor2`.
347        ValueError: If `input` could not be broadcast to tensors with shapes of `value*(tensor1*tensor2)`.
348
349    Supported Platforms:
350        ``Ascend`` ``GPU`` ``CPU``
351
352    Examples:
353        >>> import mindspore
354        >>> import numpy as np
355        >>> from mindspore import Tensor, ops
356        >>> input_data = Tensor(np.array([1, 1, 1]), mindspore.float32)
357        >>> x1 = Tensor(np.array([[1], [2], [3]]), mindspore.float32)
358        >>> x2 = Tensor(np.array([[1, 2, 3]]), mindspore.float32)
359        >>> value = Tensor([1], mindspore.float32)
360        >>> y = ops.addcmul(input_data, x1, x2, value)
361        >>> print(y)
362        [[ 2.  3.  4.]
363         [ 3.  5.  7.]
364         [ 4.  7. 10.]]
365    """
366    return addcuml_(input, tensor1, tensor2, Tensor(value))
367
368
369def bincount(input, weights=None, minlength=0):
370    """
371    Counts the number of occurrences of each value in `input`.
372
373    If you don't specify `minlength`, the length of output Tensor will be
374    the maximum value of the input `input` plus one.
375
376    If `minlength` is specified, the length of output Tensor is the value of maximum of `input` plus 1 and `minlength`.
377
378    Each value in the output Tensor marks the number of occurrences of that index in `input`.
379    If 'weights' is specified, the output results are weighted, i.e ``out[n] += weight[i]`` instead of ``out[n] += 1``.
380
381    Note:
382        If `input` contains negative value, the result will be undefined.
383
384    Args:
385        input (Tensor): 1-d input tensor.
386        weights (Tensor, optional): Weights, a tensor of the same shape as `input`. Default: ``None`` .
387        minlength (int, optional): A minimum number of bins for the output tensor. Default: ``0`` .
388
389    Returns:
390        Tensor, a tensor of shape [max(input)+1] if input is non-empty, otherwise, the shape is [0].
391
392    Raises:
393        TypeError: If `input` or `weights` is not a tensor.
394        ValueError: If `input` is not one-dimensional, or if `input` and `weights` do not have the same shape.
395        ValueError: If `minlength` is a negative integer.
396
397    Supported Platforms:
398        ``Ascend`` ``GPU`` ``CPU``
399
400    Examples:
401        >>> from mindspore import Tensor, ops
402        >>> from mindspore import dtype as mstype
403        >>> x = Tensor([2, 4, 1, 0, 0], dtype=mstype.int64)
404        >>> print(ops.bincount(x, minlength=7))
405        [2. 1. 1. 0. 1. 0. 0.]
406        >>> weights = Tensor([0, 0.25, 0.5, 0.75, 1], dtype=mstype.float32)
407        >>> print(ops.bincount(x, weights=weights))
408        [1.75 0.5  0.   0.   0.25]
409    """
410    if not isinstance(input, Tensor):
411        raise TypeError("For math function 'bincount', 'input' must be Tensor.")
412    if weights is not None and not isinstance(weights, Tensor):
413        raise TypeError(f"For math function 'bincount', 'weights' must be Tensor, but got {type(weights)}.")
414    if not isinstance(minlength, int) or isinstance(minlength, bool):
415        raise TypeError(f"For math function 'bincount', 'minlength' must be int but got {type(minlength)}.")
416    if rank_(input) != 1:
417        raise ValueError(f"For math function 'bincount', 'input' should be one-dimensional tensor.")
418    if input.shape[0] == 0:
419        return Tensor_([])
420    if minlength < 0:
421        raise ValueError(f"For 'bincount', 'minlength' should be >= 0 but got {minlength}.")
422    if max(input.astype(mstype.float32)) > minlength - 1:
423        length = (max(input.astype(mstype.float32)) + 1).astype(mstype.int32)
424    else:
425        length = cast_(minlength, mstype.int32)
426    idx = F.arange(length).expand_dims(-1)
427    idx_mapping = equal(input, idx.astype(input.dtype))
428    if weights is not None:
429        if input.shape != weights.shape:
430            raise ValueError('for bincount `input` and `weights` must have the same length')
431        idx_mapping *= weights
432    return reduce_sum_(idx_mapping.astype(mstype.float32), 1).ravel()
433
434
435def bucketize(input, boundaries, *, right=False):
436    r"""
437    Bucketizes `input` based on `boundaries`. If `right` is ``False``, the left boundary is closed. For each element x
438    in `input`, the returned index satisfies the following rules:
439
440    .. math::
441
442        \begin{cases}
443        boundaries[i-1] < x <= boundaries[i], & \text{if right} = False\\
444        boundaries[i-1] <= x < boundaries[i], & \text{if right} = True
445        \end{cases}
446
447    Args:
448        input (Tensor): A tensor containing the search value(s).
449        boundaries (list): A sorted list of boundary values of the buckets.
450
451    Keyword Args:
452        right (bool, optional): if ``False``, gets the lower bound index for each value in input from boundaries;
453            If ``True``, gets the upper bound index instead. Default: ``False``.
454
455    Returns:
456        Tensor, the indexes Tensor, with the same shape as the input, and data type is int32.
457
458    Raises:
459        TypeError: If `boundaries` is not a list.
460        TypeError: If `input` is not a Tensor.
461
462    Supported Platforms:
463        ``Ascend`` ``GPU`` ``CPU``
464
465    Examples:
466        >>> import numpy as np
467        >>> from mindspore import Tensor, ops
468        >>> input = Tensor(np.array([[3, 6, 9], [3, 6, 9]]))
469        >>> boundaries = list(np.array([1., 3., 5., 7., 9.]))
470        >>> output = ops.bucketize(input, boundaries, right=True)
471        >>> print(output)
472        [[2 3 5]
473         [2 3 5]]
474    """
475
476    bucketize_op = _get_cache_prim(P.Bucketize)
477    epsilon_ = 0. if right else 1.e-6
478    boundaries = [boundary + epsilon_ for boundary in boundaries]
479    return bucketize_op(boundaries)(input)
480
481
482def exp2(input):
483    """
484    Computes base two exponential of Tensor `input` element-wise.
485
486    .. math::
487        out_i = 2^{input_i}
488
489    Args:
490        input (Tensor): Input tensor.
491
492    Returns:
493        Tensor, has the same shape and dtype as the `input`.
494
495    Raises:
496        TypeError: If `input` is not a Tensor.
497
498    Supported Platforms:
499        ``Ascend`` ``GPU`` ``CPU``
500
501    Examples:
502        >>> import mindspore
503        >>> import numpy as np
504        >>> from mindspore import Tensor, ops
505        >>> x = Tensor(np.array([2, 3, 4]), mindspore.float32)
506        >>> output = ops.exp2(x)
507        >>> print(output)
508        [ 4.  8. 16.]
509    """
510
511    tensor_2 = Tensor(np.array(2.0).astype(np.float32))
512    if input.dtype == mstype.float16:
513        tensor_2 = Tensor(np.array(2.0).astype(np.float16))
514    return exp2_(tensor_2, input)
515
516
517def argmin(input, axis=None, keepdims=False):
518    """
519    Returns the indices of the minimum value of a tensor across the axis.
520
521    If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor is
522    :math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
523
524    Args:
525        input (Tensor): Input tensor.
526        axis (Union[int, None], optional): Axis where the Argmin operation applies to. Default: ``None`` .
527        keepdims (bool, optional): Whether the output tensor retains the specified
528            dimension. Ignored if `axis` is None. Default: ``False`` .
529
530    Returns:
531        Tensor, indices of the min value of input tensor across the axis.
532
533    Raises:
534        TypeError: If `axis` is not an int.
535
536    Supported Platforms:
537        ``Ascend`` ``GPU`` ``CPU``
538
539    Examples:
540        >>> import mindspore
541        >>> import numpy as np
542        >>> from mindspore import Tensor, ops
543        >>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
544        >>> index = ops.argmin(input_x)
545        >>> print(index)
546        2
547    """
548    if not input.shape:
549        return Tensor(0)
550    is_axis_none = False
551    if axis is None:
552        input = reshape_(input, (-1,))
553        axis = 0
554        is_axis_none = True
555    out = _get_cache_prim(P.Argmin)(axis)(input)
556    if keepdims and not is_axis_none:
557        out = expand_dims_(out, axis)
558    return out
559
560
561def negative(input):
562    r"""
563    Alias for :func:`mindspore.ops.neg` .
564
565    Supported Platforms:
566        ``Ascend`` ``GPU`` ``CPU``
567    """
568    return neg(input)
569
570
571def positive(input):
572    r"""
573    Return self Tensor.
574
575    Args:
576        input (Tensor): Input Tensor.
577
578    Returns:
579        Tensor, self input.
580
581    Raises:
582        TypeError: If `input` is not a Tensor.
583
584    Supported Platforms:
585        ``Ascend`` ``GPU`` ``CPU``
586
587    Examples:
588        >>> import numpy as np
589        >>> from mindspore import Tensor, ops
590        >>> from mindspore import dtype as mstype
591        >>> x = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mstype.float32)
592        >>> print(ops.positive(x))
593        [ -5.    1.5   3.  100. ]
594    """
595    _check_is_tensor("input", input, "positive")
596    return input
597
598
599def numel(input):
600    r"""
601    Returns a Scalar of type int that represents the total number of elements in the Tensor.
602
603    Args:
604        input (Tensor): Input Tensor.
605
606    Returns:
607        int. A scalar representing the total of elements in the Tensor.
608
609    Raises:
610        TypeError: If `input` is not a Tensor.
611
612    Supported Platforms:
613        ``Ascend`` ``GPU`` ``CPU``
614
615    Examples:
616        >>> import mindspore
617        >>> import numpy as np
618        >>> from mindspore import Tensor, ops
619        >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
620        >>> print(ops.numel(input_x))
621        4
622    """
623    _check_is_tensor("input", input, "numel")
624    return input.size
625
626
627def permute(input, axis):
628    """
629    Permutes the dimensions of the input tensor according to input `axis` .
630
631    Args:
632        input (Tensor): Input Tensor.
633        axis (tuple(int)): Permute will permute the tensor to the input `axis` order.
634
635    Returns:
636        Tensor, has the same dimension as input tensor, with `axis` suitably permuted.
637
638    Raises:
639        ValueError: If `axis` is None.
640        ValueError: If the number of elements of `axis` is not equal to `input` ndim.
641
642    Supported Platforms:
643        ``Ascend`` ``GPU`` ``CPU``
644
645    Examples:
646        >>> import mindspore
647        >>> import numpy as np
648        >>> from mindspore import Tensor, ops
649        >>> input_x = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
650        >>> input_perm = (0, 2, 1)
651        >>> print(ops.permute(input_x, input_perm))
652        [[[ 1.  4.]
653          [ 2.  5.]
654          [ 3.  6.]]
655         [[ 7. 10.]
656          [ 8. 11.]
657          [ 9. 12.]]]
658    """
659    return transpose_(input, axis)
660
661
662def subtract(input, other, *, alpha=1):
663    r"""
664    Performs the element-wise subtract of input tensors.
665
666    .. math::
667        output[i] = input[i] - alpha * other[i]
668
669    Args:
670        input (Union[Tensor, number.Number]): Tensor or Number involved in subtraction.
671        other (Union[Tensor, number.Number]): Tensor or Number involved in subtraction.
672
673    Keyword Args:
674        alpha (Number): The multiplier for :math:`other`. Default: ``1`` .
675
676    Returns:
677        Tensor, has the same shape and dtype as input tensors.
678
679    Raises:
680        TypeError: `input` or `other` is neither Tensor nor number.Number.
681        TypeError: Both `input` and `other` are not Tensor.
682
683    Supported Platforms:
684        ``Ascend`` ``GPU`` ``CPU``
685
686    Examples:
687        >>> import mindspore
688        >>> import numpy as np
689        >>> from mindspore import Tensor, ops
690        >>> input = Tensor(np.array([4, 5, 6]), mindspore.float32)
691        >>> y = Tensor(np.array([1, 2, 3]), mindspore.float32)
692        >>> z = ops.subtract(input, y, alpha=1)
693        >>> print(z)
694        [3. 3. 3.]
695    """
696    return tensor_sub(input, alpha * other)
697
698
699def multiply(input, other):
700    r"""
701    Alias for :func:`mindspore.ops.asinh`.
702
703    Supported Platforms:
704        ``Ascend`` ``GPU`` ``CPU``
705    """
706    return tensor_mul(input, other)
707
708
709def div(input, other, *, rounding_mode=None):
710    r"""
711    Divides the first input tensor by the second input tensor in floating-point type element-wise.
712
713    .. math::
714
715        out_{i} = input_{i} / other_{i}
716
717    Note:
718        - When the two inputs have different shapes, they must be able to broadcast to a common shape.
719        - The two inputs can not be bool type at the same time,
720          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
721        - The two inputs comply with the implicit type conversion rules to make the data types
722          consistent.
723
724    Args:
725        input (Union[Tensor, Number, bool]): The first input is a number or
726            a bool or a tensor whose data type is number or bool.
727        other (Union[Tensor, Number, bool]): The second input is a number or
728            a bool when the first input is a tensor or a tensor whose data type is number or bool.
729
730    Keyword Args:
731        rounding_mode (str, optional): Type of rounding applied to the result. Default: ``None`` .
732            Three types are defined as,
733
734            - None: Default behavior, which is the same as true division in Python or `true_divide` in NumPy.
735
736            - "floor": Rounds the division of the inputs down, which is the same as floor division in Python
737              or `floor_divide` in NumPy.
738
739            - "trunc": Rounds the division of the inputs towards zero, which is the same as C-style integer division.
740
741    Returns:
742        Tensor, the shape is the same as the one after broadcasting,
743        and the data type is the one with higher precision or higher digits among the two inputs.
744
745    Raises:
746        TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
747        ValueError: If `rounding_mode` value is not None, "floor" or "trunc".
748
749    Supported Platforms:
750        ``Ascend`` ``GPU`` ``CPU``
751
752    Examples:
753        >>> import mindspore
754        >>> import numpy as np
755        >>> from mindspore import Tensor, ops
756        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
757        >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
758        >>> output = ops.div(x, y)
759        >>> print(output)
760        [0.25 0.4 0.5]
761    """
762    if rounding_mode is not None and rounding_mode not in ['floor', 'trunc']:
763        raise ValueError("For ops.div, rounding_mode value should be None, 'floor' or 'trunc'.")
764    if rounding_mode:
765        output = tensor_divmod_(input, other, rounding_mode)
766    else:
767        output = tensor_div_(input, other)
768    return output
769
770
771def true_divide(dividend, divisor):
772    r"""
773    Alias for :func:`mindspore.ops.div` with :math:`rounding\_mode=None`.
774
775    Supported Platforms:
776        ``Ascend`` ``GPU`` ``CPU``
777    """
778    return div(dividend, divisor)
779
780
781def divide(input, other, *, rounding_mode=None):
782    """
783    Alias for :func:`mindspore.ops.div` .
784
785    Supported Platforms:
786        ``Ascend`` ``GPU`` ``CPU``
787    """
788    return div(input, other, rounding_mode=rounding_mode)
789
790
791def float_power(input, exponent):
792    """
793    Computes `input` to the power of the exponent.
794    For the real number type, cast `input` and `exponent` to mindspore.float64 to calculate.
795    Currently, complex type calculation is not supported.
796
797    Args:
798        input (Union[Tensor, Number]): The first input is a tensor or a number.
799        exponent (Union[Tensor, Number]): The second input, if the first input is Tensor,
800            the second input can be Number or Tensor. Otherwise, it must be a Tensor.
801
802    Returns:
803        Tensor, the shape is the same as the one after broadcasting. For the complex type,
804        the return value type is the same as the input type. For the real number type,
805        the return value type is mindspore.float64.
806
807    Raises:
808        TypeError: If neither `input` nor `exponent` is a Tensor.
809        TypeError: If the data type of `input` or `exponent` is not in Tensor and Number.
810
811    Supported Platforms:
812        ``GPU`` ``CPU``
813
814    Examples:
815        >>> import numpy as np
816        >>> from mindspore import Tensor, ops
817        >>> input = Tensor(np.array([-1.5, 0., 2.]))
818        >>> output = ops.float_power(input, 2)
819        >>> print(output)
820        [2.25 0.   4.  ]
821    """
822    if not (isinstance(input, Tensor) or isinstance(exponent, Tensor)):
823        raise TypeError("At least one of the types of inputs must be tensor, " + \
824                        f"but the type of 'input' got is {type(input)}, " + \
825                        f"and the type of 'exponent' is {type(exponent)}.")
826    if not isinstance(input, (Tensor, numbers.Number)):
827        raise TypeError(f"The type of 'input' must be Tensor or Number, but got {type(input)}.")
828    if not isinstance(exponent, (Tensor, numbers.Number)):
829        raise TypeError(f"The type of 'exponent' must be Tensor or Number, but got {type(exponent)}.")
830
831    if (isinstance(input, Tensor) and is_complex(input)) or \
832            (isinstance(exponent, Tensor) and is_complex(exponent)) or \
833            isinstance(input, complex) or isinstance(exponent, complex):
834        input = cast_(input, mstype.complex128)
835        exponent = cast_(exponent, mstype.complex128)
836    else:
837        input = cast_(input, mstype.float64)
838        exponent = cast_(exponent, mstype.float64)
839    return pow(input, exponent)
840
841
842def floor_div(x, y):
843    """
844    Alias for :func:`mindspore.ops.floor_divide` .
845
846    Supported Platforms:
847        ``Ascend`` ``GPU`` ``CPU``
848    """
849    return tensor_floordiv(x, y)
850
851
852def fmod(input, other):
853    """
854    Computes the floating-point remainder of the division operation input/other.
855
856    .. math::
857
858        out = input - n * other
859
860    Where :math:`n` is :math:`input/other` with its fractional part truncated.
861    The returned value has the same sign as `input` and is less than `other` in magnitude.
862
863    Args:
864        input (Union[Tensor, Number]): the dividend.
865        other (Union[Tensor, Number]): the divisor.
866
867    Returns:
868        Tensor, the shape is the same as the one after broadcasting,
869        and the data type is the one with higher precision or higher digits among the two inputs.
870
871    Raises:
872        TypeError: If neither `input` nor `other` is a Tensor.
873
874    Supported Platforms:
875        ``Ascend`` ``GPU`` ``CPU``
876
877    Examples:
878        >>> import mindspore
879        >>> import numpy as np
880        >>> from mindspore import Tensor, ops
881        >>> input = Tensor(np.array([-4., -3.5, 0, 3.5, 4]), mindspore.float32)
882        >>> output = ops.fmod(input, 2.5)
883        >>> print(output)
884        [-1.5 -1.   0.   1.   1.5]
885    """
886    if not (isinstance(input, (Tensor, Tensor_)) or isinstance(other, (Tensor, Tensor_))):
887        raise TypeError("At least one of the types of inputs must be tensor, " + \
888                        f"but the type of 'input' got is {type(input)}, " + \
889                        f"and the type of 'other' is {type(other)}.")
890    return input - div(input, other, rounding_mode="trunc") * other
891
892
893def logdet(input):
894    r"""
895    Calculates log determinant of one or a batch of square matrices.
896
897    Args:
898        input (Tensor): Tensor of shape :math:`(*, n, n)` where :math:`*` means zero or more batch dimensions.
899
900    Returns:
901        Tensor, the log determinant of `input`. If the matrix determinant is smaller than 0, nan will be returned. If
902        the matrix determinant is 0, -inf will be returned.
903
904    Raises:
905        TypeError: If dtype of `input` is not float32, float64, Complex64 or Complex128.
906
907    Supported Platforms:
908        ``CPU``
909
910    Examples:
911        >>> import mindspore
912        >>> from mindspore import Tensor, ops
913        >>> a = Tensor([[[8, 9], [1, 2]], [[5, 6], [3, 4]]], mindspore.float32)
914        >>> output = ops.logdet(a)
915        >>> print(output)
916        [1.9459091 0.6931454]
917    """
918    det_x = det(input)
919    return log_(det_x)
920
921
922def i0(input):
923    r"""
924    Alias for :func:`mindspore.ops.bessel_i0` .
925
926    Supported Platforms:
927        ``GPU`` ``CPU``
928    """
929    return bessel_i0(input)
930
931
932def inplace_update(x, v, indices):
933    """
934    Updates specified values in `x` to `v` according to `indices`.
935
936    .. warning::
937        This is an experimental API that is subject to change or deletion.
938
939    Note:
940        `indices` can only be indexed along the highest dimension.
941
942    Args:
943        x (Tensor): A tensor which to be inplace updated. It can be one of the following data types:
944            float32, float16 and int32.
945        v (Tensor): A tensor with the same type as `x` and the same dimension size as `x` except
946            the first dimension, which must be the same as the size of `indices`.
947        indices (Union[int, tuple[int], Tensor]): Determines which rows of `x` to update with `v`,
948            should be several int. It is an int or tuple or tensor with one dimension,
949            whose value is in [-x.shape[0], x.shape[0]).
950            If it is a tuple or Tensor, the size of 'indices' should be the same as the first dimension of 'v'.
951
952    Returns:
953        Tensor, with the same type and shape as the input `x`.
954
955    Raises:
956        TypeError: If `indices` is neither int nor tuple nor Tensor.
957        TypeError: If `indices` is a tuple or Tensor, but its element is not an int.
958
959    Supported Platforms:
960        ``GPU`` ``CPU``
961
962    Examples:
963        >>> import numpy as np
964        >>> import mindspore
965        >>> from mindspore import Tensor, ops
966        >>> indices = (0, 1)
967        >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
968        >>> v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
969        >>> output = ops.inplace_update(x, v, indices)
970        >>> print(output)
971        [[0.5 1. ]
972         [1.  1.5]
973         [5.  6. ]]
974    """
975    inplace_update_inner = InplaceUpdateV2()
976    return inplace_update_inner(x, indices, v)
977
978
979def inplace_add(x, v, indices):
980    """
981    Adds `v` into specified rows of `x`. Computes `y` = `x`; y[i,] += `v`.
982
983    Note:
984            `indices` refers to the left-most dimension.
985
986    Args:
987        x (Tensor): The tensor to be added. It has shape :math:`(N,*)` where :math:`*` means
988            any number of additional dimensions.
989        v (Tensor):  The value tensor add to `x`. It has the same dimension sizes as `x` except
990          the first dimension, whose size must be the same as `indices`. It has the same data type with `x`.
991        indices (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of `x`
992            to add with `v`. It is an integer or a tuple, whose value is in [0, the first dimension size of `x`).
993
994    Returns:
995        Tensor, has the same shape and dtype as `x`.
996
997    Raises:
998        TypeError: If `indices` is neither int nor tuple.
999        TypeError: If `indices` is a tuple whose elements are not all int.
1000        ValueError: If the rank of `x` is not equal to the rank of `v`.
1001        ValueError: If the length of `indices` is not equal to `v.shape[0]`.
1002        ValueError: If the values of `indices` are not in range of `[0, x.shape[0])`.
1003
1004    Supported Platforms:
1005        ``Ascend`` ``GPU`` ``CPU``
1006
1007    Examples:
1008        >>> import numpy as np
1009        >>> import mindspore
1010        >>> from mindspore import Tensor, ops
1011        >>> indices = (0, 1)
1012        >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
1013        >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
1014        >>> output = ops.inplace_add(x, input_v, indices)
1015        >>> print(output)
1016        [[1.5 3. ]
1017         [4.  5.5]
1018         [5.  6. ]]
1019    """
1020    inplace_add_inner = _get_cache_prim(P.InplaceAdd)(indices)
1021    return inplace_add_inner(x, v)
1022
1023
1024def inplace_index_add(var, indices, updates, axis):  # pylint: disable=redefined-outer-name
1025    """
1026    Adds Tensor `updates` to specified axis and indices of Tensor `var` element-wise.
1027
1028    Args:
1029        var (Parameter): The input Parameter to add to, with data type uint8, int8, int16, int32,
1030            float16, float32, float64.
1031        indices (Tensor): The indies along `axis` to perform the addition. A 1D Tensor
1032            of shape :math:`(updates.shape[axis],)`, every value of it
1033            should be in range :math:`[0, var.shape[axis])` with data type int32.
1034        updates (Tensor): The input Tensor with the value to add. Must have same data type as `var`.
1035            The shape must be the same as `var` except the `axis` th dimension.
1036        axis (int): The dimension along which to index. It should be in range :math:`[0, len(var.dim))`.
1037
1038    Returns:
1039        Tensor, updated result, has the same shape and dtype as `var`.
1040
1041    Raises:
1042        TypeError: If `var` is not a Parameter.
1043        TypeError: If neither `indices` nor `updates` is a Tensor.
1044        ValueError: If `axis` is out of valid range.
1045        ValueError: If `var` rank is not the same as `updates` rank.
1046        ValueError: If shape of `indices` is not :math:`(updates.shape[axis],)`.
1047        ValueError: If `updates`'s shape is not the same as `var` except the `axis` th dimension.
1048
1049    Supported Platforms:
1050        ``Ascend`` ``CPU``
1051
1052    Examples:
1053        >>> import mindspore
1054        >>> import numpy as np
1055        >>> from mindspore import Tensor, ops, Parameter
1056        >>> var = Parameter(Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32))
1057        >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
1058        >>> updates = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
1059        >>> var = ops.inplace_index_add(var, indices, updates, axis=0)
1060        >>> print(var)
1061        [[1.5 3. ]
1062         [4.  5.5]
1063         [5.  6. ]]
1064    """
1065
1066    inplace_index_add_ = InplaceIndexAdd(axis)
1067    return inplace_index_add_(var, indices, updates)
1068
1069
1070def inplace_sub(x, v, indices):
1071    r"""
1072    Subtracts `v` into specified rows of `x`. Computes :math:`y = x`; :math:`y[i,] -= input\_v`.
1073
1074    Note:
1075        `indices` refers to the left-most dimension.
1076
1077    Args:
1078        x (Tensor): TThe tensor to be subtracted. It has shape :math:`(N,*)` where :math:`*` means
1079            any number of additional dimensions.
1080        v (Tensor): The value tensor subtract from `x`. It has the same dimension sizes as `x` except
1081            the first dimension, whose size must be the same as `indices`. It has the same data type with `x`.
1082        indices (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of `x`
1083            to subtract with `v`. It is an int or tuple, whose value is in [0, the first dimension size of `x`).
1084
1085    Returns:
1086        Tensor, has the same shape and dtype as `x`.
1087
1088    Raises:
1089        TypeError: If `indices` is neither int nor tuple.
1090        TypeError: If `indices` is a tuple whose elements are not all int.
1091        ValueError: If the rank of `x` is not equal to the rank of `v`.
1092        ValueError: If the length of `indices` is not equal to `v.shape[0]`.
1093        ValueError: If the values of `indices` are not in range of `[0, x.shape[0])`.
1094
1095    Supported Platforms:
1096        ``Ascend`` ``GPU`` ``CPU``
1097
1098    Examples:
1099        >>> import numpy as np
1100        >>> import mindspore
1101        >>> from mindspore import Tensor, ops
1102        >>> indices = (0, 1)
1103        >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
1104        >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
1105        >>> output = ops.inplace_sub(x, input_v, indices)
1106        >>> print(output)
1107        [[0.5 1. ]
1108         [2.  2.5]
1109         [5.  6. ]]
1110    """
1111    inplace_sub_inner = _get_cache_prim(P.InplaceSub)(indices)
1112    return inplace_sub_inner(x, v)
1113
1114
1115def logical_not(input):
1116    """
1117    Computes the "logical NOT" of a tensor element-wise.
1118
1119    .. math::
1120
1121        out_{i} = \\neg input_{i}
1122
1123    Args:
1124        input (Tensor): The input tensor.
1125
1126    Returns:
1127        Tensor, the shape is the same as the `input`, and the dtype is bool.
1128
1129    Raises:
1130        TypeError: If `input` is not a Tensor.
1131
1132    Supported Platforms:
1133        ``Ascend`` ``GPU`` ``CPU``
1134
1135    Examples:
1136        >>> import mindspore
1137        >>> import numpy as np
1138        >>> from mindspore import Tensor, ops
1139        >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
1140        >>> output = ops.logical_not(x)
1141        >>> print(output)
1142        [False  True False]
1143    """
1144    return logical_not_(input)
1145
1146
1147def logical_or(input, other):
1148    """
1149    Computes the "logical OR" of two tensors element-wise.
1150
1151    Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
1152    The inputs must be two tensors or one tensor and one bool.
1153
1154    When the inputs are two tensors, the shapes of them could be broadcast.
1155
1156    When the inputs are one tensor and one bool, the bool object could only be a constant.
1157
1158    .. math::
1159
1160        out_{i} = input_{i} \\vee other_{i}
1161
1162    Note:
1163        logical_or supports broadcasting.
1164
1165    Args:
1166        input (Union[Tensor, bool]): The first input is a bool or a tensor whose data type can be implicitly
1167            converted to bool.
1168        other (Union[Tensor, bool]): The second input is a bool when the first input is a tensor or
1169            a tensor whose data type can be implicitly converted to bool.
1170
1171    Returns:
1172        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
1173
1174    Supported Platforms:
1175        ``Ascend`` ``GPU`` ``CPU``
1176
1177    Examples:
1178        >>> import mindspore
1179        >>> import numpy as np
1180        >>> from mindspore import Tensor, ops
1181        >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
1182        >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
1183        >>> output = ops.logical_or(x, y)
1184        >>> print(output)
1185        [ True  True  True]
1186        >>> x = Tensor(1, mindspore.bool_)
1187        >>> y = Tensor(0, mindspore.bool_)
1188        >>> output = ops.logical_or(x, y)
1189        >>> print(output)
1190        True
1191        >>> x = True
1192        >>> y = Tensor(0, mindspore.bool_)
1193        >>> output = ops.logical_or(x, y)
1194        >>> print(output)
1195        True
1196        >>> x = True
1197        >>> y = Tensor(np.array([True, False]), mindspore.bool_)
1198        >>> output = ops.logical_or(x, y)
1199        >>> print(output)
1200        [True True]
1201    """
1202    return logical_or_(input, other)
1203
1204
1205def logical_and(input, other):
1206    r"""
1207    Computes the "logical AND" of two tensors element-wise.
1208
1209    Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
1210    The inputs must be two tensors or one tensor and one bool.
1211
1212    When the inputs are two tensors, the shapes of them could be broadcast.
1213
1214    When the inputs are one tensor and one bool, the bool object could only be a constant.
1215
1216    .. math::
1217
1218        out_{i} = input_{i} \wedge other_{i}
1219
1220    Note:
1221        logical_and supports broadcasting.
1222
1223    Args:
1224        input (Union[Tensor, bool]): The first input is a bool or a tensor whose data type can be implicitly
1225            converted to bool.
1226        other (Union[Tensor, bool]): The second input is a bool when the first input is a tensor or
1227            a tensor whose data type can be implicitly converted to bool.
1228
1229    Returns:
1230        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
1231
1232    Raises:
1233        TypeError: If neither `input` nor `other` is a Tensor.
1234
1235    Supported Platforms:
1236        ``Ascend`` ``GPU`` ``CPU``
1237
1238    Examples:
1239        >>> import mindspore
1240        >>> import numpy as np
1241        >>> from mindspore import Tensor, ops
1242        >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
1243        >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
1244        >>> output = ops.logical_and(x, y)
1245        >>> print(output)
1246        [ True False False]
1247        >>> x = Tensor(1, mindspore.bool_)
1248        >>> y = Tensor(0, mindspore.bool_)
1249        >>> output = ops.logical_and(x, y)
1250        >>> print(output)
1251        False
1252        >>> x = True
1253        >>> y = Tensor(0, mindspore.bool_)
1254        >>> output = ops.logical_and(x, y)
1255        >>> print(output)
1256        False
1257        >>> x = True
1258        >>> y = Tensor(np.array([True, False]), mindspore.bool_)
1259        >>> output = ops.logical_and(x, y)
1260        >>> print(output)
1261        [True False]
1262    """
1263    return logical_and_(input, other)
1264
1265
1266def signbit(input):
1267    r"""
1268    Determine the symbol of each element. If the element value is less than 0,
1269    the corresponding output position is True; otherwise, it is False.
1270
1271    Args:
1272        input (Tensor): The input value.
1273
1274    Returns:
1275        Tensor, the signbit of input.
1276
1277    Raises:
1278        TypeError: If input is not a Tensor.
1279
1280    Supported Platforms:
1281        ``Ascend`` ``GPU`` ``CPU``
1282
1283    Examples:
1284        >>> import mindspore as ms
1285        >>> from mindspore import ops
1286        >>> input = ms.Tensor([0.3, 1.2, 0., -2.5])
1287        >>> output = ops.signbit(input)
1288        >>> print(output)
1289        [False False False  True]
1290    """
1291    if not isinstance(input, Tensor):
1292        raise TypeError(f"For signbit, the input must be a Tensor, but got {type(input)}")
1293    res = ops.less(input, 0)
1294    return res
1295
1296
1297def sgn(input):
1298    r"""
1299    Extension of :func:`mindspore.ops.sign` in complex domain.
1300    For real number input, this function is the same as :func:`mindspore.ops.sign`.
1301    For complex input, this function is calculated according to the following formula.
1302
1303    .. math::
1304        \text{out}_{i} = \begin{cases}
1305                        0 & |\text{input}_i| == 0 \\
1306                        \frac{{\text{input}_i}}{|{\text{input}_i}|} & \text{otherwise}
1307                        \end{cases}
1308
1309    Args:
1310        input (Tensor): The input value.
1311
1312    Returns:
1313        Tensor, the sgn of input.
1314
1315    Raises:
1316        TypeError: If input is not a Tensor.
1317
1318    Supported Platforms:
1319        ``Ascend`` ``GPU`` ``CPU``
1320
1321    Examples:
1322        >>> import mindspore as ms
1323        >>> from mindspore import ops
1324        >>> input = ms.Tensor([[3 + 4j, 7 - 24j, 0, 6 + 8j, 8], [15 + 20j, 7 - 24j, 0, 3 + 4j, 20]], dtype=ms.complex64)
1325        >>> output = ops.sgn(input)
1326        >>> print(output)
1327        [[0.6 +0.8j  0.28-0.96j 0.  +0.j   0.6 +0.8j  1.  +0.j  ]
1328         [0.6 +0.8j  0.28-0.96j 0.  +0.j   0.6 +0.8j  1.  +0.j  ]]
1329    """
1330    if not isinstance(input, Tensor):
1331        raise TypeError(f"For sgn, the input must be a Tensor, but got {type(input)}")
1332    if not ops.is_complex(input):
1333        return ops.sign(input)
1334    modulus = ops.ComplexAbs()(input)
1335    zeros_mask = modulus.equal(0)
1336    non_zero_modulus = ops.masked_fill(modulus, zeros_mask, ops.cast(1, modulus.dtype))
1337    zeros_modulus = ops.zeros_like(non_zero_modulus)
1338    complex_modulus = ops.Complex()(non_zero_modulus, zeros_modulus)
1339    res = tensor_div(input, complex_modulus)
1340    return res
1341
1342
1343def cosine_similarity(x1, x2, dim=1, eps=1e-08):
1344    r"""
1345    Calculate cosine similarity between `x1` and `x2` along the axis, `dim`.
1346
1347    .. math::
1348        \text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}
1349
1350    Note:
1351        Currently, broadcast of input is not supported.
1352
1353    Args:
1354        x1 (Tensor): The first input Tensor.
1355        x2 (Tensor): The second input Tensor.
1356        dim (int, optional): Axis for calculating cosine similarity. Default: ``1`` .
1357        eps (float, optional): Minimal value to avoid division by zero. Default: ``1e-08`` .
1358
1359    Returns:
1360        Tensor, cosine similarity between x1 and x2.
1361
1362    Raises:
1363        TypeError: If the dtype of x1 or x2 is neither float16 nor float32.
1364
1365    Supported Platforms:
1366        ``Ascend`` ``GPU`` ``CPU``
1367
1368    Examples:
1369        >>> import mindspore as ms
1370        >>> from mindspore import ops
1371        >>> x1 = ms.Tensor([[-0.0256, 0.0127, -0.2475, 0.2316, 0.8037],
1372        ...                 [0.5809, -1.2712, -0.7038, -0.2558, 0.7494]], dtype=ms.float32)
1373        >>> x2 = ms.Tensor([[-0.6115, -0.1965, -0.8484, 0.2389, 0.2409],
1374        ...                 [1.8940, -2.1997, 0.1915, 0.0856, 0.7542]], dtype=ms.float32)
1375        >>> output = ops.cosine_similarity(x1, x2)
1376        >>> print(output)
1377        [0.4843164  0.81647635]
1378    """
1379    molecule = ops.sum(x1 * x2, dim=dim)
1380    denominator = (ops.norm(x1, dim=dim, ord=2) * ops.norm(x2, dim=dim, ord=2)).clip(min=eps)
1381    output = molecule / denominator
1382    return output
1383
1384
1385def _check_cov_weights(weights, weights_name, num_observations, valid_type, valid_type_name):
1386    """check cov weights valid"""
1387    if weights.ndim > 1:
1388        raise ValueError(
1389            f"For cov, the {weights_name} must have one or fewer dimensions, but got {weights.ndim} dimensions.")
1390    if weights.dtype not in valid_type:
1391        raise TypeError(
1392            f"For cov, the dtype of {weights_name} must be {valid_type_name} type, but got type {weights.dtype}")
1393    if ops.numel(weights) != num_observations:
1394        raise ValueError(
1395            f"For cov, the numel of {weights_name} must equal the number of columns of input, "
1396            f"but got numel:{ops.numel(weights)}, number of columns of input:{num_observations}.")
1397    return 0
1398
1399
1400def _get_default_div_type(param):
1401    """get the default type when div"""
1402    if param.dtype == mstype.float64:
1403        return param
1404    return param.astype(mstype.float32)
1405
1406
1407def cov(input, *, correction=1, fweights=None, aweights=None):
1408    r"""
1409    Given the input and weights, returns the covariance matrix (the square matrix of the covariance of each pair of
1410    variables) of input, where the input row is the variable and the column is the observation value.
1411
1412    The diagonal contains each variable and its own covariance. If input is a scalar or 1D vector of a single variable,
1413    its variance will be returned.
1414
1415    The unbiased sample covariance of the variables :math:`a` and :math:`b` is given by the following formula:
1416
1417    .. math::
1418        \text{cov}_w(a,b) = \frac{\sum^{N}_{i = 1}(a_{i} - \bar{a})(b_{i} - \bar{b})}{N~-~1}
1419
1420    where :math:`\bar{a}` and :math:`\bar{b}` are the simple means of the :math:`a` and :math:`b` respectively.
1421
1422    If `fweights` and/or `aweights` are provided, the unbiased weighted covariance
1423    is calculated, which is given by:
1424
1425    .. math::
1426        \text{cov}_w(a,b) = \frac{\sum^{N}_{i = 1}w_i(a_{i} - \mu_a^*)(b_{i} - \mu_b^*)}{\sum^{N}_{i = 1}w_i~-~1}
1427
1428    where :math:`w` denotes `fweights` or `aweights` based on whichever is provided, or
1429    :math:`w = fweights \times aweights` if both are provided, and
1430    :math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable.
1431
1432    .. warning::
1433        The values of `fweights` and `aweights` cannot be negative, and the negative weight scene result is undefined.
1434
1435    .. note::
1436        Currently, complex number is not supported.
1437
1438    Args:
1439        input (Tensor): A 2D matrix, or a scalar or 1D vector of a single variable
1440
1441    Keyword Args:
1442        correction (int, optional): The difference between sample size and sample degrees of freedom.
1443            Defaults to Bessel's correction, `correction = 1` which returns the unbiased estimate,
1444            even if both `fweights` and `aweights` are specified. `correction = 0`
1445            will return the simple average. Default: ``1`` .
1446        fweights (Tensor, optional): Scalar or one-dimensional Tensor containing integer frequency weight, indicating
1447            the number of repetition of each observation vector. Its numel must equal the number of columns of `input`.
1448            Ignored if `None`. Default: ``None`` .
1449        aweights (Tensor, optional): A scalar or 1D Tensor containing float observation weights represents
1450            the importance of each observation vector. The higher the importance, the greater the corresponding value.
1451            Its numel must equal the number of columns of `input`. Must have floating point dtype. Ignored if `None`.
1452            Default: ``None`` .
1453
1454    Returns:
1455        Tensor, the covariance matrix Tensor of `input`.
1456
1457    Raises:
1458        ValueError: If the dimensions of input is greater than 2.
1459        ValueError: If the dimensions of fweights is greater than 1.
1460        ValueError: If the numel of fweights not equal the number of columns of input.
1461        ValueError: If the numel of aweights not equal the number of columns of input.
1462        ValueError: If the dimensions of aweights is greater than 1.
1463        TypeError: If the dtype of input is bool.
1464        TypeError: If the dtype of fweights is not an integer type.
1465        TypeError: If the dtype of aweights is not a floating point type.
1466
1467    Supported Platforms:
1468        ``Ascend`` ``GPU`` ``CPU``
1469
1470    Examples:
1471        >>> import mindspore as ms
1472        >>> from mindspore import ops
1473        >>> x = ms.Tensor([[0., 3.], [5., 5.], [7., 0.]]).T
1474        >>> print(x)
1475        [[0. 5. 7.]
1476         [3. 5. 0.]]
1477        >>> print(ops.cov(x))
1478        [[13.        -3.5      ]
1479         [-3.5        6.3333335]]
1480        >>> print(ops.cov(x, correction=0))
1481        [[ 8.666667  -2.3333333]
1482         [-2.3333333  4.2222223]]
1483        >>> fw = ms.Tensor([5, 2, 4], dtype=ms.int64)
1484        >>> aw = ms.Tensor([0.4588, 0.9083, 0.7616], ms.float32)
1485        >>> print(ops.cov(x, fweights=fw, aweights=aw))
1486        [[10.146146 -3.47241 ]
1487         [-3.47241   4.716825]]
1488    """
1489    if input.ndim > 2:
1490        raise ValueError(f"For cov, the input must have two or fewer dimensions, but got {input.ndim} dimensions.")
1491    if input.dtype == mstype.bool_:
1492        raise TypeError(f"For cov, the input dtype can not be bool.")
1493
1494    # View input tensor as 2D
1495    input_x = input.view((1, -1)) if input.ndim < 2 else input
1496    num_observations = input_x.shape[1]
1497    if fweights is not None:
1498        _check_cov_weights(fweights, "fweights", num_observations, mstype.int_type, "an integer")
1499
1500    if aweights is not None:
1501        _check_cov_weights(aweights, "aweights", num_observations, mstype.float_type, "a floating point")
1502
1503    if fweights is not None and aweights is None:
1504        w = fweights
1505    elif fweights is None and aweights is not None:
1506        w = aweights
1507    elif fweights is not None and aweights is not None:
1508        w = fweights * aweights
1509    else:
1510        w = None
1511
1512    if w is not None:
1513        w_sum = w.sum()
1514        avg = (input_x * w).sum(1) / _get_default_div_type(w_sum)
1515    else:
1516        w_sum = ops.cast(num_observations, mstype.int64)
1517        avg = input_x.sum(1) / _get_default_div_type(w_sum)
1518
1519    if w is not None and aweights is not None and correction != 0:
1520        norm_factor = w_sum - correction * (w * aweights).sum() / w_sum
1521    else:
1522        norm_factor = w_sum - correction
1523
1524    norm_factor = norm_factor.clip(min=0)
1525
1526    input_x = input_x - avg.unsqueeze(1)
1527    c = ops.mm(input_x, (input_x * w if w is not None else input_x).T)
1528    norm_factor = norm_factor.astype(mstype.float32)
1529    return ops.true_divide(c, _get_default_div_type(norm_factor)).squeeze()
1530
1531
1532def t(input):
1533    r"""
1534    Transposes a 2-D Tensor. 1-D Tensor are returned as it is.
1535
1536    Args:
1537        input (Tensor): The input Tensor.
1538
1539    Returns:
1540        Tensor, the transpose of `input` .
1541
1542    Supported Platforms:
1543        ``Ascend`` ``GPU`` ``CPU``
1544
1545    Examples:
1546        >>> import mindspore
1547        >>> from mindspore import Tensor, ops
1548        >>> from mindspore import dtype as mstype
1549        >>> x = Tensor([[1, 2, 3], [2, 3, 4]], mstype.float32)
1550        >>> output = ops.t(x)
1551        >>> print(output)
1552        [[1. 2.]
1553         [2. 3.]
1554         [3. 4.]]
1555    """
1556    if input.ndim == 2:
1557        return transpose_(input, (1, 0))
1558    return input
1559
1560
1561def tan(input):
1562    r"""
1563    Computes tangent of `input` element-wise.
1564
1565    .. math::
1566
1567        out_i = \tan(input_i)
1568
1569    Args:
1570        input (Tensor): The input Tensor, valid for any dimensions.
1571
1572    Returns:
1573        Tensor, has the same shape as `input`.
1574
1575    Raises:
1576        TypeError: If `input` is not a Tensor.
1577
1578    Supported Platforms:
1579        ``Ascend`` ``GPU`` ``CPU``
1580
1581    Examples:
1582        >>> import mindspore
1583        >>> import numpy as np
1584        >>> from mindspore import Tensor, ops
1585        >>> input = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
1586        >>> output = ops.tan(input)
1587        >>> print(output)
1588        [-1.5574081 0. 1.5574081]
1589    """
1590    return tan_(input)
1591
1592
1593def xlogy(input, other):
1594    r"""
1595    Computes the first input tensor multiplied by the logarithm of second input tensor element-wise.
1596    Returns zero when `input` is zero.
1597
1598    .. math::
1599
1600        out_i = input_{i}\ln{other_{i}}
1601
1602    Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
1603    The inputs must be two tensors or one tensor and one scalar.
1604    When the inputs are two tensors, the shapes of them could be broadcast.
1605    When the inputs are one tensor and one scalar,
1606    the scalar could only be a constant.
1607
1608    .. warning::
1609        - On Ascend, the data type of `input` and `other` must be float16 or float32.
1610
1611    Args:
1612        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
1613            a bool or a tensor whose data type is
1614            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
1615            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
1616        other (Union[Tensor, number.Number, bool]): The second input is a number.Number or
1617            a bool when the first input is a tensor or a tensor whose data type is number or bool\_.
1618            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
1619
1620    Returns:
1621        Tensor, the shape is the same as the one after broadcasting,
1622        and the data type is the one with higher precision or higher digits among the two inputs.
1623
1624    Raises:
1625        TypeError: If `input` and `other` is not a number.Number or a bool or a Tensor.
1626        TypeError: If dtype of `input` and `other` is not in [float16, float32, float64, complex64, complex128].
1627        ValueError: If `input` could not be broadcast to a tensor with shape of `other`.
1628
1629    Supported Platforms:
1630        ``Ascend`` ``GPU`` ``CPU``
1631
1632    Examples:
1633        >>> import mindspore
1634        >>> import numpy as np
1635        >>> from mindspore import Tensor, ops
1636        >>> input = Tensor(np.array([-5, 0, 4]), mindspore.float32)
1637        >>> other = Tensor(np.array([2, 2, 2]), mindspore.float32)
1638        >>> output = ops.xlogy(input, other)
1639        >>> print(output)
1640        [-3.465736   0.        2.7725887]
1641    """
1642    if isinstance(input, Tensor) and isinstance(other, Tensor) and input.dtype == mstype.bool_ \
1643            and other.dtype == mstype.bool_:
1644        input = input.astype(mstype.float32)
1645        other = other.astype(mstype.float32)
1646    return xlogy_(input, other)
1647
1648
1649def arccosh(input):
1650    r"""
1651    Alias for :func:`mindspore.ops.acosh`.
1652
1653    Supported Platforms:
1654        ``Ascend`` ``GPU`` ``CPU``
1655
1656    Examples:
1657        >>> import mindspore
1658        >>> from mindspore import Tensor, ops
1659        >>> import numpy as np
1660        >>> x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
1661        >>> output = ops.arccosh(x)
1662        >>> print(output)
1663        [0.        0.9624237 1.7627472 5.298292 ]
1664    """
1665    return acosh_(input)
1666
1667
1668def arcsin(x):
1669    r"""
1670    Alias for :func:`mindspore.ops.asin`.
1671
1672    Supported Platforms:
1673        ``Ascend`` ``GPU`` ``CPU``
1674    """
1675    return asin_(x)
1676
1677
1678def arctan(input):
1679    r"""
1680    Alias for :func:`mindspore.ops.atan`.
1681
1682    Supported Platforms:
1683        ``Ascend`` ``GPU`` ``CPU``
1684
1685    Examples:
1686        >>> import mindspore
1687        >>> from mindspore import Tensor, ops
1688        >>> import numpy as np
1689        >>> x = Tensor(np.array([1.0, 0.0]), mindspore.float32)
1690        >>> output = ops.arctan(x)
1691        >>> print(output)
1692        [0.7853982 0.       ]
1693    """
1694    return atan_(input)
1695
1696
1697def arctan2(input, other):
1698    r"""
1699    Alias for :func:`mindspore.ops.atan2`.
1700
1701    Supported Platforms:
1702        ``Ascend`` ``GPU`` ``CPU``
1703
1704    Examples:
1705        >>> import mindspore
1706        >>> from mindspore import Tensor, ops
1707        >>> import numpy as np
1708        >>> x = Tensor(np.array([0, 1]), mindspore.float32)
1709        >>> y = Tensor(np.array([1, 1]), mindspore.float32)
1710        >>> output = ops.arctan2(x, y)
1711        >>> print(output)
1712        [0.        0.7853982]
1713    """
1714    return atan2_(input, other)
1715
1716
1717def arctan2_ext(input, other):
1718    r"""
1719    Alias for :func:`mindspore.ops.atan2_ext`.
1720
1721    Supported Platforms:
1722        ``Ascend``
1723
1724    Examples:
1725        >>> import mindspore
1726        >>> from mindspore import Tensor, ops
1727        >>> import numpy as np
1728        >>> x = Tensor(np.array([0, 1]), mindspore.float32)
1729        >>> y = Tensor(np.array([1, 1]), mindspore.float32)
1730        >>> output = ops.arctan2_ext(x, y)
1731        >>> print(output)
1732        [0.        0.7853982]
1733    """
1734    return atan2_ext(input, other)
1735
1736
1737def polar(abs, angle):  # pylint: disable=redefined-outer-name
1738    r"""
1739    Converts polar coordinates to Cartesian coordinates.
1740
1741    Returns a complex tensor, its elements are Cartesian coordinates constructed with the polar
1742    coordinates which is specified by radial distance `abs` and polar angle `angle`.
1743
1744    .. math::
1745
1746        y_{i} =  abs_{i} * \cos(angle_{i}) + abs_{i} * \sin(angle_{i}) * j
1747
1748    Args:
1749        abs (Tensor): Radial distance. The shape of tensor is
1750          :math:`(N,*)` where :math:`N` means the batchsize of the input tensor,
1751          :math:`*` means, any number of additional dimensions.
1752          Must be one of the following types: float32, float64.
1753        angle (Tensor):  Polar angle. It has the same shape and dtype as `abs`.
1754
1755    Returns:
1756        Tensor, has the same shape as `abs`.
1757
1758        - If the inputs are float32, data type must be complex64.
1759        - If the inputs are float64, data type must be complex128.
1760
1761    Raises:
1762        TypeError: If neither `abs` nor `angle` is a Tensor.
1763        TypeError: If the dtype of input is not one of: float32, float64.
1764        TypeError: If the dtypes of `abs` and `angle` are not the same.
1765        ValueError: If `abs`'s shape is not the same as `angle`.
1766
1767    Supported Platforms:
1768        ``GPU`` ``CPU``
1769
1770    Examples:
1771        >>> import mindspore
1772        >>> import numpy as np
1773        >>> from mindspore import Tensor, ops
1774        >>> abs = Tensor(np.array([1, 2]), mindspore.float64)
1775        >>> angle = Tensor(np.array([np.pi / 2, 5 * np.pi / 4]), mindspore.float64)
1776        >>> output = ops.polar(abs, angle)
1777        >>> print(output)
1778        [ 6.12323400e-17+1.j         -1.41421356e+00-1.41421356j]
1779    """
1780    return polar_(abs, angle)
1781
1782
1783def arccos(input):
1784    """
1785    Alias for :func:`mindspore.ops.acos` .
1786
1787    Supported Platforms:
1788        ``Ascend`` ``GPU`` ``CPU``
1789    """
1790    return acos(input)
1791
1792
1793def arcsinh(input):
1794    r"""
1795    Alias for :func:`mindspore.ops.asinh`.
1796
1797    Supported Platforms:
1798        ``Ascend`` ``GPU`` ``CPU``
1799    """
1800    return asinh(input)
1801
1802
1803def arctanh(input):
1804    r"""
1805    Alias for :func:`mindspore.ops.atanh`.
1806
1807    Supported Platforms:
1808        ``Ascend`` ``GPU`` ``CPU``
1809    """
1810    return atanh(input)
1811
1812
1813def bitwise_and(input, other):
1814    r"""
1815    Returns bitwise `and` of two tensors element-wise.
1816
1817    .. math::
1818
1819        out_i = input_{i} \wedge other_{i}
1820
1821    Args of `input` and `other` comply with the implicit type conversion rules to
1822    make the data types consistent.
1823    If they have different data types, the lower priority data type will be converted to
1824    the relatively highest priority data type.
1825
1826    Args:
1827        input (Tensor): The first input tensor with shape :math:`(N, *)` where :math:`*` means
1828            any number of additional dimensions.
1829        other (Tensor): The second input tensor with the same dtype as `input`.
1830
1831    Returns:
1832        Tensor, has the same type as the `input`.
1833
1834    Raises:
1835        TypeError: If `input` or `other` is not a Tensor.
1836
1837    Supported Platforms:
1838        ``Ascend`` ``GPU`` ``CPU``
1839
1840    Examples:
1841        >>> import mindspore
1842        >>> import numpy as np
1843        >>> from mindspore import Tensor, ops
1844        >>> input = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
1845        >>> other = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
1846        >>> output = ops.bitwise_and(input, other)
1847        >>> print(output)
1848        [ 0  0  1 -1  1  0  1]
1849    """
1850    return bitwise_and_(input, other)
1851
1852
1853def bitwise_or(input, other):
1854    r"""
1855    Returns bitwise `or` of two tensors element-wise.
1856
1857    .. math::
1858
1859        out_i = input_{i} \mid other_{i}
1860
1861    Args of `input` and `other` comply with the implicit type conversion rules to
1862    make the data types consistent.
1863    If they have different data types, the lower priority data type will be converted to
1864    the relatively highest priority data type.
1865
1866    Args:
1867        input (Tensor): The first input tensor with shape :math:`(N, *)` where :math:`*` means
1868            any number of additional dimensions.
1869        other (Tensor): The second input tensor with the same dtype as `input`.
1870
1871    Returns:
1872        Tensor, has the same type as the `input`.
1873
1874    Raises:
1875        TypeError: If `input` or `other` is not a Tensor.
1876
1877    Supported Platforms:
1878        ``Ascend`` ``GPU`` ``CPU``
1879
1880    Examples:
1881        >>> import mindspore
1882        >>> import numpy as np
1883        >>> from mindspore import Tensor, ops
1884        >>> input = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
1885        >>> other = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
1886        >>> output = ops.bitwise_or(input, other)
1887        >>> print(output)
1888        [ 0  1  1 -1 -1  3  3]
1889    """
1890    return bitwise_or_(input, other)
1891
1892
1893def bitwise_xor(input, other):
1894    r"""
1895    Returns bitwise `xor` of two tensors element-wise.
1896
1897    .. math::
1898
1899        out_i = input_{i} \oplus other_{i}
1900
1901    Args of `input` and `other` comply with the implicit type conversion rules to
1902    make the data types consistent.
1903    If they have different data types, the lower priority data type will be converted to
1904    the relatively highest priority data type.
1905
1906    Args:
1907        input (Tensor): The first input tensor with shape :math:`(N, *)` where :math:`*` means
1908            any number of additional dimensions.
1909        other (Tensor): The second input tensor with the same dtype as `input`.
1910
1911    Returns:
1912        Tensor, has the same type as the `input`.
1913
1914    Raises:
1915        TypeError: If `input` or `other` is not a Tensor.
1916
1917    Supported Platforms:
1918        ``Ascend`` ``GPU`` ``CPU``
1919
1920    Examples:
1921        >>> import mindspore
1922        >>> import numpy as np
1923        >>> from mindspore import Tensor, ops
1924        >>> input = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
1925        >>> other = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
1926        >>> output = ops.bitwise_xor(input, other)
1927        >>> print(output)
1928        [ 0  1  0  0 -2  3  2]
1929    """
1930    return bitwise_xor_(input, other)
1931
1932
1933def bitwise_left_shift(input, other):
1934    r"""
1935    Perform a left bitwise shift operation on the `input` element-wise, where the number of bits to shift is
1936    specified by `other`.
1937
1938    .. math::
1939
1940        \begin{aligned}
1941        &out_{i} =input_{i} << other_{i}
1942        \end{aligned}
1943
1944    Args:
1945        input (Union[Tensor, int, bool]): The input to be left shifted.
1946        other (Union[Tensor, int, bool]): The number of bit to be applied on left arithmetic shift.
1947
1948    Returns:
1949        Tensor, the result after bitwise left shift.
1950
1951    Raises:
1952        TypeError: If neither `input` nor `other` is a tensor.
1953        TypeError: If either `input` or `other` is not a bool, int or a tensor of dtype: int or uint.
1954        TypeError: If `input` and `other` do not have the same dtype.
1955        ValueError: If `input` and `other` could not be broadcast.
1956
1957    Supported Platforms:
1958        ``Ascend`` ``GPU`` ``CPU``
1959
1960    Examples:
1961        >>> import mindspore
1962        >>> import numpy as np
1963        >>> from mindspore import Tensor, ops
1964        >>> input = Tensor(np.array([1024, 2]), mindspore.int16)
1965        >>> other = Tensor(np.array([2]), mindspore.int16)
1966        >>> output = ops.bitwise_left_shift(input, other)
1967        >>> print(output)
1968        [4096    8]
1969    """
1970    if not isinstance(input, Tensor) and not isinstance(other, Tensor):
1971        raise TypeError(f"For 'bitwise_left_shift', at least one of the inputs should be a Tensor.")
1972
1973    cast = ops.Cast()
1974    if isinstance(input, numbers.Number):
1975        if not isinstance(input, int):
1976            raise TypeError(f"For 'bitwise_left_shift', 'input' must be an integer, but got input:{type(input)}.")
1977        input = cast(input, other.dtype)
1978    elif isinstance(other, numbers.Number):
1979        if not isinstance(other, int):
1980            raise TypeError(f"For 'bitwise_left_shift', 'other' must be an integer, but got other:{type(other)}.")
1981        other = cast(other, input.dtype)
1982    ls = ops.LeftShift()
1983    return ls(input, other)
1984
1985
1986def bitwise_right_shift(input, other):
1987    r"""
1988    Perform a right bitwise shift operation on the `input` element-wise, where the number of bits to shift is
1989    specified by `other`.
1990
1991    .. math::
1992
1993        \begin{aligned}
1994        &out_{i} =input_{i} >> other_{i}
1995        \end{aligned}
1996
1997    Args:
1998        input (Union[Tensor, int, bool]): The input to be right shifted.
1999        other (Union[Tensor, int, bool]): The number of bit to be applied on right arithmetic shift.
2000
2001    Returns:
2002        Tensor, the result after bitwise right shift.
2003
2004    Raises:
2005        TypeError: If neither `input` nor `other` is a tensor.
2006        TypeError: If either `input` or `other` is not a bool, int or a tensor of dtype: int or uint.
2007        TypeError: If `input` and `other` do not have the same dtype.
2008        ValueError: If `input` and `other` could not be broadcast.
2009
2010    Supported Platforms:
2011        ``Ascend`` ``GPU`` ``CPU``
2012
2013    Examples:
2014        >>> import mindspore
2015        >>> import numpy as np
2016        >>> from mindspore import Tensor, ops
2017        >>> input = Tensor(np.array([1024, 2]), mindspore.int16)
2018        >>> other = Tensor(np.array([2]), mindspore.int16)
2019        >>> output = ops.bitwise_right_shift(input, other)
2020        >>> print(output)
2021        [256   0]
2022    """
2023    if not isinstance(input, Tensor) and not isinstance(other, Tensor):
2024        raise TypeError(f"For 'bitwise_left_shift', at least one of the inputs should be a Tensor.")
2025    cast = ops.Cast()
2026    if isinstance(input, numbers.Number):
2027        if not isinstance(input, int):
2028            raise TypeError(f"For 'bitwise_left_shift', 'input' must be an integer, but got input:{type(input)}.")
2029        input = cast(input, other.dtype)
2030    elif isinstance(other, numbers.Number):
2031        if not isinstance(other, int):
2032            raise TypeError(f"For 'bitwise_left_shift', 'other' must be an integer, but got other:{type(other)}.")
2033        other = cast(other, input.dtype)
2034    rs = ops.RightShift()
2035    return rs(input, other)
2036
2037
2038def inv(x):
2039    r"""
2040    Computes Reciprocal of input tensor element-wise.
2041
2042    .. math::
2043        out_i = \frac{1}{x_{i} }
2044
2045    Args:
2046        x (Tensor): Tensor of any dimension. Must be one of the following types: float16, float32 or int32.
2047
2048    Returns:
2049        Tensor, has the same type and shape as input shape value.
2050
2051    Raises:
2052        TypeError: If `x` is not a Tensor.
2053        TypeError: If dtype of `x` is not one of float16, float32, int32.
2054
2055    Supported Platforms:
2056        ``Ascend`` ``GPU`` ``CPU``
2057
2058    Examples:
2059        >>> import mindspore
2060        >>> import numpy as np
2061        >>> from mindspore import Tensor, ops
2062        >>> x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)
2063        >>> output = ops.inv(x)
2064        >>> print(output)
2065        [4.        2.5       3.2258065 1.923077 ]
2066    """
2067    return inv_(x)
2068
2069
2070def inverse(input):
2071    """
2072    Compute the inverse of the input matrix.
2073
2074    Args:
2075        input (Tensor): A matrix to be calculated. Input `input` must be at least two dimensions, and the size of
2076            the last two dimensions must be the same size. And the matrix must be invertible.
2077
2078    Returns:
2079        Tensor, has the same type and shape as input `input`.
2080
2081    Raises:
2082        TypeError: If `input` is not a Tensor.
2083        ValueError: If the size of the last two dimensions of `input` is not the same.
2084        ValueError: If the dimension of `input` is less than 2.
2085
2086    Supported Platforms:
2087        ``GPU`` ``CPU``
2088
2089    Examples:
2090        >>> from mindspore import Tensor, ops
2091        >>> from mindspore import dtype as mstype
2092        >>> x = Tensor([[1., 2.], [3., 4.]], mstype.float32)
2093        >>> print(ops.inverse(x))
2094        [[-2.   1. ]
2095         [ 1.5 -0.5]]
2096    """
2097    _check_is_tensor("input", input, "inverse")
2098    return matrix_inverse_(input)
2099
2100
2101def inverse_ext(input):
2102    """
2103    Compute the inverse of the input matrix.
2104
2105    Args:
2106        input (Tensor): A matrix to be calculated. Input `input` must be at least two dimensions, and the size of
2107            the last two dimensions must be the same size. And the matrix must be invertible.
2108
2109    Returns:
2110        Tensor, has the same type and shape as input `input`.
2111
2112    Raises:
2113        ValueError: If the size of the last two dimensions of `input` is not the same.
2114        ValueError: If `input` is not empty and its dimensions are less than 2.
2115        ValueError: If the dimensions of `input` are larger than 6.
2116
2117    Supported Platforms:
2118        ``Ascend``
2119
2120    Examples:
2121        >>> from mindspore import Tensor, ops
2122        >>> from mindspore import dtype as mstype
2123        >>> x = Tensor([[1., 2.], [3., 4.]], mstype.float32)
2124        >>> print(ops.inverse_ext(x))
2125        [[-2.   1. ]
2126         [ 1.5 -0.5]]
2127    """
2128    return matrix_inverse_ext(input)
2129
2130
2131def invert(x):
2132    r"""
2133    Flips all bits of input tensor element-wise.
2134
2135    .. math::
2136        out_i = \sim x_{i}
2137
2138    Args:
2139        x (Tensor): The input Tensor of shape :math:`(x_1, x_2, ..., x_R)`.
2140            The data type should be one of the following types: int16, uint16.
2141
2142    Returns:
2143        Tensor, has the same shape as `x`.
2144
2145    Raises:
2146        TypeError: If dtype of `x` is neither int16 nor uint16.
2147
2148    Supported Platforms:
2149        ``Ascend`` ``GPU`` ``CPU``
2150
2151    Examples:
2152        >>> import mindspore
2153        >>> import numpy as np
2154        >>> from mindspore import Tensor, ops
2155        >>> x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16)
2156        >>> output = ops.invert(x)
2157        >>> print(output)
2158        [-26 -5 -14 -10]
2159    """
2160    return invert_(x)
2161
2162
2163def bessel_j0(x):
2164    r"""
2165    Computes Bessel function of the first kind, order 0 element-wise.
2166
2167    The formula is defined as:
2168
2169    .. math::
2170        \begin{array}{ll} \\
2171            J_{0}(x) = \frac{1}{\pi} \int_{0}^{\pi} \cos (x \sin \theta) d \theta
2172            =\sum_{m=0}^{\infty} \frac{(-1)^{m} x^{2 m}}{2^{2 m} (m !)^2}
2173        \end{array}
2174
2175    Args:
2176        x (Tensor): The input tensor. The data type must be float16, float32 or float64.
2177
2178    Returns:
2179        Tensor, has the same shape and dtype as the `x`.
2180
2181    Raises:
2182        TypeError: If `x` is not a Tensor.
2183        TypeError: If dtype of `x` is not float16, float32 or float64.
2184
2185    Supported Platforms:
2186        ``GPU`` ``CPU``
2187
2188    Examples:
2189        >>> import mindspore
2190        >>> import numpy as np
2191        >>> from mindspore import Tensor, ops
2192        >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
2193        >>> output = ops.bessel_j0(x)
2194        >>> print(output)
2195        [0.93846981  0.76519769  0.22389078  -0.39714981]
2196    """
2197    return bessel_j0_(x)
2198
2199
2200def bessel_j1(x):
2201    r"""
2202    Computes Bessel function of the first kind, order 1 element-wise.
2203
2204    The formula is defined as:
2205
2206    .. math::
2207        \begin{array}{ll} \\
2208            J_{1}(x) = \frac{1}{\pi} \int_{0}^{\pi} \cos (x \sin \theta- \theta) d \theta
2209            =\sum_{m=0}^{\infty} \frac{(-1)^{m} x^{2 m+1}}{2^{2 m+1} m !(m+1) !}
2210        \end{array}
2211
2212    Args:
2213        x (Tensor): The input tensor. The data type must be float16, float32 or float64.
2214
2215    Returns:
2216        Tensor, has the same shape and dtype as the `x`.
2217
2218    Raises:
2219        TypeError: If `x` is not a Tensor.
2220        TypeError: If dtype of `x` is not float16, float32 or float64.
2221
2222    Supported Platforms:
2223        ``GPU`` ``CPU``
2224
2225    Examples:
2226        >>> import mindspore
2227        >>> import numpy as np
2228        >>> from mindspore import Tensor, ops
2229        >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
2230        >>> output = ops.bessel_j1(x)
2231        >>> print(output)
2232        [0.24226846  0.44005059  0.57672481 -0.06604333]
2233    """
2234    return bessel_j1_(x)
2235
2236
2237def bessel_i0(x):
2238    r"""
2239    Computes modified Bessel function of the first kind, order 0 element-wise.
2240
2241    .. math::
2242        \begin{array}{ll} \\
2243            I_{0}(x)=J_{0}(\mathrm{i} x)=\sum_{m=0}^{\infty}
2244            \frac{x^{2 m}}{2^{2 m} (m !)^{2}}
2245        \end{array}
2246
2247    where :math:`J_{0}` is Bessel function of the first kind, order 0.
2248
2249    Args:
2250        x (Tensor): The input tensor. The data type must be float16, float32 or float64.
2251
2252    Returns:
2253        Tensor, has the same shape and dtype as the `x`.
2254
2255    Raises:
2256        TypeError: If `x` is not a Tensor.
2257        TypeError: If dtype of `x` is not float16, float32 or float64.
2258
2259    Supported Platforms:
2260        ``GPU`` ``CPU``
2261
2262    Examples:
2263        >>> import mindspore
2264        >>> import numpy as np
2265        >>> from mindspore import Tensor, ops
2266        >>> x = Tensor(np.array([-1, -0.5, 0.5, 1]), mindspore.float32)
2267        >>> output = ops.bessel_i0(x)
2268        >>> print(output)
2269        [1.266066  1.0634835 1.0634835 1.266066]
2270    """
2271    return bessel_i0_(x)
2272
2273
2274def bessel_i0e(x):
2275    r"""
2276    Computes exponential scaled modified Bessel function of the first kind, order 0 element-wise.
2277
2278    The formula is defined as:
2279
2280    .. math::
2281        \begin{array}{ll} \\
2282            \text I_{0}e(x)=e^{(-|x|)} * I_{0}(x)=e^{(-|x|)} * \sum_{m=0}^
2283            {\infty} \frac{x^{2 m}}{2^{2 m} (m !)^{2}}
2284        \end{array}
2285
2286    where :math:`I_{0}` is modified Bessel function of the first kind, order 0.
2287
2288    Args:
2289        x (Tensor): The input tensor. The data type must be float16, float32 or float64.
2290
2291    Returns:
2292        Tensor, has the same shape and dtype as the `x`.
2293
2294    Raises:
2295        TypeError: If `x` is not a Tensor.
2296        TypeError: If dtype of `x` is not float16, float32 or float64.
2297
2298    Supported Platforms:
2299        ``Ascend`` ``GPU`` ``CPU``
2300
2301    Examples:
2302        >>> import mindspore
2303        >>> import numpy as np
2304        >>> from mindspore import Tensor, ops
2305        >>> x = Tensor(np.array([-1, -0.5, 0.5, 1]), mindspore.float32)
2306        >>> output = ops.bessel_i0e(x)
2307        >>> print(output)
2308        [0.46575961  0.64503527  0.64503527  0.46575961]
2309    """
2310    return bessel_i0e_(x)
2311
2312
2313def bessel_k0(x):
2314    r"""
2315    Computes modified Bessel function of the second kind, order 0 element-wise.
2316
2317    The formula is defined as:
2318
2319    .. math::
2320        \begin{array}{ll} \\
2321            K_{0}(x)= \lim_{\nu \to 0} \left(\frac{\pi}{2}\right) \frac
2322            {I_{-\nu}(x)-I_{\nu}(x)}{\sin (\nu \pi)} = \int_{0}^{\infty} e^{-x \cosh t} d t
2323        \end{array}
2324
2325    where :math:`I_{0}` is modified Bessel function of the first kind, order 0.
2326
2327    Args:
2328        x (Tensor): The input tensor. The data type must be float16, float32 or float64.
2329
2330    Returns:
2331        Tensor, has the same shape and dtype as the `x`.
2332
2333    Raises:
2334        TypeError: If `x` is not a Tensor.
2335        TypeError: If dtype of `x` is not float16, float32 or float64.
2336
2337    Supported Platforms:
2338        ``GPU`` ``CPU``
2339
2340    Examples:
2341        >>> import mindspore
2342        >>> import numpy as np
2343        >>> from mindspore import Tensor, ops
2344        >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
2345        >>> output = ops.bessel_k0(x)
2346        >>> print(output)
2347        [0.92441907  0.42102444  0.11389387  0.01115968]
2348    """
2349    return bessel_k0_(x)
2350
2351
2352def bessel_k0e(x):
2353    r"""
2354    Computes exponential scaled modified Bessel function of the second kind, order 0 element-wise.
2355
2356    The formula is defined as:
2357
2358    .. math::
2359        \begin{array}{ll} \\
2360            K_{0}e(x)= e^{(-|x|)} * K_{0}(x) = e^{(-|x|)} * \int_{0}^
2361            {\infty} e^{-x \cosh t} d t
2362        \end{array}
2363
2364    where :math:`K_{0}` is modified Bessel function of the second kind, order 0.
2365
2366    Args:
2367        x (Tensor): The input tensor. The data type must be float16, float32 or float64.
2368
2369    Returns:
2370        Tensor, has the same shape and dtype as the `x`.
2371
2372    Raises:
2373        TypeError: If `x` is not a Tensor.
2374        TypeError: If dtype of `x` is not float16, float32 or float64.
2375
2376    Supported Platforms:
2377        ``GPU`` ``CPU``
2378
2379    Examples:
2380        >>> import mindspore
2381        >>> import numpy as np
2382        >>> from mindspore import Tensor, ops
2383        >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
2384        >>> output = ops.bessel_k0e(x)
2385        >>> print(output)
2386        [1.52410939  1.14446308  0.84156822  0.60929767]
2387    """
2388    return bessel_k0e_(x)
2389
2390
2391def bessel_y0(x):
2392    r"""
2393    Computes Bessel function of the second kind, order 0 element-wise.
2394
2395    The formula is defined as:
2396
2397    .. math::
2398        \begin{array}{ll} \\
2399            Y_{0}(x)=\lim_{n \to 0} \frac{J_{n}(x) \cos n \pi-J_{-n}(x)}{\sin n \pi}
2400        \end{array}
2401
2402    where :math:`J_{0}` is Bessel function of the first kind, order 0.
2403
2404    Args:
2405        x (Tensor): The input tensor. The data type must be float16, float32 or float64.
2406
2407    Returns:
2408        Tensor, has the same shape and dtype as the `x`.
2409
2410    Raises:
2411        TypeError: If `x` is not a Tensor.
2412        TypeError: If dtype of `x` is not float16, float32 or float64.
2413
2414    Supported Platforms:
2415        ``GPU`` ``CPU``
2416
2417    Examples:
2418        >>> import mindspore
2419        >>> import numpy as np
2420        >>> from mindspore import Tensor, ops
2421        >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
2422        >>> output = ops.bessel_y0(x)
2423        >>> print(output)
2424        [-0.44451874  0.08825696  0.51037567  -0.01694074]
2425    """
2426    return bessel_y0_(x)
2427
2428
2429def bessel_y1(x):
2430    r"""
2431    Computes Bessel function of the second kind, order 1 element-wise.
2432
2433    The formula is defined as:
2434
2435    .. math::
2436        \begin{array}{ll} \\
2437            Y_{1}(x)=\lim_{n \to 1} \frac{J_{n}(x) \cos n \pi-J_{-n}(x)}{\sin n \pi}
2438        \end{array}
2439
2440    where :math:`J_{1}` is Bessel function of the first kind, order 1.
2441
2442    Args:
2443        x (Tensor): The input tensor. The data type must be float16, float32 or float64.
2444
2445    Returns:
2446        Tensor, has the same shape and dtype as the `x`.
2447
2448    Raises:
2449        TypeError: If `x` is not a Tensor.
2450        TypeError: If dtype of `x` is not float16, float32 or float64.
2451
2452    Supported Platforms:
2453        ``GPU`` ``CPU``
2454
2455    Examples:
2456        >>> import mindspore
2457        >>> import numpy as np
2458        >>> from mindspore import Tensor, ops
2459        >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
2460        >>> output = ops.bessel_y1(x)
2461        >>> print(output)
2462        [-1.47147239  -0.78121282  -0.10703243  0.39792571]
2463    """
2464    return bessel_y1_(x)
2465
2466
2467def eps(x):
2468    r"""
2469    Create a Tensor with the same data type and shape as input, and the element value is the minimum value that the
2470    corresponding data type can express.
2471
2472    Args:
2473        x (Tensor): Tensor of any dimension used to obtain the minimum value that its data type can express.
2474            The data type must be float16, float32 or float64.
2475
2476    Returns:
2477        Tensor, has the same type and shape as `x`, but filled with `x` dtype minimum val.
2478
2479    Raises:
2480        TypeError: If `x` is not a Tensor.
2481        TypeError: If data type of `x` is neither float16, float32, nor float64.
2482
2483    Supported Platforms:
2484        ``Ascend`` ``GPU`` ``CPU``
2485
2486    Examples:
2487        >>> import mindspore
2488        >>> from mindspore import Tensor, ops
2489        >>> x = Tensor([4, 1, 2, 3], mindspore.float32)
2490        >>> output = ops.eps(x)
2491        >>> print(output)
2492        [1.1920929e-07 1.1920929e-07 1.1920929e-07 1.1920929e-07]
2493    """
2494    return eps_(x)
2495
2496
2497def linspace(start, end, steps):
2498    r"""
2499    Returns a Tensor whose value is `steps` evenly spaced in the interval `start` and `end` (including `start` and
2500    `end`), and the length of the output Tensor is `steps`.
2501
2502    .. math::
2503        \begin{aligned}
2504        &step = (end - start)/(steps - 1)\\
2505        &output = [start, start+step, start+2*step, ... , end]
2506        \end{aligned}
2507
2508    Args:
2509        start (Union[Tensor, int, float]): Start value of interval. The tensor data type must be float32 or float64
2510            and with shape of 0-D.
2511        end (Union[Tensor, int, float]): Last value of interval. The tensor data type must be float32 or float64
2512            and with shape of 0-D.
2513        steps (Union[Tensor, int]): Number of ticks in the interval, inclusive of start and end.
2514            Must be positive int number or 0D int32/int64 Tensor.
2515
2516    Returns:
2517        Tensor, has the same dtype as `start`, and the shape of :math:`(steps)`.
2518
2519    Raises:
2520        TypeError: If `start` or `end` is not a Tensor.
2521        TypeError: If dtype of `start` or dtype of `end` is not float32 or float64.
2522        ValueError: If shape of `start` or shape of `end` is not 0-D.
2523        TypeError: If `steps` is not int or 0D int32/int64 Tensor.
2524        ValueError: If `steps` is not positive int number.
2525
2526    Supported Platforms:
2527        ``Ascend`` ``GPU`` ``CPU``
2528
2529    Examples:
2530        >>> import mindspore
2531        >>> from mindspore import Tensor, ops
2532        >>> start = Tensor(1, mindspore.float32)
2533        >>> end = Tensor(10, mindspore.float32)
2534        >>> steps = 5
2535        >>> output = ops.linspace(start, end, steps)
2536        >>> print(output)
2537        [ 1.    3.25  5.5   7.75 10.  ]
2538    """
2539    if not isinstance(start, Tensor):
2540        start = Tensor(start, mstype.float32)
2541    if not isinstance(end, Tensor):
2542        end = Tensor(end, mstype.float32)
2543    return linspace_(start, end, steps)
2544
2545
2546def linspace_ext(start, end, steps, *, dtype=None):
2547    r"""
2548    Returns a Tensor whose value is `steps` evenly spaced in the interval `start` and `end` (including `start` and
2549    `end`), and the length of the output Tensor is `steps`.
2550
2551    .. math::
2552        \begin{aligned}
2553        &step = (end - start)/(steps - 1)\\
2554        &output = [start, start+step, start+2*step, ... , end]
2555        \end{aligned}
2556
2557    Args:
2558        start (Union[Tensor, Number]): Start value of interval.
2559          If `start` is Tensor, data type must be float32 or float64 and with shape of 0-D.
2560        end (Union[Tensor, Number]): Last value of interval.
2561          If `end` is Tensor, data type must be float32 or float64 and with shape of 0-D.
2562        steps (Union[Tensor, int]): Number of ticks in the interval, inclusive of start and end.
2563            Must be positive int number or 0D int32/int64 Tensor.
2564
2565    Keyword Args:
2566        dtype (mindspore.dtype, optional): The output Tensor data type. Default: ``None`` , the data type of output
2567            Tensor is float32.
2568
2569    Returns:
2570        Tensor, has the shape of :math:`(steps,)`.
2571
2572    Raises:
2573        TypeError: If dtype of `start` or dtype of `end` is not supported.
2574        ValueError: If shape of `start` or shape of `end` is not 0-D.
2575        TypeError: If `steps` is not int or 0D int32/int64 Tensor.
2576        ValueError: If `steps` is not positive int number.
2577
2578    Supported Platforms:
2579        ``Ascend`` ``GPU`` ``CPU``
2580
2581    Examples:
2582        >>> start = Tensor(1, mindspore.float32)
2583        >>> end = Tensor(10, mindspore.float32)
2584        >>> steps = 5
2585        >>> output = ops.function.math_func.linspace_ext(start, end, steps, dtype=mindspore.float32)
2586        >>> print(output)
2587        [ 1.    3.25  5.5   7.75 10.  ]
2588    """
2589    return _get_cache_prim(LinSpaceExt)()(start, end, steps, dtype)
2590
2591
2592def det(input):
2593    r"""
2594    Computes the determinant of one or more square matrices.
2595
2596    Args:
2597        input (Tensor): A matrix to be calculated, its shape should be :math:`[..., M, M]` who must
2598          have at least two dimensions, and the last two
2599          dimensions must be the same size. Data type must be float32, float64, complex64 or complex128.
2600
2601    Returns:
2602        Tensor. The shape is :math:`input.shape[:-2]`, and the dtype is same as `input`.
2603
2604    Raises:
2605        TypeError: If `input` is not a Tensor.
2606        TypeError: If dtype of `input` not float32, float64, complex64 or complex128.
2607        ValueError: If the last two dimensions of `input` is not same size.
2608        ValueError: If the dimension of `input` is less than 2.
2609
2610    Supported Platforms:
2611        ``Ascend`` ``GPU`` ``CPU``
2612
2613    Examples:
2614        >>> import mindspore
2615        >>> import numpy as np
2616        >>> from mindspore import Tensor, ops
2617        >>> input = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32)
2618        >>> output = ops.det(input)
2619        >>> print(output)
2620        [-16.5 21. ]
2621
2622    Supported Platforms:
2623        ``Ascend`` ``GPU`` ``CPU``
2624    """
2625    return matrix_determinant_(input)
2626
2627
2628def matrix_determinant(input):
2629    r"""
2630    `matrix_determinant` is deprecated, please use `det` instead.
2631    """
2632    logger.warning("matrix_determinant is deprecated, please use `det` instead.")
2633    return matrix_determinant_(input)
2634
2635
2636def log_matrix_determinant(input):
2637    r"""
2638    `log_matrix_determinant` is deprecated, please use `matrix_solve` instead.
2639    """
2640    logger.warning("`log_matrix_determinant` is deprecated, please use `matrix_solve` instead.")
2641    return log_matrix_determinant_(input)
2642
2643
2644def lu_solve(b, LU_data, LU_pivots):
2645    r"""
2646    Computes the solution y to the system of linear equations :math:`Ay = b` ,
2647    given LU decomposition :math:`A` and column vector :math:`b`.
2648
2649    LU decomposition of a matrix can be generated from :func:`mindspore.scipy.linalg.lu_factor` .
2650
2651    .. warning::
2652        This is an experimental API that is subject to change or deletion.
2653
2654    Args:
2655        b (Tensor): Column vector `b` in the above equation. It has shape :math:`(*, m, k)`,
2656            where :math:`*` is batch dimensions, with data type float32, float16.
2657        LU_data (Tensor): LU decomposition. It has shape :math:`(*, m, m)`, where :math:`*` is batch
2658            dimensions, that can be decomposed into an upper triangular matrix U and a lower triangular
2659            matrix L, with data type float32, float16.
2660        LU_pivots (Tensor): Permutation matrix P of LU decomposition. It has
2661            shape :math:`(*, m)`, where :math:`*` is batch dimensions, that can be converted
2662            to a permutation matrix P, with data type int32.
2663
2664    Returns:
2665        Tensor, the same data type as the `b` and `LU_data`.
2666
2667    Raises:
2668        TypeError: If dtype of `b` or `LU_data` is not one of: float32, float16.
2669        TypeError: If dtype of `LU_pivots` is not: int32.
2670        TypeError: If `b`, `LU_data` or `LU_pivots` is not Tensor.
2671        TypeError: If dtype of `b` is not same as dtype of `LU_data`.
2672        ValueError: If the batch dimensions of LU_pivots does not match the batch dimensions of LU_data.
2673        ValueError: If `b` dimension less than 2, `LU_data` dimension less than 2 or `LU_pivots` dimension less than 1.
2674
2675    Supported Platforms:
2676        ``Ascend`` ``GPU`` ``CPU``
2677
2678    Examples:
2679        >>> import mindspore
2680        >>> import numpy as np
2681        >>> from mindspore import Tensor, ops
2682        >>> b = Tensor(np.array([[1], [3], [3]]), mindspore.float32)
2683        >>> LU_data = Tensor(np.array([[2, 1, 1], [0.5, 1, 1.5], [0.5, 0, 2.5]]), mindspore.float32)
2684        >>> LU_pivots = Tensor(np.array([2, 2, 3]), mindspore.int32)
2685        >>> y = ops.lu_solve(b, LU_data, LU_pivots)
2686        >>> print(y)
2687        [[ 1.9000002]
2688         [-1.4000001]
2689         [ 0.6      ]]
2690    """
2691    out = lu_solve_(b, LU_data, LU_pivots)
2692    return out
2693
2694
2695def matrix_solve(matrix, rhs, adjoint=False):  # pylint: disable=redefined-outer-name
2696    r"""
2697    Solves systems of linear equations.
2698
2699    .. math::
2700        \begin{aligned}
2701        &matrix[..., M, M] * x[..., M, K] = rhs[..., M, K]\\
2702        &adjoint(matrix[..., M, M]) * x[..., M, K] = rhs[..., M, K]
2703        \end{aligned}
2704
2705    .. warning::
2706        On GPU, if the matrix is irreversible, an error may be reported or an unknown result may be returned.
2707
2708    Args:
2709        matrix (Tensor): The shape of tensor is :math:`(..., M, M)` .
2710        rhs (Tensor): The shape of tensor is :math:`(..., M, K)` . `rhs` must have the same dtype as `matrix`.
2711        adjoint(bool): Indicating whether to solve with matrix or its (block-wise) adjoint. Default: ``False`` .
2712
2713    Returns:
2714        x (Tensor), The dtype and shape is the same as 'rhs'.
2715
2716    Raises:
2717        TypeError: If adjoint is not the type of bool.
2718        TypeError: If the type of matrix is not one of the following dtype:
2719                   mstype.float16, mstype.float32, mstype.float64, mstype.complex64, mstype.complex128.
2720        TypeError: If the type of `matrix` is not the same as that of `rhs`.
2721        ValueError: If the rank of `matrix` less than 2.
2722        ValueError: If the dimension of `matrix` is not the same as `rhs`.
2723        ValueError: If the inner-most 2 dimension of `matrix` is not the same.
2724        ValueError: If the inner-most 2 dimension of `rhs` does not match `matrix`.
2725        ValueError: If the `matrix` is irreversible.
2726
2727    Supported Platforms:
2728        ``Ascend`` ``CPU``
2729
2730    Examples:
2731        >>> import mindspore
2732        >>> from mindspore import Tensor, ops
2733        >>> matrix = Tensor([[5, 4], [3, 1]], mindspore.float32)
2734        >>> rhs = Tensor([[7], [2]], mindspore.float32)
2735        >>> result = ops.matrix_solve(matrix, rhs)
2736        >>> print(result)
2737        [[0.14285707]
2738         [1.5714287 ]]
2739    """
2740    matrix_solve_ = _get_cache_prim(MatrixSolve)(adjoint=adjoint)
2741    return matrix_solve_(matrix, rhs)
2742
2743
2744def slogdet(input):
2745    r"""
2746    Computes the sign and the log of the absolute value of the determinant of one or more square matrices.
2747
2748    Note:
2749        The type of output always be real-value, even `input` is complex.
2750
2751    Args:
2752        input (Tensor): A matrix to be calculated, its shape is :math:`(..., M, M)`.
2753          The matrix must be at least two dimensions, and the last two
2754          dimensions must be the same size. Data type must be float32, float64, complex64 or complex128.
2755
2756    Returns:
2757        Tensor. The signs of the log determinants. The shape is :math:`input.shape[:-2]`.
2758
2759        Tensor. The absolute values of the log determinants. The shape is :math:`input.shape[:-2]`.
2760
2761    Raises:
2762        TypeError: If `input` is not a Tensor.
2763        TypeError: If dtype of `input` not float32, float64, complex64 or complex128.
2764        ValueError: If the last two dimensions of `input` is not same size.
2765        ValueError: If the dimension of `input` is less than 2.
2766
2767    Supported Platforms:
2768        ``Ascend`` ``GPU`` ``CPU``
2769
2770    Examples:
2771        >>> import mindspore
2772        >>> import numpy as np
2773        >>> from mindspore import Tensor, ops
2774        >>> input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32)
2775        >>> sign, output = ops.slogdet(input_x)
2776        >>> print(sign)
2777        [-1.   1.]
2778        >>> print(output)
2779        [2.80336046e+00    3.04452229e+00]
2780    """
2781    return log_matrix_determinant_(input)
2782
2783
2784def truncate_div(x, y):
2785    """
2786    Divides the first input tensor by the second input tensor element-wise and rounds the results
2787    of division towards zero. Equivalent to C-style integer division.
2788
2789    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2790    When the inputs are two tensors,
2791    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
2792    When the inputs are one tensor and one scalar,
2793    the scalar could only be a constant.
2794
2795    Note:
2796        Broadcasting is supported.
2797
2798    Args:
2799        x(Union[Tensor, Number, bool]): The first input is a number, or a bool,
2800            or a tensor whose data type is number or bool.
2801        y(Union[Tensor, Number, bool]): The second input is a number, or a bool when the first input
2802            is a tensor, or a tensor whose data type is number or bool.
2803
2804    Returns:
2805        Tensor, the shape is the same as the one after broadcasting,
2806        and the data type is the one with higher precision or higher digits among the two inputs.
2807
2808    Raises:
2809        TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
2810
2811    Supported Platforms:
2812        ``Ascend`` ``GPU`` ``CPU``
2813
2814    Examples:
2815        >>> import mindspore
2816        >>> import numpy as np
2817        >>> from mindspore import Tensor, ops
2818        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
2819        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
2820        >>> output = ops.truncate_div(x, y)
2821        >>> print(output)
2822        [0 1 0]
2823    """
2824    return truncate_div_(x, y)
2825
2826
2827def truncate_mod(x, y):
2828    r"""
2829    Returns the remainder of division element-wise.
2830
2831    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2832    When the inputs are two tensors,
2833    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
2834    When the inputs are one tensor and one scalar,
2835    the scalar could only be a constant.
2836
2837    .. warning::
2838        - The input data does not support 0.
2839        - When the elements of input exceed 2048 , the accuracy of operator cannot guarantee the requirement of
2840          double thousandths in the mini form.
2841        - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
2842        - If shape is expressed as (D1,D2... ,Dn), then D1\*D2... \*DN<=1000000,n<=8.
2843
2844    Args:
2845        x (Union[Tensor, numbers.Number, bool]): The first input is a number, or a bool,
2846            or a tensor whose data type is number or bool.
2847        y (Union[Tensor, numbers.Number, bool]): The second input is a number, or a bool when the first input
2848            is a tensor, or a tensor whose data type is number or bool.
2849
2850    Returns:
2851        Tensor, the shape is the same as the one after broadcasting,
2852        and the data type is the one with higher precision among the two inputs.
2853
2854    Raises:
2855        TypeError: If neither `x` nor `y` is one of the following: Tensor, number, bool.
2856        TypeError: If neither `x` nor `y` is a Tensor.
2857        ValueError: If the shape `x` and `y` cannot be broadcasted to each other.
2858
2859    Supported Platforms:
2860        ``Ascend`` ``GPU`` ``CPU``
2861
2862    Examples:
2863        >>> import mindspore
2864        >>> import numpy as np
2865        >>> from mindspore import Tensor, ops
2866        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
2867        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
2868        >>> output = ops.truncate_mod(x, y)
2869        >>> print(output)
2870        [ 2  1 -1]
2871    """
2872    return truncate_mod_(x, y)
2873
2874
2875def trunc(input):
2876    r"""
2877    Returns a new tensor with the truncated integer values of the elements of the input tensor.
2878
2879    Args:
2880        input (Tensor): The input tensor.
2881
2882    Returns:
2883        Tensor, the same shape and data type as the input.
2884
2885    Raises:
2886        TypeError: If `input` is not a Tensor.
2887
2888    Supported Platforms:
2889        ``Ascend`` ``GPU`` ``CPU``
2890
2891    Examples:
2892        >>> import mindspore
2893        >>> import numpy as np
2894        >>> from mindspore import Tensor, ops
2895        >>> x = Tensor(np.array([3.4742, 0.5466, -0.8008, -3.9079]),mindspore.float32)
2896        >>> output = ops.trunc(x)
2897        >>> print(output)
2898        [3. 0. 0. -3.]
2899    """
2900    return trunc_(input)
2901
2902
2903def ldexp(x, other):
2904    """
2905    Multiplies input Tensor by :math:`2^{other}` element-wise.
2906
2907    It takes two arguments, a mantissa `x` and an exponent `other`,
2908    and returns their product as a floating-point number:
2909
2910    .. math::
2911
2912        out_{i} = x_{i} * ( 2 ^{other_{i}} )
2913
2914    Note:
2915        This function is commonly used to construct
2916        floating-point numbers from their component parts, or to scale a
2917        floating-point number by a power of two.
2918
2919    Args:
2920        x (Tensor): The input Tensor.
2921        other (Tensor): A Tensor of integers that represent exponents.
2922
2923    Returns:
2924        Tensor, the output Tensor.
2925
2926    Raises:
2927        TypeError: If `x` is not a Tensor.
2928        TypeError: If `other` is not a Tensor.
2929        ValueError: If shape of `x` and `other` can not broadcast.
2930
2931    Supported Platforms:
2932        ``Ascend`` ``GPU`` ``CPU``
2933
2934    Examples:
2935        >>> import mindspore
2936        >>> import numpy as np
2937        >>> from mindspore import Tensor
2938        >>> from mindspore import ops
2939        >>> x = Tensor(np.array([1.]), mindspore.float32)
2940        >>> other = Tensor(np.array([1, 2, 3, 4]), mindspore.int32)
2941        >>> out = ops.ldexp(x, other)
2942        >>> print(out)
2943        [ 2.  4.  8. 16.]
2944        >>> x = Tensor(np.array([[1.], [2]]), mindspore.float32)
2945        >>> other = Tensor(np.array([[1.], [2]]), mindspore.int32)
2946        >>> out = ops.ldexp(x, other)
2947        >>> print(out)
2948        [[2.]
2949         [8.]]
2950    """
2951    out = tensor_mul(x, tensor_pow(2.0, other))
2952    return out
2953
2954
2955def logit(input, eps=None):
2956    r"""
2957    Calculate the logit of a tensor element-wise.
2958
2959    .. math::
2960        \begin{align}
2961        y_{i} & = \ln(\frac{z_{i}}{1 - z_{i}}) \\
2962        z_{i} & = \begin{cases}
2963        input_{i} & \text{if eps is None} \\
2964        \text{eps} & \text{if } input_{i} \lt \text{eps} \\
2965        input_{i} & \text{if } \text{eps} \leq input_{i} \leq 1 - \text{eps} \\
2966        1 - \text{eps} & \text{if } input_{i} \gt 1 - \text{eps}
2967        \end{cases}
2968        \end{align}
2969
2970    Args:
2971        input (Tensor): The input tensor of type float16, float32 or float64.
2972        eps (float, optional): The epsilon. If eps is not None, the input clamp bound is defined as [eps, 1-eps],
2973            otherwise, the `input` is not clamped. Default: ``None`` .
2974
2975    Returns:
2976        Tensor, with the same shape and dtype as the `input`.
2977
2978    Raises:
2979        TypeError: If `eps` is not a float.
2980        TypeError: If `input` is not a Tensor.
2981        TypeError: If dtype of `input` is not float16, float32 or float64.
2982
2983    Supported Platforms:
2984        ``Ascend`` ``GPU`` ``CPU``
2985
2986    Examples:
2987        >>> import numpy as np
2988        >>> from mindspore import Tensor, ops
2989        >>> x = Tensor(np.array([0.1, 0.2, 0.3]).astype(np.float32))
2990        >>> output = ops.logit(x, eps=1e-5)
2991        >>> print(output)
2992        [-2.1972246 -1.3862944 -0.8472978]
2993    """
2994    if eps is None:
2995        eps = -1.0
2996    logit_ = _get_cache_prim(P.Logit)(eps)
2997    return logit_(input)
2998
2999#####################################
3000# Comparison Operation Functions.
3001#####################################
3002
3003
3004def lt(input, other):
3005    """
3006    Alias for :func:`mindspore.ops.less` .
3007
3008    Supported Platforms:
3009        ``Ascend`` ``GPU`` ``CPU``
3010    """
3011    return less(input, other)
3012
3013
3014def le(input, other):
3015    r"""
3016    Computes the boolean value of :math:`input <= other` element-wise.
3017
3018    .. math::
3019
3020        out_{i} =\begin{cases}
3021            & \text{True,    if } input_{i}<=other_{i} \\
3022            & \text{False,   if } input_{i}>other_{i}
3023            \end{cases}
3024
3025    .. note::
3026        - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
3027          consistent.
3028        - The inputs must be two tensors or one tensor and one scalar.
3029        - When the inputs are one tensor and one scalar, the scalar could only be a constant.
3030
3031    Args:
3032        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
3033            a bool or a tensor whose data type is
3034            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
3035            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
3036        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
3037            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
3038            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
3039
3040    Returns:
3041        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
3042
3043    Supported Platforms:
3044        ``Ascend`` ``GPU`` ``CPU``
3045
3046    Examples:
3047        >>> import mindspore
3048        >>> import numpy as np
3049        >>> from mindspore import Tensor, ops
3050        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
3051        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
3052        >>> output = ops.le(x, y)
3053        >>> print(output)
3054        [ True False  True]
3055    """
3056    return tensor_le(input, other)
3057
3058
3059def gt(input, other):
3060    r"""
3061    Compare the value of the input parameters :math:`input,other` element-wise, and the output result is a bool value.
3062
3063    .. math::
3064
3065        out_{i} =\begin{cases}
3066            & \text{True,    if } input_{i}>other_{i} \\
3067            & \text{False,   if } input_{i}<=other_{i}
3068            \end{cases}
3069
3070    Note:
3071        - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
3072          consistent.
3073        - The inputs must be two tensors or one tensor and one scalar.
3074        - When the inputs are two tensors, dtypes of them cannot be bool at the same time,
3075          and the shapes of them can be broadcast.
3076        - When the inputs are one tensor and one scalar, the scalar could only be a constant.
3077        - Broadcasting is supported.
3078        - If the input Tensor can be broadcast, the low dimension will be extended to the corresponding high dimension
3079          in another input by copying the value of the dimension.
3080
3081    Args:
3082        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
3083            a bool or a tensor whose data type is
3084            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
3085            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ .
3086        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
3087            the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
3088            When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.
3089
3090    Returns:
3091        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
3092
3093    Raises:
3094        TypeError: If neither `input` nor `other` is a Tensor.
3095
3096    Supported Platforms:
3097        ``Ascend`` ``GPU`` ``CPU``
3098
3099    Examples:
3100        >>> import mindspore
3101        >>> import numpy as np
3102        >>> from mindspore import Tensor, ops
3103        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
3104        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
3105        >>> output = ops.gt(x, y)
3106        >>> print(output)
3107        [False True False]
3108    """
3109    return tensor_gt(input, other)
3110
3111
3112def ge(input, other):
3113    r"""
3114    Computes the boolean value of :math:`input >= other` element-wise.
3115
3116    Note:
3117        - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
3118          consistent.
3119        - The inputs must be two tensors or one tensor and one scalar.
3120        - When the inputs are two tensors, dtypes of them cannot be bool at the same time,
3121          and the shapes of them can be broadcast.
3122        - When the inputs are one tensor and one scalar, the scalar could only be a constant.
3123        - Broadcasting is supported.
3124        - If the input Tensor can be broadcast, the low dimension will be extended to the corresponding high dimension
3125          in another input by copying the value of the dimension.
3126
3127    .. math::
3128
3129        out_{i} =\begin{cases}
3130            & \text{True,    if } input_{i}>=other_{i} \\
3131            & \text{False,   if } input_{i}<other_{i}
3132            \end{cases}
3133
3134    Args:
3135        input (Union[Tensor, Number, bool]): The first input is a number or
3136            a bool or a tensor whose data type is number or bool.
3137        other (Union[Tensor, Number, bool]): The second input is a number or
3138            a bool when the first input is a tensor or a tensor whose data type is number or bool.
3139
3140    Returns:
3141        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
3142
3143    Raises:
3144        TypeError: If neither `input` nor `other` is a Tensor.
3145
3146    Supported Platforms:
3147        ``Ascend`` ``GPU`` ``CPU``
3148
3149    Examples:
3150        >>> import mindspore
3151        >>> import numpy as np
3152        >>> from mindspore import Tensor, ops
3153        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
3154        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
3155        >>> output = ops.ge(x, y)
3156        >>> print(output)
3157        [True True False]
3158    """
3159    return tensor_ge(input, other)
3160
3161
3162def eq(input, other):
3163    r"""
3164    Computes the equivalence between two tensors element-wise.
3165
3166    The second argument can be a number or a tensor whose shape is broadcastable with the first argument and vise versa.
3167
3168    .. math::
3169
3170        out_{i} =\begin{cases}
3171            & \text{True,    if } input_{i} = other_{i} \\
3172            & \text{False,   if } input_{i} \ne other_{i}
3173            \end{cases}
3174
3175    Note:
3176        - `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
3177        - The input must be two Tensors, or a Tensor and a Scalar.
3178        - The shapes of the inputs can be broadcasted to each other.
3179
3180    Args:
3181        input (Union[Tensor, Number]): The first input is a number or
3182            a tensor whose data type is number.
3183        other (Union[Tensor, Number]): The second input is a number when the first input is a tensor.
3184            The data type is the same as the first input. If the first input is a number,
3185            the second input should be a tensor.
3186
3187    Returns:
3188        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
3189
3190    Raises:
3191        TypeError: If neither `input` nor `other` is a Tensor.
3192
3193    Supported Platforms:
3194        ``Ascend`` ``GPU`` ``CPU``
3195
3196    Examples:
3197        >>> import mindspore
3198        >>> from mindspore import Tensor, ops
3199        >>> # case 1: The shape of two inputs are different
3200        >>> x = Tensor([1, 2, 3], mindspore.float32)
3201        >>> output = ops.eq(x, 2.0)
3202        >>> print(output)
3203        [False True False]
3204        >>> # case 2: The shape of two inputs are the same
3205        >>> x = Tensor([1, 2, 3], mindspore.int32)
3206        >>> y = Tensor([1, 2, 4], mindspore.int32)
3207        >>> output = ops.eq(x, y)
3208        >>> print(output)
3209        [ True  True False]
3210    """
3211    return equal(input, other)
3212
3213
3214def ne(input, other):
3215    r"""
3216    Computes the non-equivalence of two tensors element-wise.
3217
3218    Note:
3219        - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
3220          consistent.
3221        - When the inputs are two tensors, the shapes of them could be broadcast.
3222        - When the inputs are one tensor and one scalar, the scalar could only be a constant.
3223        - Broadcasting is supported.
3224
3225    .. math::
3226
3227        out_{i} =\begin{cases}
3228        & \text{True,    if } input_{i} \ne other_{i} \\
3229        & \text{False,   if } input_{i} = other_{i}
3230        \end{cases}
3231
3232    Args:
3233        input (Union[Tensor, Number, bool]): The first input is a number or
3234            a bool or a tensor whose data type is number or bool.
3235        other (Union[Tensor, Number, bool]): The second input is a number or
3236            a bool when the first input is a tensor or a tensor whose data type is number or bool.
3237
3238    Returns:
3239        Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
3240
3241    Raises:
3242        TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
3243
3244    Supported Platforms:
3245        ``Ascend`` ``GPU`` ``CPU``
3246
3247    Examples:
3248        >>> import mindspore
3249        >>> from mindspore import Tensor, ops
3250        >>> x = Tensor([1, 2, 3], mindspore.float32)
3251        >>> output = ops.ne(x, 2.0)
3252        >>> print(output)
3253        [ True False  True]
3254        >>>
3255        >>> x = Tensor([1, 2, 3], mindspore.int32)
3256        >>> y = Tensor([1, 2, 4], mindspore.int32)
3257        >>> output = ops.ne(x, y)
3258        >>> print(output)
3259        [False False  True]
3260    """
3261    return not_equal(input, other)
3262
3263
3264def approximate_equal(x, y, tolerance=1e-5):
3265    r"""
3266    Returns ``True`` if abs(x-y) is smaller than tolerance element-wise, otherwise ``False`` .
3267
3268    .. math::
3269
3270        out_i = \begin{cases}
3271        & \text{ if } \left | x_{i} - y_{i} \right | < \text{tolerance},\ \ True  \\
3272        & \text{ if } \left | x_{i} - y_{i} \right | \ge \text{tolerance},\ \  False
3273        \end{cases}
3274
3275    where `tolerance` indicates Acceptable maximum tolerance.
3276
3277    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3278    If they have different data types, the lower precision data type will be converted to
3279    the relatively highest precision data type.
3280
3281    Args:
3282        x (Tensor): A tensor. Must be one of the following types: float32, float16.
3283          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
3284        y (Tensor): A tensor of the same type and shape as `x`.
3285        tolerance (float): The maximum deviation that two elements can be considered equal. Default: ``1e-5`` .
3286
3287    Returns:
3288        Tensor, the shape is the same as the shape of `x`, and the data type is bool.
3289
3290    Raises:
3291        TypeError: If `tolerance` is not a float.
3292        RuntimeError: If the data type of `x`, `y` conversion of Parameter is given
3293                      but data type conversion of Parameter is not supported.
3294
3295    Supported Platforms:
3296        ``Ascend`` ``GPU`` ``CPU``
3297
3298    Examples:
3299        >>> import numpy as np
3300        >>> from mindspore import Tensor, ops
3301        >>> from mindspore import dtype as mstype
3302        >>> tol = 1.5
3303        >>> x = Tensor(np.array([1, 2, 3]), mstype.float32)
3304        >>> y = Tensor(np.array([2, 4, 6]), mstype.float32)
3305        >>> output = ops.approximate_equal(Tensor(x), Tensor(y), tol)
3306        >>> print(output)
3307        [ True  False  False]
3308    """
3309    return _get_cache_prim(P.ApproximateEqual)(tolerance)(x, y)
3310
3311
3312def isnan(input):
3313    r"""
3314    Determines which elements are NaN for each position.
3315
3316    .. math::
3317
3318        out_i = \begin{cases}
3319          & \ True,\ \text{ if } input_{i} = \text{Nan} \\
3320          & \ False,\ \text{ if } input_{i} \ne  \text{Nan}
3321        \end{cases}
3322
3323    where :math:`Nan` means not a number.
3324
3325    Args:
3326        input (Tensor): The input tensor.
3327
3328    Returns:
3329        Tensor, has the same shape of `input`, and the dtype is bool.
3330
3331    Raises:
3332        TypeError: If `input` is not a Tensor.
3333
3334    Supported Platforms:
3335        ``Ascend`` ``GPU`` ``CPU``
3336
3337    Examples:
3338        >>> import mindspore
3339        >>> import numpy as np
3340        >>> from mindspore import Tensor, ops
3341        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
3342        >>> output = ops.isnan(x)
3343        >>> print(output)
3344        [ True False False]
3345        >>> x = Tensor(2.1, mindspore.float64)
3346        >>> output = ops.isnan(x)
3347        >>> print(output)
3348        False
3349    """
3350    return isnan_(input)
3351
3352
3353def isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False):
3354    """
3355    Returns a new Tensor with boolean elements representing if each element of `input`
3356    is “close” to the corresponding element of `other`. Closeness is defined as:
3357
3358    .. math::
3359        |input-other| ≤ atol + rtol × |other|
3360
3361    Args:
3362        input (Tensor): First tensor to compare.
3363            Support dtype: float16, float32, float64, int8, int16, int32, int64 and uint8.
3364            On Ascend, more dtypes are support: bool and bfloat16.
3365        other (Tensor): Second tensor to compare. Dtype must be same as `input`.
3366        rtol (Union[float, int, bool], optional): Relative tolerance. Default: ``1e-05`` .
3367        atol (Union[float, int, bool], optional): Absolute tolerance. Default: ``1e-08`` .
3368        equal_nan (bool, optional): If ``True`` , then two NaNs will be considered equal. Default: ``False``.
3369
3370    Returns:
3371        A bool Tensor, with the shape as broadcasted result of the input `input` and `other`.
3372
3373    Raises:
3374        TypeError: `input` or `other` is not Tensor.
3375        TypeError: `input` or `other` dtype is not support.
3376        TypeError: `atol` or `rtol` is not float, int or bool.
3377        TypeError: `equal_nan` is not bool.
3378        TypeError: `input` and `other` have different dtypes.
3379        ValueError: `input` and `other` cannot broadcast.
3380
3381    Supported Platforms:
3382        ``Ascend`` ``GPU`` ``CPU``
3383
3384    Examples:
3385        >>> import mindspore
3386        >>> import numpy as np
3387        >>> from mindspore import Tensor, ops
3388        >>> input = Tensor(np.array([1.3, 2.1, 3.2, 4.1, 5.1]), mindspore.float16)
3389        >>> other = Tensor(np.array([1.3, 3.3, 2.3, 3.1, 5.1]), mindspore.float16)
3390        >>> output = ops.isclose(input, other)
3391        >>> print(output)
3392        [ True False False False  True]
3393    """
3394    is_close = _get_cache_prim(P.IsClose)(rtol=rtol, atol=atol, equal_nan=equal_nan)
3395    return is_close(input, other)
3396
3397
3398def isreal(input):
3399    """
3400    Tests element-wise for real number.
3401    A complex value is considered real when its imaginary part is 0.
3402
3403    Args:
3404        input (Tensor): The input tensor.
3405
3406    Returns:
3407       Tensor, true where `input` is real number, false otherwise.
3408
3409    Raises:
3410        TypeError: If `input` is not a Tensor.
3411
3412    Supported Platforms:
3413        ``GPU`` ``CPU``
3414
3415    Examples:
3416        >>> from mindspore import ops, Tensor
3417        >>> from mindspore import dtype as mstype
3418        >>> x = Tensor([1, 1+1j, 2+0j], mstype.complex64)
3419        >>> output = ops.isreal(x)
3420        >>> print(output)
3421        [ True False True]
3422    """
3423
3424    _check_is_tensor("input", input, "isreal")
3425
3426    # Note: Integral and Floating tensor values are always real
3427    value = Tensor(1, mstype.bool_)
3428    real_dtype = mstype.int_type + mstype.uint_type + mstype.float_type + (mstype.bool_,)
3429    if input.dtype in real_dtype:
3430        return fill_v2_(input.shape, value)
3431    return imag_(input) == 0
3432
3433
3434def is_complex(input):
3435    '''
3436    Return True if the data type of the tensor is complex, otherwise return False.
3437
3438    Args:
3439        input (Tensor): The input tensor.
3440
3441    Returns:
3442        Bool, return whether the data type of the tensor is complex.
3443
3444    Raises:
3445        TypeError: If `input` is not a Tensor.
3446
3447    Supported Platforms:
3448        ``Ascend`` ``GPU`` ``CPU``
3449
3450    Examples:
3451        >>> from mindspore import ops, Tensor
3452        >>> from mindspore import dtype as mstype
3453        >>> input = Tensor([1, 1+1j, 2+2j], mstype.complex64)
3454        >>> output = ops.is_complex(input)
3455        >>> print(output)
3456        True
3457    '''
3458    if not isinstance(input, (Tensor, Tensor_)):
3459        raise TypeError("The input must be Tensor!")
3460    return input.dtype in mstype.complex_type
3461
3462
3463def nan_to_num(input, nan=0.0, posinf=None, neginf=None):
3464    """
3465    Replace the `NaN`, positive infinity and negative infinity values in 'input' with the
3466    specified values in `nan`, `posinf` and `neginf` respectively.
3467
3468    Args:
3469        input (Tensor): The shape of tensor is :math:`(input_1, input_2, ..., input_R)`.
3470            With float32 or float16 data type.
3471        nan (float): The replace value of 'NaN'. Default value is 0.0.
3472        posinf (float): the value to replace positive infinity values with. Default: ``None``,
3473            replacing positive infinity with the maximum value supported by the data type of `input`.
3474        neginf (float): the value to replace negative infinity values with. Default: ``None``,
3475            replacing negative infinity with the minimum value supported by the data type of `input`.
3476
3477    Returns:
3478        Tensor, has the same shape and dtype as the `input`.
3479
3480    Raises:
3481        TypeError: If `input` is not a Tensor.
3482        TypeError: If dtype of `input` is not float16 or float32.
3483
3484    Supported Platforms:
3485        ``Ascend`` ``CPU``
3486
3487    Examples:
3488        >>> import mindspore
3489        >>> import numpy as np
3490        >>> from mindspore import Tensor, ops
3491        >>> input = Tensor(np.array([float('nan'), float('inf'), -float('inf'), 5.0]), mindspore.float32)
3492        >>> output = ops.nan_to_num(input, 1.0, 2.0, 3.0)
3493        >>> print(output)
3494        [1.  2.  3.  5.0]
3495    """
3496    if not isinstance(input, (Tensor, Tensor_)):
3497        raise TypeError("the input x must be Tensor!")
3498    if nan is not None:
3499        if not isinstance(nan, float):
3500            raise TypeError("the parameter nan's dtype must be float.")
3501    else:
3502        nan = 0.0
3503    if posinf is not None:
3504        if not isinstance(posinf, float):
3505            raise TypeError("the parameter posinf's dtype must be float.")
3506    else:
3507        if input.dtype == mstype.float16:
3508            posinf = (float)(np.finfo(np.float16).max)
3509        elif input.dtype == mstype.float32:
3510            posinf = (float)(np.finfo(np.float32).max)
3511    if neginf is not None:
3512        if not isinstance(neginf, float):
3513            raise TypeError("the parameter neginf's dtype must be float.")
3514    else:
3515        if input.dtype == mstype.float16:
3516            neginf = (float)(np.finfo(np.float16).min)
3517        elif input.dtype == mstype.float32:
3518            neginf = (float)(np.finfo(np.float32).min)
3519    _nan_to_num = _get_cache_prim(NanToNum)(nan=nan, posinf=posinf, neginf=neginf)
3520    return _nan_to_num(input)
3521
3522
3523def fmax(input, other):
3524    r"""
3525    Computes the maximum of input tensors element-wise.
3526
3527    .. math::
3528        output_i = \max(x1_i, x2_i)
3529
3530    Note:
3531        - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
3532          consistent.
3533        - Shapes of `input` and `other` should be able to broadcast.
3534        - If one of the elements to be compared is NaN, another element is returned.
3535
3536    Args:
3537        input (Tensor): The first tensor. The supported dtypes are: float16, float32, float64, int32, int64.
3538        other (Tensor): The second tensor. The supported dtypes are: float16, float32, float64, int32, int64.
3539
3540    Returns:
3541        A Tensor, the shape is the same as the one after broadcasting,
3542        and the data type is the one with higher precision or higher digits among the two inputs.
3543
3544    Raises:
3545        TypeError: If `input` or `other` is not Tensor.
3546        TypeError: If dtype of `input` or `other` is not one of: float16, float32, float64, int32, int64.
3547        ValueError: If the shape of  `input` and `other` can not broadcast.
3548
3549    Supported Platforms:
3550        ``CPU``
3551
3552    Examples:
3553        >>> import mindspore
3554        >>> import numpy as np
3555        >>> from mindspore import Tensor, ops
3556        >>> x1 = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
3557        >>> x2 = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
3558        >>> output = ops.fmax(x1, x2)
3559        >>> print(output)
3560        [4. 5. 6.]
3561    """
3562    fmax_ = Fmax()
3563    return fmax_(input, other)
3564
3565
3566def fmin(input, other):
3567    r"""
3568    Computes the minimum of input tensors element-wise.
3569
3570    .. math::
3571        output_i = min(input_i, other_i)
3572
3573    Note:
3574        - Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
3575          consistent.
3576        - Shapes of `input` and `other` should be able to broadcast.
3577        - If one of the elements to be compared is NaN, another element is returned.
3578
3579    Args:
3580        input (Tensor): The first tensor. The supported dtypes are: float16, float32, float64, int32, int64.
3581        other (Tensor): The second tensor. The supported dtypes are: float16, float32, float64, int32, int64.
3582
3583    Returns:
3584        A Tensor, the shape is the same as the one after broadcasting,
3585        and the data type is the one with higher precision or higher digits among the two inputs.
3586
3587    Raises:
3588        TypeError: If `input` or `other` is not Tensor.
3589        TypeError: If dtype of `input` or `other` is not one of: float16, float32, float64, int32, int64.
3590        ValueError: If the shape of  `input` and `other` can not broadcast.
3591
3592    Supported Platforms:
3593
3594
3595    Examples:
3596        >>> import numpy as np
3597        >>> from mindspore import Tensor, ops
3598        >>> from mindspore import dtype as mstype
3599        >>> input = Tensor(np.array([1.0, 5.0, 3.0]), mstype.float32)
3600        >>> other = Tensor(np.array([4.0, 2.0, 6.0]), mstype.float32)
3601        >>> output = ops.fmin(input, other)
3602        >>> print(output)
3603        [1. 2. 3.]
3604    """
3605    fmin_ = Fmin()
3606    return fmin_(input, other)
3607
3608
3609def median(input, axis=-1, keepdims=False):
3610    r"""
3611    Computes the median and indices of input tensor.
3612
3613    .. warning::
3614        - `indices` does not necessarily contain the first occurrence of each median value found in the `input`,
3615          unless it is unique. The specific implementation of this API is device-specific.
3616          The results may be different on CPU and GPU.
3617
3618    Args:
3619        input (Tensor): A Tensor of any dimension whose data type is int16, int32, int64, float32 or float64.
3620        axis (int, optional): The dimension need to reduce. Default: ``-1`` .
3621        keepdims (bool, optional): Whether the output tensor need to retain `axis` dimension or not.
3622            Default: ``False`` .
3623
3624    Returns:
3625        y (Tensor), has the same dtype as the `input`. If `keepdims` is true,
3626        the `y` has the same shape as the `input` except the shape of `y` in dimension `axis` is size 1.
3627        Otherwise, the `y` lacks `axis` dimension than input.
3628
3629        indices (Tensor), has the same shape as the `y`, but dtype is int64.
3630
3631    Raises:
3632        TypeError: If dtype of `input` is not one of the following: int16, int32, int64, float32, float64.
3633        TypeError: If input `input` is not a Tensor.
3634        TypeError: If `axis` is not a int.
3635        TypeError: If `keepdims` is not a bool.
3636        ValueError: If `axis` is not in range of [-x.dim, x.dim-1].
3637
3638    Supported Platforms:
3639        ``GPU`` ``CPU``
3640
3641    Examples:
3642        >>> import numpy as np
3643        >>> from mindspore import Tensor, ops
3644        >>> x = Tensor(np.array([[0.57, 0.11, 0.21],[0.38, 0.50, 0.57], [0.36, 0.16, 0.44]]).astype(np.float32))
3645        >>> y = ops.median(x, axis=0, keepdims=False)
3646        >>> print(y)
3647        (Tensor(shape=[3], dtype=Float32, value= [ 3.79999995e-01,  1.59999996e-01,  4.39999998e-01]),
3648        Tensor(shape=[3], dtype=Int64, value= [1, 2, 2]))
3649    """
3650    median_ = _get_cache_prim(Median)(global_median=False, axis=axis, keep_dims=keepdims, ignore_nan=False)
3651    return median_(input)
3652
3653
3654def nanmedian(input, axis=-1, keepdims=False):
3655    r"""
3656    Computes the median and indices of `input` in specified dimension, ignoring NaN.
3657    If all elements in the specified dimensions are NaN, the result will be NaN.
3658
3659    .. warning::
3660        `indices` does not necessarily contain the first occurrence of each median value found in the `input`,
3661        unless it is unique.
3662
3663    Args:
3664        input (Tensor): The input tensor to calculate the median and indices.
3665        axis (int, optional): The dimension need to calculate median and indices.
3666            Default: ``-1`` , calculate the last dimension.
3667        keepdims (bool, optional): Whether the output tensor needs to retain dimension or not.
3668            Default: ``False``, not to retain dimensions.
3669
3670    Returns:
3671        Tensor, the median of input along the specified dimension, has the same dtype as `input`.
3672
3673        indices (Tensor), median index, dtype is int64.
3674
3675    Raises:
3676        TypeError: If dtype of `input` is not one of the following: int16, int32, int64, float32, float64.
3677        TypeError: If input `input` is not a Tensor.
3678        TypeError: If `axis` is not int.
3679        TypeError: If `keepdims` is not bool.
3680        ValueError: If `axis` is not in range of [-r, r) which `r` means the rank of `input`.
3681
3682    Supported Platforms:
3683        ``CPU``
3684
3685    Examples:
3686        >>> import mindspore
3687        >>> from mindspore import Tensor, ops
3688        >>> x = Tensor([[0.57, 0.11, float("nan")],
3689        ...             [0.38, float("nan"), float("nan")],
3690        ...             [0.36, 0.16, float("nan")]], mindspore.float32)
3691        >>> y, idx = ops.nanmedian(x, axis=0, keepdims=False)
3692        >>> print(y)
3693        [0.38 0.11  nan]
3694        >>> print(idx)
3695        [1 0 0]
3696    """
3697    nanmedian_ = _get_cache_prim(Median)(global_median=False, axis=axis, keep_dims=keepdims, ignore_nan=True)
3698    return nanmedian_(input)
3699
3700
3701def nanmean(input, axis=None, keepdims=False, *, dtype=None):
3702    r"""
3703    Computes the mean of `input` in specified dimension, ignoring NaN.
3704    If all elements in the specified dimensions are NaN, the result will be NaN.
3705
3706    Args:
3707        input (Tensor): The input tensor to calculate the mean.
3708        axis (int, optional): The dimension need to reduce. Default: ``None``, all dimensions are reduced.
3709        keepdims (bool, optional): Whether the output tensor needs to retain dimension or not.
3710            Default: ``False``, not to retain dimensions.
3711
3712    Keyword Args:
3713        dtype (mindspore.dtype, optional): The output Tensor data type. Default: ``None`` , the data type of output
3714            Tensor is same as the input.
3715
3716    Returns:
3717        Tensor, the mean of input `input` in the given dimension axis, while ignoring NaNs.
3718
3719    Raises:
3720        TypeError: If `input` is not a Tensor.
3721        TypeError: If `axis` is not int.
3722        TypeError: If `keepdims` is not bool.
3723        TypeError: If `dtype` is not mindspore dtype.
3724        ValueError: If `axis` is not in range of [-r, r) which `r` means the rank of `input`.
3725
3726    Supported Platforms:
3727        ``Ascend`` ``GPU`` ``CPU``
3728
3729    Examples:
3730        >>> import mindspore
3731        >>> from mindspore import Tensor, ops
3732        >>> x = Tensor([[0.5, -1.1, float('nan')], [3.4, float('nan'), float('nan')]], mindspore.float32)
3733        >>> y = ops.nanmean(x, axis=0, keepdims=False)
3734        >>> print(y)
3735        [ 1.95 -1.1    nan]
3736    """
3737    _check_is_tensor("input", input, "nanmean")
3738    _check_repeat_in_axis(axis, input.ndim, "nanmean")
3739    if input.dtype not in mstype.float_type:
3740        raise TypeError(f"For 'nanmean', input should be floating point dtype, but got {type(input)}.")
3741    nan_sum = nansum(input, axis, keepdims)
3742    is_num = isnan(input).logical_not()
3743    is_num = is_num.sum(axis=axis, keepdims=keepdims)
3744    out = nan_sum / is_num
3745    if dtype is not None:
3746        return out.astype(dtype)
3747    return out
3748
3749
3750def orgqr(input, input2):
3751    r"""
3752    Calculates the explicit representation of the orthogonal matrix :math:`Q`
3753    returned by :class:`mindspore.ops.Geqrf`.
3754
3755    Take the case of input without batch dimension as an example,
3756    computes the first :math:`N` columns of a product of
3757    `Householder <https://en.wikipedia.org/wiki/Householder_transformation#Householder_matrix>`_
3758    matrices. Suppose input `input` is a matrix of size :math:`(M, N)` after householder transformation.
3759    When the diagonal of `input` is set to 1, every colunm of lower triangular in `input` is
3760    denoted as :math:`w_j` for :math:`j` for
3761    :math:`j=1, \ldots, M`, this function returns the first :math:`N` columns of the matrix
3762
3763    .. math::
3764        H_{1} H_{2} \ldots H_{k} \quad \text { with } \quad H_{j}=\mathrm{I}_{M}-\tau_{j} w_{j} w_{j}^{\mathrm{H}}
3765
3766    where :math:`\mathrm{I}_{M}` is the :math:`M`-dimensional identity matrix. And when :math:`w` is complex,
3767    :math:`w^{\mathrm{H}}` is the conjugate transpose, otherwise the transpose.
3768    The output matrix is the same size as the input matrix `input`.
3769    :math:`tau` is corresponding to `input2`.
3770
3771    Args:
3772        input (Tensor): Tensor of shape :math:`(*, M, N)`, indicating 2D or 3D matrices,
3773            with float32, float64, complex64 and complex128 data type.
3774        input2 (Tensor): Tensor of shape :math:`(*, K)`, where `K` is less than or equal to `N`, indicating the
3775            reflecting coefficient in Householder transformation, which have the same type as `input`.
3776
3777    Returns:
3778        Tensor, has the same shape and data type as `input`.
3779
3780    Raises:
3781        TypeError: If `input` or `input2` are not Tensors.
3782        TypeError: If dtype of `input` and `input2` is not one of: float64, float32, complex64, complex128.
3783        ValueError: If `input` and `input2` have different batch size.
3784        ValueError: If input.shape[-2] < input.shape[-1].
3785        ValueError: If input.shape[-1] < input2.shape[-1].
3786        ValueError: If rank(input) - rank(input2) != 1.
3787        ValueError: If rank(input) != 2 or 3.
3788
3789    Supported Platforms:
3790        ``Ascend`` ``GPU`` ``CPU``
3791
3792    Examples:
3793        >>> import mindspore
3794        >>> import numpy as np
3795        >>> from mindspore import Tensor, ops
3796        >>> input = Tensor(np.array([[-114.6, 10.9, 1.1], [-0.304, 38.07, 69.38], [-0.45, -0.17, 62.]]),
3797        ... mindspore.float32)
3798        >>> input2 = Tensor(np.array([1.55, 1.94, 0.0]), mindspore.float32)
3799        >>> y = ops.orgqr(input, input2)
3800        >>> print(y)
3801        [[-0.54999995 -0.2128925   0.8137956 ]
3802         [ 0.47119996 -0.8752807   0.08240613]
3803         [ 0.69749993  0.42560163  0.57772595]]
3804    """
3805
3806    orgqr_ = Orgqr()
3807    return orgqr_(input, input2)
3808
3809
3810def ormqr(input, tau, other, left=True, transpose=False):
3811    r"""
3812    Calculates two matrices multiplication of a product of a general matrix with Householder matrices.
3813    Calculates the product of a matrix C(given by `other`) with dimensions (m, n) and a matrix Q which is represented
3814    using Householder reflectors (`input`, `tau`). Returns a Tensor.
3815
3816    Args:
3817        input (Tensor): Tensor of shape :math:`(*, mn, k)`, when `left` is True, mn equals to m,
3818            otherwise, mn equals to n. And `*` is zero or more batch dimensions.
3819        tau (Tensor): Tensor of shape :math:`(*, min(mn, k))` where `*` is zero or more batch dimensions,
3820            and its type is the same as `input`.
3821        other (Tensor): Tensor of shape :math:`(*, m, n)` where `*` is zero or more batch dimensions,
3822            and its type is the same as `input`.
3823        left (bool, optional): determines the order of multiplication. If True, computes op(Q) \* `other` ,
3824            otherwise, compute `other` \* op(Q). Default: ``True`` .
3825        transpose (bool, optional): If True, the matrix Q is conjugate transposed,
3826            otherwise, not conjugate transposing matrix Q. Default: ``False`` .
3827
3828    Returns:
3829        Tensor, with the same type and shape as `other`.
3830
3831    Raises:
3832        TypeError: If `input` or `tau` or `other` is not Tensor.
3833        TypeError: If dtype of `input` or `tau` or `other` is not one of: float64, float32, complex64, complex128.
3834        ValueError: If the dimension of `input` or `other` is less than 2D.
3835        ValueError: If rank(`input`) - rank(`tau`) != 1.
3836        ValueError: If tau.shape[:-1] != input.shape[:-2]
3837        ValueError: If other.shape[:-2] != input.shape[:-2]
3838        ValueError: If left == true, other.shape[-2] < tau.shape[-1].
3839        ValueError: If left == true, other.shape[-2] != input.shape[-2].
3840        ValueError: If left == false, other.shape[-1] < tau.shape[-1].
3841        ValueError: If left == false, other.shape[-1] != input.shape[-2].
3842
3843    Supported Platforms:
3844        ``GPU``
3845
3846    Examples:
3847        >>> import mindspore
3848        >>> import numpy as np
3849        >>> from mindspore import Tensor, ops
3850        >>> input = Tensor(np.array([[-114.6, 10.9, 1.1], [-0.304, 38.07, 69.38], [-0.45, -0.17, 62]]),
3851        ...                mindspore.float32)
3852        >>> tau = Tensor(np.array([1.55, 1.94, 3.0]), mindspore.float32)
3853        >>> other = Tensor(np.array([[-114.6, 10.9, 1.1],
3854        ...                          [-0.304, 38.07, 69.38],
3855        ...                          [-0.45, -0.17, 62]]), mindspore.float32)
3856        >>> output = ops.ormqr(input, tau, other)
3857        >>> print(output)
3858        [[  63.82713   -13.823125 -116.28614 ]
3859         [ -53.659264  -28.157839  -70.42702 ]
3860         [ -79.54292    24.00183   -41.34253 ]]
3861    """
3862
3863    ormqr_ = _get_cache_prim(Ormqr)(left, transpose)
3864    return ormqr_(input, tau, other)
3865
3866
3867def hypot(input, other):
3868    r"""
3869    Computes hypotenuse of input tensors element-wise as legs of a right triangle.
3870    The shape of two inputs should be broadcastable, and data type of them should be
3871    one of: float32, float64
3872
3873    .. math::
3874        out_i = \sqrt{input_i^2 + other_i^2}
3875
3876    Args:
3877        input (Tensor): The first input tensor.
3878        other (Tensor): The second input tensor.
3879
3880    Returns:
3881        Tensor, the shape is the same as the one after broadcasting, and the data type is one
3882        with higher precision in the two inputs.
3883
3884    Raises:
3885        TypeError: If data type `input` or `other` is not float32 or float64.
3886        ValueError: If shape of two inputs are not broadcastable.
3887
3888    Supported Platforms:
3889        ``Ascend`` ``GPU`` ``CPU``
3890
3891    Examples:
3892        >>> import numpy as np
3893        >>> from mindspore import Tensor, ops
3894        >>> input = Tensor(np.array([3., 5., 7.]))
3895        >>> other = Tensor(np.array([4., 12., 24.]))
3896        >>> y = ops.hypot(input, other)
3897        >>> print(y)
3898        [ 5. 13. 25.]
3899    """
3900
3901    hypot_ = Hypot()
3902    return hypot_(input, other)
3903
3904
3905def heaviside(input, values):
3906    r"""
3907    Computes the Heaviside step function for each element in input.
3908
3909    .. math::
3910        \text { heaviside }(\text { input, values })=\left\{\begin{array}{ll}
3911        0, & \text { if input }<0 \\
3912        \text { values, } & \text { if input }=0 \\
3913        1, & \text { if input }>0
3914        \end{array}\right.
3915
3916    Args:
3917        input (Tensor): The input tensor. With real number data type.
3918        values (Tensor): The values to use where `input` is zero. Values can be broadcast with `input` .
3919            `input` should have the same dtype with `values` .
3920
3921    Returns:
3922        Tensor, has the same type as `input` and `values`.
3923
3924    Raises:
3925        TypeError: If `input` or `values` is not Tensor.
3926        TypeError: If data type `input` and `values` is different.
3927        ValueError: If shape of two inputs are not broadcastable.
3928
3929    Supported Platforms:
3930        ``Ascend`` ``GPU`` ``CPU``
3931
3932    Examples:
3933        >>> import numpy as np
3934        >>> from mindspore import Tensor, ops
3935        >>> input = Tensor(np.array([-5., 1., 0., 2., 0.]))
3936        >>> values = Tensor(np.array([3.]))
3937        >>> y = ops.heaviside(input, values)
3938        >>> print(y)
3939        [0. 1. 3. 1. 3.]
3940    """
3941
3942    heaviside_ = Heaviside()
3943    return heaviside_(input, values)
3944
3945
3946def histc(input, bins=100, min=0., max=0.):
3947    r"""
3948    Computes the histogram of a tensor.
3949
3950    The elements are sorted into equal width bins between `min` and `max`.
3951    If `min` and `max` are both zero, the minimum and maximum values of the data are used.
3952
3953    Elements lower than min or higher than max are ignored.
3954
3955    Args:
3956        input (Tensor): the input tensor, type support list :math:`[float16, float32, int32]`.
3957        bins (int, optional): Number of histogram bins, optional. If specified, must be positive. Default: ``100`` .
3958        min (int, float, optional): An optional float of the lower end of the range (inclusive). Default: ``0.0`` .
3959        max (int, float, optional): An optional float of the upper end of the range (inclusive). Default: ``0.0`` .
3960
3961    Returns:
3962        Tensor, 1-D Tensor with type int32.
3963
3964    Raises:
3965        TypeError: If `input` is not a Tensor.
3966        TypeError: If `input` datetype not in support list.
3967        TypeError: If attr `min` or `max` is not float or int.
3968        TypeError: If attr `bins` is not int.
3969        ValueError: If attr value `min` > `max`.
3970        ValueError: If attr `bins` <= 0.
3971
3972    Supported Platforms:
3973        ``Ascend`` ``CPU``
3974
3975    Examples:
3976        >>> from mindspore import Tensor, ops
3977        >>> x = Tensor([1., 2, 1])
3978        >>> y = ops.histc(x, bins=4, min=0.0, max=3.0)
3979        >>> print(y)
3980        [0 2 1 0]
3981    """
3982    if not isinstance(min, (int, float)):
3983        raise TypeError(f"For 'histc', parameter 'min' must be an int or float, but got {type(min)}.")
3984    if not isinstance(max, (int, float)):
3985        raise TypeError(f"For 'histc', parameter 'max' must be an int or float, but got {type(max)}.")
3986
3987    histogram_op = _get_cache_prim(P.Histogram)(bins, float(min), float(max))
3988    return histogram_op(input)
3989
3990
3991def logspace(start, end, steps, base=10, *, dtype=mstype.float32):
3992    r"""
3993    Returns a 1-D Tensor with size `steps` whose value is from :math:`base^{start}` to :math:`base^{end}`,
3994    and use `base` as the base number.
3995
3996    .. math::
3997        \begin{aligned}
3998        &step = (end - start)/(steps - 1)\\
3999        &output = [base^{start}, base^{start + 1 * step}, ... , base^{start + (steps-2) * step}, base^{end}]
4000        \end{aligned}
4001
4002    Args:
4003        start (Union[float, Tensor]): Start value of interval.
4004        end (Union[float, Tensor]): End value of interval.
4005        steps (int): The steps must be a non-negative integer.
4006        base (int, optional): The base must be a non-negative integer. Default: ``10`` .
4007        dtype (mindspore.dtype, optional): The dtype of output. Default: ``mstype.float32`` .
4008
4009    Returns:
4010        Tensor has the shape as :math:`(step, )`. Its datatype is set by the attr 'dtype'.
4011
4012    Raises:
4013        TypeError: If `start` is not a float or a Tensor.
4014        TypeError: If `end` is not a float or a Tensor.
4015        TypeError: If `steps` is not an int.
4016        TypeError: If `base` is not an int.
4017        ValueError: If `steps` is not a non-negative integer.
4018        ValueError: If `base` is not a non-negative integer.
4019
4020    Supported Platforms:
4021        ``Ascend`` ``GPU`` ``CPU``
4022
4023    Examples:
4024        >>> import mindspore
4025        >>> from mindspore import Tensor, ops
4026        >>> start = Tensor(1, mindspore.float32)
4027        >>> end = Tensor(10, mindspore.float32)
4028        >>> output = ops.logspace(start, end, steps = 10, base = 10, dtype=mindspore.float32)
4029        >>> print(output)
4030        [1.e+01 1.e+02 1.e+03 1.e+04 1.e+05 1.e+06 1.e+07 1.e+08 1.e+09 1.e+10]
4031    """
4032    if isinstance(start, float):
4033        start = ops.cast(start, mstype.float32)
4034    if isinstance(end, float):
4035        end = ops.cast(end, mstype.float32)
4036    logspace_ = _get_cache_prim(P.LogSpace)(steps, base, dtype)
4037    return logspace_(start, end)
4038
4039
4040def logaddexp(input, other):
4041    r"""
4042    Computes the logarithm of the sum of exponentiations of the inputs.
4043    This function is useful in statistics where the calculated probabilities of events may be
4044    so small as to exceed the range of normal floating point numbers.
4045
4046    .. math::
4047
4048        out_i = \log(exp(input_i) + \exp(other_i))
4049
4050    Args:
4051        input (Tensor): Input Tensor. The dtype of `input` must be float.
4052        other (Tensor): Input Tensor. The dtype of `input` must be float.
4053            If the shape of `input` is not equal to the shape of `other`,
4054            they must be broadcastable to a common shape (which becomes the shape of the output).
4055
4056    Returns:
4057        Tensor.
4058
4059    Raises:
4060        TypeError: If `input`, `other` is not a Tensor.
4061        TypeError: The dtype of `input` or `other` is not float.
4062
4063    Supported Platforms:
4064        ``Ascend`` ``GPU`` ``CPU``
4065
4066    Examples:
4067        >>> import numpy as np
4068        >>> from mindspore import Tensor, ops
4069        >>> x1 = Tensor(np.array([1, 2, 3]).astype(np.float16))
4070        >>> x2 = Tensor(np.array(2).astype(np.float16))
4071        >>> output = ops.logaddexp(x1, x2)
4072        >>> print(output)
4073        [2.312 2.693 3.312]
4074    """
4075
4076    if not isinstance(input, (Tensor, Tensor_)):
4077        raise TypeError(f"For logaddexp, the input must be a Tensor, but got {type(input)}.")
4078    if not isinstance(other, (Tensor, Tensor_)):
4079        raise TypeError(f"For logaddexp, the other must be a Tensor, but got {type(other)}.")
4080    if not ops.is_floating_point(input) or not ops.is_floating_point(other):
4081        raise TypeError(f"For logaddexp2, the dtype of 'input' and 'other' must be float,"
4082                        f"but got {input.dtype} and {other.dtype}.")
4083    m = maximum(input, other)
4084    abs_val = abs(input - other)
4085    exp_val = tensor_exp(neg(abs_val))
4086    y = m + log1p(exp_val)
4087    return y
4088
4089
4090def logaddexp2(input, other):
4091    r"""
4092    Computes the logarithm of the sum of exponentiations in base of 2 of the inputs.
4093
4094    .. math::
4095
4096        out_i = \log_2(2^{input_i} + 2^{other_i})
4097
4098    Args:
4099        input (Tensor): Input tensor. The dtype of `input` must be float.
4100        other (Tensor): Input tensor. The dtype of `other` must be float.
4101            If ``input.shape != other.shape``, they must be broadcastable to
4102            a common shape (which becomes the shape of the output).
4103
4104    Returns:
4105        Tensor.
4106
4107    Raises:
4108        TypeError: If `input`, `other` is not a Tensor.
4109        TypeError: If the dtype of `input`, `other` is not a float.
4110
4111    Supported Platforms:
4112        ``Ascend`` ``GPU`` ``CPU``
4113
4114    Examples:
4115        >>> import numpy as np
4116        >>> from mindspore import Tensor, ops
4117        >>> x1 = Tensor(np.array([2, 4, 8]).astype(np.float16))
4118        >>> x2 = Tensor(np.array([2]).astype(np.float16))
4119        >>> output = ops.logaddexp2(x1, x2)
4120        >>> print(output)
4121        [3. 4.32 8.02]
4122    """
4123    _check_is_tensor("input", input, "logaddexp2")
4124    _check_is_tensor("other", other, "logaddexp2")
4125    if not ops.is_floating_point(input) or not ops.is_floating_point(other):
4126        raise TypeError(f"For logaddexp2, the dtype of 'input' and 'other' must be float,"
4127                        f"but got {input.dtype} and {other.dtype}.")
4128
4129    m = maximum(input, other)
4130    abs_val = abs(input - other)
4131    exp2_val = pows(2., neg(abs_val))
4132    y = m + log2(1. + exp2_val)
4133    return y
4134
4135
4136@_primexpr
4137def _check_and_canonicalize_axes(axes, ndim):
4138    """Check whether the types and values of input axes are valid."""
4139    return validator.check_and_canonicalize_axes(axes, ndim)
4140
4141
4142def _check_var_std_input(input, ddof, keepdims, axis, cls_name):
4143    _check_is_tensor("input", input, cls_name)
4144    _check_attr_dtype("ddof", ddof, [int, bool], cls_name)
4145    _check_attr_dtype("keepdims", keepdims, [bool], cls_name)
4146    if axis is None:
4147        axis = ()
4148    else:
4149        axis = _check_and_canonicalize_axes(axis, input.ndim)
4150    return axis
4151
4152
4153def vander(x, N=None):
4154    """
4155    Generates a Vandermonde matrix. The columns of the output matrix are powers of the input vector.
4156    The i-th output column is the input vector raised element-wise to the power of :math:`N - i - 1`.
4157
4158    Args:
4159        x (Tensor): 1-D input array.
4160        N (int, optional): Number of columns in the output. Default: ``None``,
4161            `N` will be assigned as :math:`len(x)`.
4162
4163    Returns:
4164        Tensor, the columns are :math:`x^0, x^1, ..., x^{(N-1)}`.
4165
4166    Raises:
4167        TypeError: If input `x` is not Tensor.
4168        ValueError: If `x` is not 1-D.
4169        TypeError: If input `N` is not int.
4170        ValueError: If `N` <= 0.
4171
4172    Supported Platforms:
4173        ``Ascend`` ``GPU`` ``CPU``
4174
4175    Examples:
4176        >>> from mindspore import Tensor, ops
4177        >>> a = Tensor([1., 2., 3., 5.])
4178        >>> print(ops.vander(a, N=3))
4179        [[1.   1.   1.]
4180         [4.   2.   1.]
4181         [9.   3.   1.]
4182         [25.  5.   1.]]
4183        >>> a = Tensor([1., 2., 3., 5.])
4184        >>> print(ops.vander(a))
4185        [[1.    1.   1.   1.]
4186         [8.    4.   2.   1.]
4187         [27.   9.   3.   1.]
4188         [125.  25.  5.   1.]]
4189    """
4190    if not isinstance(x, Tensor):
4191        raise TypeError(
4192            f"For vander, x must be Tensor, but got {type(x)}")
4193    if x.ndim != 1:
4194        raise ValueError(
4195            f"For vander, x must be 1-D, but got dimension = {x.ndim}")
4196    if N is None:
4197        N = len(x)
4198    if not isinstance(N, int):
4199        raise TypeError(
4200            f"For vander, N must be an integer but got {type(N)}.")
4201    if N <= 0:
4202        raise ValueError(
4203            f"For vander, N must be greater than 0, but got {N}.")
4204    exponent = ops.range(Tensor(N - 1), Tensor(-1), Tensor(-1))
4205    x = F.expand_dims(x, 1)
4206    exponent = F.expand_dims(exponent, 0)
4207    return F.tensor_pow(x, exponent)
4208
4209
4210def var(input, axis=None, ddof=0, keepdims=False):
4211    r"""
4212    Returns the variance of each row of the input Tensor by default, or it can calculate them
4213    in specified dimension `axis`. If `axis` is a list of dimensions, reduce over all of them.
4214
4215    Note:
4216        If ddof is 0, 1, True or False, the supported device is only Ascend and CPU. In other cases,
4217        the supported device is Ascend, GPU and CPU.
4218
4219    Args:
4220        input (Tensor[Number]): Input Tensor with a dtype of number.Number, its shape should be :math:`(N, *)`
4221            where :math:`*` means any number of additional dims.
4222        axis (Union[int, tuple(int)], optional): The dimensions to reduce. Only constant value is allowed.
4223            Must be in the range [-rank(`input`), rank(`input`)). Default: ``None`` , reduce all dimensions.
4224        ddof (Union[int, bool], optional): Means Delta Degrees of Freedom.
4225            If ddof is an integer, the divisor used in calculations is :math:`N - ddof`,
4226            where :math:`N` represents the number of elements.
4227            If ddof is True, will use the Bessel correction unbiased estimation.
4228            If ddof is False, will through the biased estimation to calculate variance.
4229            Default: ``0`` .
4230        keepdims (bool, optional): Whether the output Tensor has dim retained or not.
4231            If ``true`` , keep these reduced dimensions and the length is 1.
4232            If false, don't keep these dimensions. Default: ``False`` .
4233
4234    Returns:
4235        Tensor, the variance.
4236        Suppose the shape of `input` is :math:`(x_0, x_1, ..., x_R)`:
4237
4238        - If `axis` is () and `keepdims` is set to ``False`` , returns a 0-D Tensor, indicating
4239          the standard deviation of all elements in `input`.
4240        - If `axis` is int 1 and `keepdims` is set to ``False`` , then the returned Tensor
4241          has shape :math:`(x_0, x_2, ..., x_R)`.
4242        - If `axis` is tuple(int) or list(int), e.g. (1, 2) and `keepdims` is set to ``False`` ,
4243          then the returned Tensor has shape :math:`(x_0, x_2, ..., x_R)`.
4244
4245    Raises:
4246        TypeError: If `input` is not a Tensor.
4247        TypeError: If `axis` is not one of the following: None, int, tuple.
4248        TypeError: If `keepdims` is not a bool.
4249        ValueError: If `axis` is out of range.
4250
4251    Supported Platforms:
4252        ``Ascend`` ``GPU`` ``CPU``
4253
4254    Examples:
4255        >>> import mindspore as ms
4256        >>> input = ms.Tensor([[1, 2, 3, 4], [-1, 1, 4, -10]], ms.float32)
4257        >>> output = ms.ops.var(input, 1, 2, True)
4258        >>> print(output)
4259        [[ 2.5]
4260         [54.5]]
4261    """
4262    axis = _check_var_std_input(input, ddof, keepdims, axis, "var")
4263    output = var_mean(input, axis, ddof, keepdims)
4264    return output[0]
4265
4266
4267def var_mean(input, axis=None, ddof=0, keepdims=False):
4268    r"""
4269    Returns the variance and mean of each row of the input Tensor by default,
4270    or it can calculate them in specified dimension `axis`.
4271    If `axis` is a list of dimensions, reduce over all of them.
4272
4273    Note:
4274        If ddof is 0, 1, True or False, the supported device is only Ascend and CPU. In other cases,
4275        the supported device is Ascend, GPU and CPU.
4276
4277    Args:
4278        input (Tensor[Number]): Input Tensor with a dtype of number.Number, its shape should be :math:`(N, *)`
4279            where :math:`*` means any number of additional dims.
4280        axis (Union[int, tuple(int)], optional): The dimensions to reduce. Only constant value is allowed.
4281            Must be in the range [-rank(`input`), rank(`input`)). Default: ``None`` , reduce all dimensions.
4282        ddof (Union[int, bool], optional): Means Delta Degrees of Freedom.
4283            If ddof is an integer, the divisor used in calculations is :math:`N - ddof`,
4284            where :math:`N` represents the number of elements.
4285            If ddof is True, will use the Bessel correction unbiased estimation.
4286            If ddof is False, will through the biased estimation to calculate the variance.
4287            Default: ``0`` .
4288        keepdims (bool, optional): Whether the output Tensor has dim retained or not.
4289            If true, keep these reduced dimensions and the length is 1.
4290            If false, don't keep these dimensions. Default: ``False`` .
4291
4292    Returns:
4293        A tuple containing the variance and mean.
4294        Suppose the shape of `input` is :math:`(x_0, x_1, ..., x_R)`:
4295
4296        - If `axis` is () and `keepdims` is set to ``False`` , returns a 0-D Tensor, indicating
4297          the standard deviation of all elements in `input`.
4298        - If `axis` is int 1 and `keepdims` is set to ``False`` , then the returned Tensor
4299          has shape :math:`(x_0, x_2, ..., x_R)`.
4300        - If `axis` is tuple(int) or list(int), e.g. (1, 2) and `keepdims` is set to ``False`` ,
4301          then the returned Tensor has shape :math:`(x_0, x_2, ..., x_R)`.
4302
4303    Raises:
4304        TypeError: If `input` is not a Tensor.
4305        TypeError: If `axis` is not one of the following: None, int, tuple.
4306        TypeError: If `keepdims` is not a bool.
4307        ValueError: If `axis` is out of range.
4308
4309    Supported Platforms:
4310        ``Ascend`` ``GPU`` ``CPU``
4311
4312    Examples:
4313        >>> import mindspore as ms
4314        >>> input = ms.Tensor([[1, 2, 3, 4], [-1, 1, 4, -10]], ms.float32)
4315        >>> output_var, output_mean = ms.ops.var_mean(input, 1, 2, True)
4316        >>> print(output_var)
4317        [[ 2.5]
4318         [54.5]]
4319        >>> print(output_mean)
4320        [[ 2.5]
4321         [-1.5]]
4322    """
4323    axis = _check_var_std_input(input, ddof, keepdims, axis, "var_mean")
4324    if ddof in (0, 1):
4325        output = _get_cache_prim(P.ReduceStd)(axis=axis, unbiased=bool(ddof), keep_dims=keepdims)(input)
4326        return tensor_pow(output[0], 2), output[1]
4327    x_mean = mean(input, axis, True)
4328    x_sub = tensor_sub(input, x_mean)
4329    x_pow = tensor_pow(x_sub, 2)
4330    x_sum = sum(x_pow, axis, keepdims)
4331    res_mean = mean(input, axis, keepdims)
4332    nums = 1
4333    if axis == ():
4334        nums = input.size
4335    else:
4336        for ax in axis:
4337            nums *= input.shape[ax]
4338    return true_divide(x_sum, nums - ddof), res_mean
4339
4340
4341def std(input, axis=None, ddof=0, keepdims=False):
4342    r"""
4343    Returns the standard-deviation of each row of the input Tensor by default, or it can calculate them
4344    in specified dimension `axis`. If `axis` is a list of dimensions, reduce over all of them.
4345
4346    Note:
4347        If ddof is 0, 1, True or False, the supported device is only Ascend and CPU. In other cases,
4348        the supported device is Ascend, GPU and CPU.
4349
4350    Args:
4351        input (Tensor[Number]): Input Tensor with a dtype of number.Number, its shape should be :math:`(N, *)`
4352            where :math:`*` means any number of additional dims.
4353        axis (Union[int, tuple(int)], optional): The dimensions to reduce. Only constant value is allowed.
4354            Must be in the range [-rank(`input`), rank(`input`)). Default: ``None`` , reduce all dimensions.
4355        ddof (Union[int, bool], optional): Means Delta Degrees of Freedom.
4356            If ddof is an integer, the divisor used in calculations is :math:`N - ddof`,
4357            where :math:`N` represents the number of elements.
4358            If ddof is True, will use the Bessel correction unbiased estimation.
4359            If ddof is False, will through the biased estimation to calculate the standard deviation.
4360            Default: ``0`` .
4361        keepdims (bool, optional): Whether the output Tensor has dim retained or not.
4362            If true, keep these reduced dimensions and the length is 1.
4363            If false, don't keep these dimensions. Default: ``False`` .
4364
4365    Returns:
4366        Tensor, the standard deviation.
4367        Suppose the shape of `input` is :math:`(x_0, x_1, ..., x_R)`:
4368
4369        - If `axis` is () and `keepdims` is set to False, returns a 0-D Tensor, indicating
4370          the standard deviation of all elements in `input`.
4371        - If `axis` is int 1 and `keepdims` is set to False, then the returned Tensor
4372          has shape :math:`(x_0, x_2, ..., x_R)`.
4373        - If `axis` is tuple(int) or list(int), e.g. (1, 2) and `keepdims` is set to False,
4374          then the returned Tensor has shape :math:`(x_0, x_2, ..., x_R)`.
4375
4376    Raises:
4377        TypeError: If `input` is not a Tensor.
4378        TypeError: If `axis` is not one of the following: None, int, tuple.
4379        TypeError: If `keepdims` is not a bool.
4380        ValueError: If `axis` is out of range.
4381
4382    Supported Platforms:
4383        ``Ascend`` ``GPU`` ``CPU``
4384
4385    Examples:
4386        >>> import mindspore as ms
4387        >>> input = ms.Tensor([[1, 2, 3, 4], [-1, 1, 4, -10]], ms.float32)
4388        >>> output = ms.ops.std(input, 1, 2, True)
4389        >>> print(output)
4390        [[1.5811388]
4391         [7.3824115]]
4392    """
4393    axis = _check_var_std_input(input, ddof, keepdims, axis, "std")
4394    output = std_mean(input, axis, ddof, keepdims)
4395    return output[0]
4396
4397
4398def std_mean(input, axis=None, ddof=0, keepdims=False):
4399    r"""
4400    Returns the standard-deviation and mean of each row of the input Tensor by default,
4401    or it can calculate them in specified dimension `axis`.
4402    If `axis` is a list of dimensions, reduce over all of them.
4403
4404    Note:
4405        If ddof is 0, 1, True or False, the supported device is only Ascend and CPU. In other cases,
4406        the supported device is Ascend, GPU and CPU.
4407
4408    Args:
4409        input (Tensor[Number]): Input Tensor with a dtype of number.Number, its shape should be :math:`(N, *)`
4410            where :math:`*` means any number of additional dims.
4411        axis (Union[int, tuple(int)], optional): Specifies the dimensions from which to calculate the standard
4412            deviation and mean. Only constant value is allowed. Must be in the range [-rank(`input`), rank(`input`)).
4413            Default: ``None`` , reduce all dimensions.
4414        ddof (Union[int, bool], optional): Means Delta Degrees of Freedom.
4415            If ddof is an integer, the divisor used in calculations is :math:`N - ddof`,
4416            where :math:`N` represents the number of elements.
4417            If ddof is True, will use the Bessel correction unbiased estimation.
4418            If ddof is False, will through the biased estimation to calculate the standard deviation.
4419            Default: ``0`` .
4420        keepdims (bool, optional): Whether the output Tensor has dim retained or not.
4421            If true, keep these reduced dimensions and the length is 1.
4422            If false, don't keep these dimensions. Default: ``False`` .
4423
4424    Returns:
4425        A tuple containing the standard deviation and mean.
4426        Suppose the shape of `input` is :math:`(x_0, x_1, ..., x_R)`:
4427
4428        - If `axis` is () and `keepdims` is set to ``False`` , returns a 0-D Tensor, indicating
4429          the standard deviation of all elements in `input`.
4430        - If `axis` is int 1 and `keepdims` is set to ``False`` , then the returned Tensor
4431          has shape :math:`(x_0, x_2, ..., x_R)`.
4432        - If `axis` is tuple(int) or list(int), e.g. (1, 2) and `keepdims` is set to ``False`` ,
4433          then the returned Tensor has shape :math:`(x_0, x_2, ..., x_R)`.
4434
4435    Raises:
4436        TypeError: If `input` is not a Tensor.
4437        TypeError: If `axis` is not one of the following: None, int, tuple.
4438        TypeError: If `keepdims` is not a bool.
4439        ValueError: If `axis` is out of range.
4440
4441    Supported Platforms:
4442        ``Ascend`` ``GPU`` ``CPU``
4443
4444    Examples:
4445        >>> import mindspore as ms
4446        >>> input = ms.Tensor([[1, 2, 3, 4], [-1, 1, 4, -10]], ms.float32)
4447        >>> output_std, output_mean = ms.ops.std_mean(input, 1, 2, True)
4448        >>> print(output_std)
4449        [[1.5811388]
4450         [7.3824115]]
4451        >>> print(output_mean)
4452        [[ 2.5]
4453         [-1.5]]
4454    """
4455    axis = _check_var_std_input(input, ddof, keepdims, axis, "std_mean")
4456    if ddof in (0, 1):
4457        return _get_cache_prim(P.ReduceStd)(axis=axis, unbiased=bool(ddof), keep_dims=keepdims)(input)
4458    output = var_mean(input, axis, ddof, keepdims)
4459    return tensor_pow(output[0], 0.5), output[1]
4460
4461
4462def reciprocal(input):
4463    r"""
4464    Returns reciprocal of a tensor element-wise.
4465
4466    .. math::
4467
4468        out_{i} =  \frac{1}{x_{i}}
4469
4470    Args:
4471        input (Tensor): The input tensor.
4472
4473    Returns:
4474        Tensor, has the same shape as the `input`.
4475
4476    Raises:
4477        TypeError: If `input` is not a Tensor.
4478
4479    Supported Platforms:
4480        ``Ascend`` ``GPU`` ``CPU``
4481
4482    Examples:
4483        >>> import mindspore as ms
4484        >>> from mindspore import ops
4485        >>> import numpy as np
4486        >>> input = ms.Tensor(np.array([1.0, 2.0, 4.0]), ms.float32)
4487        >>> output = ops.reciprocal(input)
4488        >>> print(output)
4489        [1.   0.5  0.25]
4490    """
4491    return reciprocal_(input)
4492
4493
4494def outer(input, vec2):
4495    """
4496    Return outer product of `input` and `vec2`. If `input` is a vector of size :math:`n`
4497    and `vec2` is a vector of size :math:`m` , then output must be a matrix of shape :math:`(n, m)` .
4498
4499    Note:
4500        This function does not broadcast.
4501
4502    Args:
4503        input (Tensor): 1-D input vector.
4504        vec2 (Tensor): 1-D input vector.
4505
4506    Returns:
4507        out (Tensor, optional), 2-D matrix, the outer product of two vectors.
4508
4509    Raises:
4510        TypeError: If `input` or `vec2` is not a Tensor.
4511
4512    Supported Platforms:
4513        ``Ascend`` ``GPU`` ``CPU``
4514
4515    Examples:
4516        >>> import mindspore
4517        >>> import numpy as np
4518        >>> from mindspore import Tensor
4519        >>> from mindspore import ops
4520        >>> input = Tensor(np.array([7, 8, 9]), mindspore.int32)
4521        >>> vec2 = Tensor(np.array([7, 10, 11]), mindspore.int32)
4522        >>> out = ops.outer(input, vec2)
4523        >>> print(out)
4524        [[49 70 77]
4525         [56 80 88]
4526         [63 90 99]]
4527    """
4528
4529    if not isinstance(input, (Tensor, Tensor_)):
4530        raise TypeError("the input input must be Tensor!")
4531    if not isinstance(vec2, (Tensor, Tensor_)):
4532        raise TypeError("the input vec2 must be Tensor!")
4533    input = input.reshape(-1, 1)
4534    y = tensor_mul(input, vec2)
4535    return y
4536
4537
4538def mv(mat, vec):
4539    """
4540    Multiplies matrix `mat` and vector `vec`.
4541
4542    If `mat` is a Tensor with :math:`(N, M)`, `vec` is a 1-D Tensor of size :math:`M`,
4543    out will be 1-D of size :math:`N`.
4544
4545    Args:
4546        mat (Tensor): Input matrix of shape :math:`(N, M)`.
4547        vec (Tensor): Input vector of shape :math:`(M,)`.
4548
4549    Returns:
4550        Tensor, the shape of the output Tensor is :math:`(N,)`.
4551
4552    Raises:
4553        TypeError: If `mat` or `vec` is not a Tensor.
4554        ValueError: If `mat` is not a 2-D Tensor or `vec` is not a 1-D Tensor.
4555
4556    Supported Platforms:
4557        ``Ascend`` ``GPU`` ``CPU``
4558
4559    Examples:
4560        >>> import numpy as np
4561        >>> from mindspore import Tensor, ops
4562        >>> mat = Tensor(np.array([[3., 4.], [1., 6.], [1., 3.]]).astype(np.float32))
4563        >>> vec = Tensor(np.array([1., 2.]).astype(np.float32))
4564        >>> output = ops.mv(mat, vec)
4565        >>> print(output)
4566        [11. 13. 7.]
4567    """
4568    if not isinstance(mat, (Tensor, Tensor_)):
4569        raise TypeError("The input mat must be Tensor.")
4570    if not isinstance(vec, (Tensor, Tensor_)):
4571        raise TypeError("The input vec must be Tensor.")
4572
4573    length_vec = get_x_shape(vec.shape)
4574    vec = reshape_(vec, (length_vec[0], 1))
4575
4576    out = matmul_(mat, vec)
4577    out = out.T
4578    out = out[0]
4579    return out
4580
4581
4582def addbmm(input, batch1, batch2, *, beta=1, alpha=1):
4583    r"""
4584    Applies batch matrix multiplication to `batch1` and `batch2`, with a reduced add step and add `input` to the result.
4585
4586    The optional values `alpha` and `beta` are the matrix-matrix product between `batch1` and `batch2` and the scale
4587    factor for the added tensor `input` respectively. If `beta` is 0, then `input` will be ignored.
4588
4589    .. math::
4590        output = \beta input + \alpha (\sum_{i=0}^{b-1} {batch1_i @ batch2_i})
4591
4592    Args:
4593        input (Tensor): Tensor to be added.
4594        batch1 (Tensor): The first batch of tensor to be multiplied.
4595        batch2 (Tensor): The second batch of tensor to be multiplied.
4596
4597    Keyword Args:
4598        beta (Union[int, float], optional): Multiplier for `input`. Default: ``1`` .
4599        alpha (Union[int, float], optional): Multiplier for `batch1` @ `batch2`. Default: ``1`` .
4600
4601    Returns:
4602        Tensor, has the same dtype as `input`.
4603
4604    Raises:
4605        TypeError: If `alpha` or beta is not an int or float.
4606        ValueError: If `batch1`, `batch2` cannot apply batch matrix multiplication.
4607
4608    Supported Platforms:
4609        ``Ascend`` ``GPU`` ``CPU``
4610
4611    Examples:
4612        >>> import numpy as np
4613        >>> from mindspore import Tensor, ops
4614        >>> m = np.ones((3, 3)).astype(np.float32)
4615        >>> arr1 = np.arange(24).astype(np.float32).reshape((2, 3, 4))
4616        >>> arr2 = np.arange(24).astype(np.float32).reshape((2, 4, 3))
4617        >>> a = Tensor(arr1)
4618        >>> b = Tensor(arr2)
4619        >>> c = Tensor(m)
4620        >>> output = ops.addbmm(c, a, b)
4621        >>> print(output)
4622        [[ 949. 1009. 1069.]
4623         [1285. 1377. 1469.]
4624         [1621. 1745. 1869.]]
4625    """
4626    if not isinstance(alpha, (int, float)):
4627        raise TypeError(f"For 'addbmm', parameter 'alpha' must be an int or float, but got {type(alpha)}.")
4628    if not isinstance(beta, (int, float)):
4629        raise TypeError(f"For 'addbmm', parameter 'beta' must be an int or float, but got {type(beta)}.")
4630    bmm_res = batch_matmul_(batch1, batch2)
4631    return beta * input + alpha * (bmm_res.sum(axis=0))
4632
4633
4634def addmm(input, mat1, mat2, *, beta=1, alpha=1):
4635    r"""
4636    Multiplies matrix `mat1` and matrix `mat2`. The matrix `input` is added to the final result.
4637
4638    .. math::
4639        output = \beta input + \alpha (mat1 @ mat2)
4640
4641    Args:
4642        input (Tensor): Tensor to be added.
4643        mat1 (Tensor): The first tensor to be multiplied.
4644        mat2 (Tensor): The second tensor to be multiplied.
4645
4646    Keyword Args:
4647        beta (Union[int, float], optional): Multiplier for `input`. Default: ``1`` .
4648        alpha (Union[int, float], optional): Multiplier for `mat1` @ `mat2`. Default: ``1`` .
4649
4650    Returns:
4651        Tensor, has the same dtype as `input`.
4652
4653    Raises:
4654        ValueError: If `mat1`, `mat2` cannot apply matrix multiplication.
4655
4656    Supported Platforms:
4657        ``Ascend`` ``GPU`` ``CPU``
4658
4659    Examples:
4660        >>> import numpy as np
4661        >>> from mindspore import Tensor, ops
4662        >>> m = np.ones((3, 3)).astype(np.float32)
4663        >>> arr1 = np.arange(12).astype(np.float32).reshape((3, 4))
4664        >>> arr2 = np.arange(12).astype(np.float32).reshape((4, 3))
4665        >>> a = Tensor(arr1)
4666        >>> b = Tensor(arr2)
4667        >>> c = Tensor(m)
4668        >>> output = ops.addmm(c, a, b)
4669        >>> print(output)
4670        [[ 43.  49.  55.]
4671         [115. 137. 159.]
4672         [187. 225. 263.]]
4673    """
4674    if not isinstance(alpha, (int, float)):
4675        raise TypeError(f"For 'addmm', parameter 'alpha' must be an int or float, but got {type(alpha)}.")
4676    if not isinstance(beta, (int, float)):
4677        raise TypeError(f"For 'addmm', parameter 'beta' must be an int or float, but got {type(beta)}.")
4678    return beta * input + alpha * (matmul_(mat1, mat2))
4679
4680
4681def addmv(input, mat, vec, *, beta=1, alpha=1):
4682    """
4683    Multiplies matrix `mat` and vector `vec`. The vector `input` is added to the final result.
4684
4685    If mat is a :math:`(N, M)` tensor, vec is a 1-D tensor of size :math:`M`, then `input` must be broadcastable
4686    with a 1-D tensor of size :math:`N`.In this case `out` will be 1-D tensor of size :math:`N`.
4687
4688    The optional values `beta` and `alpha` are the matrix-vector product between `mat` and `vec` and the scale
4689    factor for the added Tensor `input` respectively. If `beta` is 0, then `input` will be ignored.
4690
4691    .. math::
4692        output = β input + α (mat @ vec)
4693
4694    Args:
4695        input (Tensor): Vector to be added. The shape of the tensor is :math:`(N,)`.
4696        mat (Tensor): The first tensor to be multiplied. The shape of the tensor is :math:`(N, M)`.
4697        vec (Tensor): The second tensor to be multiplied. The shape of the tensor is :math:`(M,)`.
4698
4699    Keyword Args:
4700        beta (scalar[int, float, bool], optional): Multiplier for `input` (β). The `beta` must be int or
4701            float or bool. Default: ``1`` .
4702        alpha (scalar[int, float, bool], optional): Multiplier for `mat` @ `vec` (α). The `alpha` must
4703            be int or float or bool. Default: ``1`` .
4704
4705    Returns:
4706        Tensor, the shape of the output tensor is :math:`(N,)`, has the same dtype as `input`.
4707
4708    Raises:
4709        TypeError: If `mat`, `vec`, `input` is not a Tensor.
4710        TypeError: If inputs `mat`, `vec` are not the same dtype.
4711        ValueError: If `mat` is not a 2-D Tensor.
4712        ValueError: If `vec` is not a 1-D Tensor.
4713
4714    Supported Platforms:
4715        ``Ascend`` ``GPU`` ``CPU``
4716
4717    Examples:
4718        >>> import numpy as np
4719        >>> from mindspore import Tensor, ops
4720        >>> input = Tensor(np.array([2., 3.]).astype(np.float32))
4721        >>> mat = Tensor(np.array([[2., 5., 3.], [4., 2., 2.]]).astype(np.float32))
4722        >>> vec = Tensor(np.array([3., 2., 4.]).astype(np.float32))
4723        >>> output = ops.addmv(input, mat, vec)
4724        >>> print(output)
4725        [30. 27.]
4726    """
4727
4728    input_dtype = dtype_(input)
4729    if not (isinstance(input, Tensor) and isinstance(mat, Tensor) and isinstance(vec, Tensor)):
4730        raise TypeError("For Addmv, inputs must be all tensors.")
4731    if dtype_(mat) != dtype_(vec):
4732        raise TypeError("For Addmv, the mat and vec should be the same dtype.")
4733    _check_input_dtype("input", input_dtype,
4734                       [mstype.float16, mstype.float32, mstype.float64,
4735                        mstype.int16, mstype.int32, mstype.int64], "Addmv")
4736    _check_attr_dtype("alpha", alpha, [int, float, bool], "Addmv")
4737    _check_attr_dtype("beta", beta, [int, float, bool], "Addmv")
4738    if input_dtype in (mstype.int16, mstype.int32, mstype.int64):
4739        alpha = ops.scalar_cast(alpha, mstype.int64)
4740        beta = ops.scalar_cast(beta, mstype.int64)
4741    out = beta * input + alpha * mv(mat, vec)
4742    return out
4743
4744
4745def adjoint(x):
4746    r"""
4747    Calculates the conjugation of Tensor element by element, and transposes the last two dimensions.
4748
4749    Args:
4750        x (Tensor): Input Tensor.
4751
4752    Returns:
4753        Tensor, the calculated result.
4754
4755    Raises:
4756        TypeError: If `x` is not a Tensor.
4757
4758    Supported Platforms:
4759        ``Ascend`` ``GPU`` ``CPU``
4760
4761    Examples:
4762        >>> import mindspore
4763        >>> import numpy as np
4764        >>> from mindspore import Tensor, ops
4765        >>> a = Tensor(np.array([[0. + 0.j, 1. + 1.j], [2. + 2.j, 3. + 3.j]]), mindspore.complex128)
4766        >>> output = ops.adjoint(a)
4767        >>> print(output)
4768        [[0.-0.j 2.-2.j]
4769         [1.-1.j 3.-3.j]]
4770    """
4771    _dtype = x.dtype
4772    _t = x.swapaxes(-1, -2)
4773    if _dtype in mstype.complex_type:
4774        return _t.conj()
4775    return _t
4776
4777
4778def addr(x, vec1, vec2, *, beta=1, alpha=1):
4779    """
4780    Computes the outer product of two vector `vec1` and `vec2`, and adds the resulting matrix to `x`.
4781
4782    Given `vec1` and `vec2` of sizes :math:`N` and :math:`M`,
4783    `x` must be able to broadcast to a matrix of shape :math:`(N, M)`.
4784
4785    `beta` and `alpha` are optional scaling factors for the outer product of `vec1` and `vec2`,
4786    and the matrix `x` respectively. Setting `beta` to 0 will exclude `x` from the computation.
4787
4788    .. math::
4789        output = β x + α (vec1 ⊗ vec2)
4790
4791    Args:
4792        x (Tensor): Vector to be added. The shape of the tensor is :math:`(N, M)`.
4793        vec1 (Tensor): The first tensor to be multiplied. The shape of the tensor is :math:`(N,)`.
4794        vec2 (Tensor): The second tensor to be multiplied. The shape of the tensor is :math:`(M,)`.
4795
4796    Keyword Args:
4797        beta (scalar[int, float, bool], optional): Multiplier for `x` (β). The `beta` must be int or
4798            float or bool. Default: ``1`` .
4799        alpha (scalar[int, float, bool], optional): Multiplier for `vec1` ⊗ `vec2` (α). The `alpha` must
4800            be int or float or bool. Default: ``1`` .
4801
4802    Returns:
4803        Tensor, the shape of the output tensor is :math:`(N, M)`, has the same dtype as `x`.
4804
4805    Raises:
4806        TypeError: If `x`, `vec1`, `vec2` is not a Tensor.
4807        TypeError: If inputs `vec1`, `vec2` are not the same dtype.
4808        ValueError: If `vec1`, `vec2` is not a 1-D Tensor.
4809
4810    Supported Platforms:
4811        ``Ascend`` ``GPU`` ``CPU``
4812
4813    Examples:
4814        >>> import numpy as np
4815        >>> from mindspore import Tensor, ops
4816        >>> x = Tensor(np.array([[2., 2.], [3., 2.], [3., 4.]], np.float32))
4817        >>> vec1 = Tensor(np.array([2., 3., 2.], np.float32))
4818        >>> vec2 = Tensor(np.array([3, 4], np.float32))
4819        >>> output = ops.addr(x, vec1, vec2)
4820        >>> print(output)
4821        [[ 8. 10.]
4822         [12. 14.]
4823         [ 9. 12.]]
4824    """
4825
4826    input_dtype = dtype_(x)
4827    if not (isinstance(x, Tensor) and isinstance(vec1, Tensor) and isinstance(vec2, Tensor)):
4828        raise TypeError("For Addr, inputs must be all tensors.")
4829    if dtype_(vec1) != dtype_(vec2):
4830        raise TypeError("For Addr, the vec1 and vec2 should be the same dtype.")
4831    _check_input_dtype("x", input_dtype,
4832                       [mstype.float16, mstype.float32, mstype.float64,
4833                        mstype.int16, mstype.int32, mstype.int64], "Addr")
4834    _check_attr_dtype("alpha", alpha, [int, float, bool], "Addr")
4835    _check_attr_dtype("beta", beta, [int, float, bool], "Addr")
4836    if input_dtype in (mstype.int16, mstype.int32, mstype.int64):
4837        alpha = ops.scalar_cast(alpha, mstype.int64)
4838        beta = ops.scalar_cast(beta, mstype.int64)
4839
4840    length_vec1 = get_x_shape(vec1.shape)
4841    vec1 = reshape_(vec1, (length_vec1[0], 1))
4842    length_vec2 = get_x_shape(vec2.shape)
4843    vec2 = reshape_(vec2, (1, length_vec2[0]))
4844
4845    out = beta * x + alpha * matmul_(vec1, vec2)
4846    return out
4847
4848
4849def lcm(input, other):
4850    """
4851    Computes least common multiplier of input tensors element-wise.
4852    The shape of two inputs should be broadcastable, and data type of them should be
4853    one of: int32, int64
4854
4855    Args:
4856        input (Tensor): The first input tensor.
4857        other (Tensor): The second input tensor.
4858
4859    Returns:
4860        Tensor, the shape is the same as the one after broadcasting, and the data type is one
4861        with higher digits in the two inputs.
4862
4863    Raises:
4864        TypeError: If data type `input` or `other` is not int32 or int64.
4865        ValueError: If shapes of two inputs are not broadcastable.
4866
4867    Supported Platforms:
4868        ``Ascend`` ``GPU`` ``CPU``
4869
4870    Examples:
4871        >>> import numpy as np
4872        >>> from mindspore import Tensor, ops
4873        >>> input = Tensor(np.array([7, 8, 9]))
4874        >>> other = Tensor(np.array([14, 6, 12]))
4875        >>> y = ops.lcm(input, other)
4876        >>> print(y)
4877        [14 24 36]
4878    """
4879    return lcm_(input, other)
4880
4881
4882def cdist(x1, x2, p=2.0):
4883    """
4884    Computes p-norm distance between each pair of row vectors of two input Tensors.
4885
4886    Note:
4887        On Ascend, the supported dtypes are float16 and float32.
4888        On CPU, the supported dtypes are float16 and float32.
4889        On GPU, the supported dtypes are float32 and float64.
4890
4891    Args:
4892        x1 (Tensor): Input tensor of shape :math:`(B, P, M)`.
4893          Letter :math:`B` represents 0 or positive int number.
4894          When :math:`B` is equal to 0, it means this dimension can be ignored,
4895          i.e. shape of the tensor is :math:`(P, M)`.
4896        x2 (Tensor): Input tensor of shape :math:`(B, R, M)`, has the same dtype as `x1`.
4897        p (float, optional): P value for the p-norm distance to calculate between each
4898          vector pair, P ∈ [0,∞]. Default: ``2.0`` .
4899
4900    Returns:
4901        Tensor, p-norm distance, has the same dtype as `x1`, its shape is :math:`(B, P, R)`.
4902
4903    Raises:
4904        TypeError: If `x1` or `x2` is not Tensor.
4905        TypeError: If dtype of `x1` or `x2` is not listed in the "Note" above.
4906        TypeError: If `p` is not float32.
4907        ValueError: If `p` is negative.
4908        ValueError: If dimension of `x1` is not the same as `x2`.
4909        ValueError: If dimension of `x1` or `x2` is neither 2 nor 3.
4910        ValueError: If the batch shape of `x1` is not the same as the shape of `x2`.
4911        ValueError: If the number of columns of `x1` is not the same as that of `x2`.
4912
4913    Supported Platforms:
4914        ``Ascend`` ``GPU`` ``CPU``
4915
4916    Examples:
4917        >>> import numpy as np
4918        >>> from mindspore import Tensor, ops
4919        >>> x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
4920        >>> y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
4921        >>> output = ops.cdist(x, y, 2.0)
4922        >>> print(output)
4923        [[[2.8284273 2.8284273]
4924          [1.4142137 1.4142137]]]
4925    """
4926    cdist_ = _get_cache_prim(P.Cdist)(p)
4927    return cdist_(x1, x2)
4928
4929
4930def lerp(input, end, weight):
4931    """
4932    Does a linear interpolation of two tensors input and end based on a float or tensor weight.
4933
4934    If `weight` is a tensor, the shapes of three inputs need to be broadcast;
4935    If `weight` is a float, the shapes of `input` and `end` need to be broadcast.
4936
4937    .. math::
4938
4939        output_{i} = input_{i} + weight_{i} * (end_{i} - input_{i})
4940
4941    Args:
4942        input (Tensor): The tensor with the starting points. Data type must be float16 or float32.
4943        end (Tensor): The tensor with the ending points. Data type must be the same as `input`.
4944        weight (Union[float, Tensor]): The weight for the interpolation formula. Must be a float
4945            or a scalar tensor with float16 or float32 data type.
4946
4947    Returns:
4948        Tensor, has the same type and shape as input `input`.
4949
4950    Raises:
4951        TypeError: If `input` or `end` is not a tensor.
4952        TypeError: If `weight` is neither scalar(float) nor tensor.
4953        TypeError: If dtype of `input` or `end` is neither float16 nor float32.
4954        TypeError: If dtype of `weight` is neither float16 nor float32 when it is a tensor.
4955        TypeError: If `input` and `end` have different data types.
4956        TypeError: If `input`, `end` and `weight` have different data types when `weight` is a tensor.
4957        ValueError: If `end` could not be broadcast to a tensor with shape of `input`.
4958        ValueError: If `weight` could not be broadcast to tensors with shapes of `input` and `end` when it is a tensor.
4959
4960    Supported Platforms:
4961        ``Ascend`` ``GPU`` ``CPU``
4962
4963    Examples:
4964        >>> import mindspore
4965        >>> import numpy as np
4966        >>> from mindspore import Tensor, ops
4967        >>> input = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
4968        >>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
4969        >>> output = ops.lerp(input, end, 0.5)
4970        >>> print(output)
4971        [5.5 6. 6.5 7. ]
4972    """
4973    return lerp_(input, end, weight)
4974
4975
4976def bernoulli(input, p=0.5, seed=None):
4977    r"""
4978    Randomly set the elements of output to 0 or 1 with the probability of `p`
4979    which follows the Bernoulli distribution.
4980
4981    .. math::
4982        out_{i} \sim Bernoulli(p_{i})
4983
4984    Args:
4985        input (Tensor): Input Tensor. Data
4986                        type must be int8, uint8, int16, int32, int64, bool, float32 or float64.
4987        p (Union[Tensor, float], optional): Success probability, representing the probability of setting 1 for the
4988            corresponding position of the current Tensor. It has the same shape as `input`, the value of `p`
4989            must be in the range `[0, 1]`. Default: ``0.5`` .
4990        seed (Union[int, None], optional): The seed value for random generating. The value of `seed` must be a
4991            positive integer. Default: ``None`` , means using the current timestamp.
4992
4993    Returns:
4994        output (Tensor), with the same shape and type as `input` .
4995
4996    Raises:
4997        TypeError: If dtype of `input` is not one of: int8, uint8, int16, int32, int64, bool, float32, float64.
4998        TypeError: If dtype of `p` is not one of: float32, float64.
4999        TypeError: If dtype of `seed` is not int or None.
5000        ValueError: If `p` is not in range [0, 1].
5001        ValueError: If `seed` is less than 0.
5002        ValueError: If `p` is a Tensor but has different shape than `input`.
5003
5004    Supported Platforms:
5005        ``GPU`` ``CPU``
5006
5007    Examples:
5008        >>> import mindspore
5009        >>> import numpy as np
5010        >>> from mindspore import Tensor
5011        >>> from mindspore import ops
5012        >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int8)
5013        >>> output = ops.bernoulli(input_x, p=1.0)
5014        >>> print(output)
5015        [1 1 1]
5016        >>> input_p = Tensor(np.array([0.0, 1.0, 1.0]), mindspore.float32)
5017        >>> output = ops.bernoulli(input_x, input_p)
5018        >>> print(output)
5019        [0 1 1]
5020    """
5021    if seed is None:
5022        seed = -1
5023    validator.check_is_int(seed, 'seed', 'bernoulli')
5024    bernoulli_ = _get_cache_prim(Bernoulli)(seed)
5025    if not isinstance(p, Tensor):
5026        p = Tensor([p])
5027    return bernoulli_(input, p)
5028
5029
5030def bessel_i1(x):
5031    r"""
5032    Computes modified Bessel function of the first kind, order 1 element-wise.
5033
5034    .. math::
5035        \begin{array}{ll} \\
5036            I_{1}(x)=\mathrm{i}^{-1} J_{1}(\mathrm{i} x)=\sum_{m=0}^
5037            {\infty} \frac{x^{2m+1}}{2^{2m+1} m ! (m+1) !}
5038        \end{array}
5039
5040    where :math:`J_{1}` is Bessel function of the first kind, order 1.
5041
5042    Args:
5043        x (Tensor): The input tensor. The data type must be float16, float32 or float64.
5044
5045    Returns:
5046        Tensor, has the same shape and dtype as the `x`.
5047
5048    Raises:
5049        TypeError: If `x` is not a Tensor.
5050        TypeError: If dtype of `x` is not float16, float32 or float64.
5051
5052    Supported Platforms:
5053        ``GPU`` ``CPU``
5054
5055    Examples:
5056        >>> import mindspore
5057        >>> import numpy as np
5058        >>> from mindspore import Tensor, ops
5059        >>> x = Tensor(np.array([-1, -0.5, 0.5, 1]), mindspore.float32)
5060        >>> output = ops.bessel_i1(x)
5061        >>> print(output)
5062        [-0.5651591  -0.25789431  0.25789431  0.5651591]
5063    """
5064    return bessel_i1_(x)
5065
5066
5067def bessel_i1e(x):
5068    r"""
5069    Computes exponential scaled modified Bessel function of the first kind, order 1 element-wise.
5070
5071    The formula is defined as:
5072
5073    .. math::
5074        \begin{array}{ll} \\
5075            \text I_{1}e(x)=e^{(-|x|)} * I_{1}(x)=e^{(-|x|)} * \sum_{m=0}^
5076            {\infty} \frac{x^{2m+1}}{2^{2m+1} m ! (m+1) !}
5077        \end{array}
5078
5079    where :math:`I_{1}` is  modified Bessel function of the first kind, order 1.
5080
5081    Args:
5082        x (Tensor): The input tensor. The data type must be float16, float32 or float64.
5083
5084    Returns:
5085        Tensor, has the same shape and dtype as the `x`.
5086
5087    Raises:
5088        TypeError: If `x` is not a Tensor.
5089        TypeError: If dtype of `x` is not float16, float32 or float64.
5090
5091    Supported Platforms:
5092        ``Ascend`` ``GPU`` ``CPU``
5093
5094    Examples:
5095        >>> import mindspore
5096        >>> import numpy as np
5097        >>> from mindspore import Tensor, ops
5098        >>> x = Tensor(np.array([-1, -0.5, 0.5, 1]), mindspore.float32)
5099        >>> output = ops.bessel_i1e(x)
5100        >>> print(output)
5101        [-0.20791042  -0.15642083  0.15642083  0.20791042]
5102    """
5103    return bessel_i1e_(x)
5104
5105
5106def bessel_k1(x):
5107    r"""
5108    Computes modified Bessel function of the second kind, order 1 element-wise.
5109
5110    The formula is defined as:
5111
5112    .. math::
5113        \begin{array}{ll} \\
5114            K_{1}(x)=\lim_{\nu \to 1} \left(\frac{\pi}{2}\right) \frac{I_{-\nu}(x)-
5115            I_{\nu}(x)}{\sin (\nu \pi)} = \int_{0}^{\infty} e^{-x \cosh t} \cosh (t) d t
5116        \end{array}
5117
5118    where :math:`I_{1}` is modified Bessel function of the first kind, order 1.
5119
5120    Args:
5121        x (Tensor): The input tensor. The data type must be float16, float32 or float64.
5122
5123    Returns:
5124        Tensor, has the same shape and dtype as the `x`.
5125
5126    Raises:
5127        TypeError: If `x` is not a Tensor.
5128        TypeError: If dtype of `x` is not float16, float32 or float64.
5129
5130    Supported Platforms:
5131        ``GPU`` ``CPU``
5132
5133    Examples:
5134        >>> import mindspore
5135        >>> import numpy as np
5136        >>> from mindspore import Tensor, ops
5137        >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
5138        >>> output = ops.bessel_k1(x)
5139        >>> print(output)
5140        [1.65644112  0.60190723  0.13986588  0.0124835]
5141    """
5142    return bessel_k1_(x)
5143
5144
5145def bessel_k1e(x):
5146    r"""
5147    Computes exponential scaled modified Bessel function of the second kind, order 1 element-wise.
5148
5149    The formula is defined as:
5150
5151    .. math::
5152        \begin{array}{ll} \\
5153            K_{1}e(x)= e^{(-|x|)} * K_{1}(x) = e^{(-|x|)} * \int_{0}
5154            ^{\infty} e^{-x \cosh t} \cosh (t) d t
5155        \end{array}
5156
5157    where :math:`K_{1}` is modified Bessel function of the second kind, order 1.
5158
5159    Args:
5160        x (Tensor): The input tensor. The data type must be float16, float32 or float64.
5161
5162    Returns:
5163        Tensor, has the same shape and dtype as the `x`.
5164
5165    Raises:
5166        TypeError: If `x` is not a Tensor.
5167        TypeError: If dtype of `x` is not float16, float32 or float64.
5168
5169    Supported Platforms:
5170        ``GPU`` ``CPU``
5171
5172    Examples:
5173        >>> import mindspore
5174        >>> import numpy as np
5175        >>> from mindspore import Tensor, ops
5176        >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
5177        >>> output = ops.bessel_k1e(x)
5178        >>> print(output)
5179        [2.73100971  1.63615349  1.03347685  0.68157595]
5180    """
5181    return bessel_k1e_(x)
5182
5183
5184@constexpr
5185def _check_input_dtype(param_name, input_dtype, allow_dtypes, cls_name):
5186    validator.check_type_name(param_name, input_dtype, allow_dtypes, cls_name)
5187
5188
5189def deg2rad(x):
5190    """
5191    Converts angles in degrees to angles in radians element-wise.
5192
5193    Args:
5194        x (Tensor): The input tensor.
5195            With float16, float32 or float64 data type.
5196
5197    Returns:
5198        Tensor, has the same dtype as the `x`.
5199
5200    Raises:
5201        TypeError: If `x` is not a Tensor.
5202        TypeError: If dtype of `x` isn't float16, float32 or float64.
5203
5204    Supported Platforms:
5205        ``Ascend`` ``GPU`` ``CPU``
5206
5207    Examples:
5208        >>> import numpy as np
5209        >>> from mindspore import Tensor, ops
5210        >>> x = Tensor(np.array([[90.0, -90.0], [180.0, -180.0], [270.0, -270.0]]).astype(np.float32))
5211        >>> output = ops.deg2rad(x)
5212        >>> print(output)
5213        [[ 1.5707964 -1.5707964]
5214         [ 3.1415927 -3.1415927]
5215         [ 4.712389  -4.712389 ]]
5216    """
5217    if not isinstance(x, (Tensor, Tensor_)):
5218        raise TypeError("The input x must be tensor")
5219    x_dtype = dtype_(x)
5220    _check_input_dtype("x", x_dtype, [mstype.float16, mstype.float32, mstype.float64], "")
5221    if x_dtype == mstype.float16:
5222        out = x * (Tensor(math.pi / 180.0).astype(mstype.float16))
5223    else:
5224        out = x * math.pi / 180.0
5225    return out
5226
5227
5228def rad2deg(x):
5229    """
5230    Converts angles in radians to angles in degrees element-wise.
5231
5232    Args:
5233        x (Tensor): The input tensor.
5234
5235    Returns:
5236        Tensor, has the same shape and dtype as the `x`.
5237
5238    Raises:
5239        TypeError: If `x` is not a Tensor.
5240        TypeError: If dtype of `x` isn't float16, float32 or float64.
5241
5242    Supported Platforms:
5243        ``Ascend`` ``GPU`` ``CPU``
5244
5245    Examples:
5246        >>> import mindspore
5247        >>> from mindspore import Tensor
5248        >>> from mindspore import ops
5249        >>> x = Tensor([[6.283, -3.142],[1.570, -6.283],[3.142, -1.570]], mindspore.float32)
5250        >>> output = ops.rad2deg(x)
5251        >>> print(output)
5252        [[ 359.98935 -180.02333]
5253         [  89.95438 -359.98935]
5254         [ 180.02333  -89.95438]]
5255
5256    """
5257    if not isinstance(x, (Tensor, Tensor_)):
5258        raise TypeError("The input x must be tensor")
5259    x_dtype = dtype_(x)
5260    _check_input_dtype("x", x_dtype, [mstype.float16, mstype.float32, mstype.float64], "")
5261    if x_dtype == mstype.float16:
5262        out = x * (Tensor(180.0 / math.pi).astype(mstype.float16))
5263    else:
5264        out = x * 180.0 / math.pi
5265    return out
5266
5267
5268def frac(x):
5269    """
5270    Calculates the fractional part of each element in the input
5271
5272    Args:
5273        x (Tensor): x is a tensor.
5274
5275    Returns:
5276        Tensor, has the same shape and type as input.
5277
5278    Raises:
5279        TypeError: If `x` is not a Tensor.
5280
5281    Supported Platforms:
5282        ``Ascend`` ``GPU`` ``CPU``
5283
5284    Examples:
5285        >>> import mindspore
5286        >>> from mindspore import Tensor
5287        >>> from mindspore import dtype as mstype
5288        >>> from mindspore import ops
5289        >>> x = Tensor([2, 4.2, -2.5], mstype.float16)
5290        >>> output = ops.frac(x)
5291        >>> print(output)
5292        [ 0.      0.1992 -0.5   ]
5293    """
5294    return mod_(x, 1)
5295
5296
5297#####################################
5298# Reduction Operation Functions.
5299#####################################
5300
5301
5302@_primexpr
5303def _create_cummin_perm(axis, x_shape):
5304    """Insure axis is in [-len(x_shape),len(s_shape)-1]"""
5305
5306    def _check(axis, len_axis):
5307        if not isinstance(axis, int):
5308            raise TypeError(f"The date type of 'axis' must be Int, but got {axis}.")
5309        if axis < -len_axis or axis > len_axis:
5310            raise ValueError(f"The value of axis must be in [{-len_axis}, {len_axis}], but got {axis}.")
5311
5312    len_axis = len(x_shape)
5313    _check(axis, len_axis)
5314    prem = [i for i in range(len_axis)]
5315    if axis < 0:
5316        axis = axis + len_axis
5317    prem[0], prem[axis] = axis, 0
5318    prem = tuple(prem)
5319    return prem
5320
5321
5322def cummin(input, axis):
5323    r"""
5324    Returns a tuple (values,indices) where 'values' is the cumulative minimum value of input Tensor `input`
5325    along the dimension `axis`, and `indices` is the index location of each minimum value.
5326
5327    .. math::
5328        \begin{array}{ll} \\
5329            y_{i} = \min(x_{1}, x_{2}, ... , x_{i})
5330        \end{array}
5331
5332    Args:
5333        input (Tensor): The input Tensor, rank of `input` > 0.
5334        axis (int): The dimension to do the operation over. The value of `axis` must be in the range
5335            `[-input.ndim, input.ndim - 1]`.
5336
5337    Returns:
5338        tuple [Tensor], tuple of 2 Tensors, containing the cumulative minimum of elements and the index.
5339        The shape of each output tensor is the same as input `input`.
5340
5341    Raises:
5342        TypeError: If `input` is not a Tensor.
5343        TypeError: If `input` is a Tensor, but the type is complex or bool.
5344        TypeError: If `axis` is not an int.
5345        ValueError: If `axis` is out the range of `[-input.ndim, input.ndim - 1]`.
5346
5347    Supported Platforms:
5348        ``Ascend`` ``GPU`` ``CPU``
5349
5350    Examples:
5351        >>> from mindspore import Tensor, ops
5352        >>> import mindspore
5353        >>> a = Tensor([-0.2284, -0.6628,  0.0975,  0.2680, -1.3298, -0.4220], mindspore.float32)
5354        >>> output = ops.cummin(a, axis=0)
5355        >>> print(output[0])
5356        [-0.2284 -0.6628 -0.6628 -0.6628 -1.3298 -1.3298]
5357        >>> print(output[1])
5358        [0 1 1 1 4 4]
5359    """
5360    if isinstance(axis, bool):
5361        raise TypeError(f"For 'cummin', the date type of 'axis' must be Int, but got {axis}.")
5362    cummin_op = _get_cache_prim(Cummin)(axis=0)
5363    if axis == 0:
5364        out1, out2 = cummin_op(input)
5365    else:
5366        x_shape = shape_(input)
5367        prem = _create_cummin_perm(axis, x_shape)
5368        input = transpose_(input, prem)
5369        out1, out2 = cummin_op(input)
5370        out1 = transpose_(out1, prem)
5371        out2 = transpose_(out2, prem)
5372    return [out1, out2]
5373
5374
5375def cumsum(x, axis, dtype=None):
5376    """
5377    Computes the cumulative sum of input Tensor along `axis`.
5378
5379    .. math::
5380
5381        y_i = x_1 + x_2 + x_3 + ... + x_i
5382
5383    Note:
5384        On Ascend, the dtype of `x` only support :int8, uint8, int32, float16 or float32 in case of static shape.
5385        For the case of dynamic shape, the dtype of `x` only support int32, float16 or float32.
5386
5387    Args:
5388        x (Tensor): The input Tensor of shape :math:`(N, *)` where :math:`*` means, any number
5389            of additional dimensions.
5390        axis (int): Axis along which the cumulative sum is computed.
5391        dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If specified,
5392            the input Tensor will be cast to `dtype` before the computation. This is useful for preventing overflows.
5393            If not specified, stay the same as original Tensor. Default: ``None`` .
5394
5395    Returns:
5396        Tensor, the shape of the output Tensor is consistent with the input Tensor's.
5397
5398    Raises:
5399        TypeError: If `x` is not a Tensor.
5400        ValueError: If the axis is out of range.
5401
5402    Supported Platforms:
5403        ``Ascend`` ``GPU`` ``CPU``
5404
5405    Examples:
5406        >>> import mindspore
5407        >>> import numpy as np
5408        >>> from mindspore import Tensor
5409        >>> from mindspore import ops
5410        >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
5411        >>> # case 1: along the axis 0
5412        >>> y = ops.cumsum(x, 0)
5413        >>> print(y)
5414        [[ 3.  4.  6. 10.]
5415         [ 4. 10. 13. 19.]
5416         [ 8. 13. 21. 26.]
5417         [ 9. 16. 28. 35.]]
5418        >>> # case 2: along the axis 1
5419        >>> y = ops.cumsum(x, 1)
5420        >>> print(y)
5421        [[ 3.  7. 13. 23.]
5422         [ 1.  7. 14. 23.]
5423         [ 4.  7. 15. 22.]
5424         [ 1.  4. 11. 20.]]
5425    """
5426    if dtype is not None and x.dtype != dtype:
5427        x = x.astype(dtype, copy=False)
5428    return cumsum_(x, axis)
5429
5430
5431def sparse_segment_mean(x, indices, segment_ids):
5432    r"""
5433    Computes a Tensor such that :math:`output_i = \frac{\sum_j x_{indices[j]}}{N}` where mean is over :math:`j` such
5434    that :math:`segment\_ids[j] == i` and :math:`N` is the total number of values summed. If the mean is empty for
5435    a given segment ID :math:`i`, :math:`output[i] = 0`.
5436
5437    Note:
5438        - On CPU, values in `segment_ids` are always validated to be sorted, and an error is thrown for indices that
5439          are not increasing. Moreover, values in `indices` are validated to be bounded, and an error is thrown when
5440          `indices` are out of range[0, x.shape[0]).
5441        - On GPU, this does not throw an error for unsorted `segment_ids` and out-of-bound `indices`. Out-of-order
5442          `segment_ids` result in safe but unspecified behavior, while out-of-range `indices` will be ignored.
5443
5444    Args:
5445        x (Tensor): A Tensor, and its rank must be greater than or equal to 1.
5446        indices (Tensor): A 1-D Tensor, with int32 or int64 data type.
5447        segment_ids (Tensor): A 1-D Tensor, must have the same dtype as `indices`.
5448            Values should be sorted and can be repeated.
5449
5450    Returns:
5451        Tensor, whose dtype and rank is the same as `x`. The first dimension is equal to the value of the last element
5452        of `segment_ids` plus one, and the other dimensions are the same as those of `x`.
5453
5454    Raises:
5455        TypeError: If `x`, `indices` or `segment_ids` is not a Tensor.
5456        TypeError: If the dtype of `x` is not one of the following dtype: float16, float32, float64.
5457        TypeError: If the dtype of `indices` and `segment_ids` are not one of the following dtype: int32, int64.
5458        TypeError: If the dtype of `indices` and `segment_ids` are not the same.
5459        ValueError: If the shape of `x`, `indices` or `segment_ids` don't meet the parameter description.
5460        ValueError: If the size of `indices` and `segment_ids` are not the same.
5461
5462    Supported Platforms:
5463        ``GPU`` ``CPU``
5464
5465    Examples:
5466        >>> import mindspore
5467        >>> from mindspore import Tensor, ops
5468        >>> x = Tensor([[0, 1, 2], [1, 2, 3], [3, 6, 7]], dtype=mindspore.float32)
5469        >>> indices = Tensor([0, 1, 2], dtype=mindspore.int32)
5470        >>> segment_ids = Tensor([1,2,2], dtype=mindspore.int32)
5471        >>> out = ops.sparse_segment_mean(x, indices, segment_ids)
5472        >>> print(out)
5473        [[0. 0. 0.]
5474         [0. 1. 2.]
5475         [2. 4. 5.]]
5476    """
5477    return sparse_segment_mean_(x, indices, segment_ids)
5478
5479
5480def block_diag(*inputs):
5481    r"""
5482    Creates a block diagonal matrix from the provided Tensor.
5483
5484    Args:
5485        inputs (Tensor): One or more tensors, the dimension of Tensor should be 0, 1 or 2.
5486
5487    Returns:
5488        Tensor, two-dimensional with all input tensors arranged in
5489        order so that their top left and bottom right corners are
5490        diagonally adjacent. All other elements are set to 0.
5491
5492    Raises:
5493        TypeError: If the input is not a Tensor.
5494        ValueError: If the dimension of Tensor is not 0, 1 or 2.
5495
5496    Supported Platforms:
5497        ``Ascend`` ``GPU`` ``CPU``
5498
5499    Examples:
5500        >>> from mindspore import Tensor, ops
5501        >>> from mindspore import dtype as mstype
5502        >>> x1 = Tensor([[4], [3], [2]], mstype.int32)
5503        >>> x2 = Tensor([7, 6, 5], mstype.int32)
5504        >>> x3 = Tensor(1, mstype.int32)
5505        >>> x4 = Tensor([[5, 4, 3], [2, 1, 0]], mstype.int32)
5506        >>> x5 = Tensor([[8, 7], [7, 8]], mstype.int32)
5507        >>> out = ops.block_diag(x1, x2, x3, x4, x5)
5508        >>> print(out.asnumpy())
5509        [[4 0 0 0 0 0 0 0 0 0]
5510         [3 0 0 0 0 0 0 0 0 0]
5511         [2 0 0 0 0 0 0 0 0 0]
5512         [0 7 6 5 0 0 0 0 0 0]
5513         [0 0 0 0 1 0 0 0 0 0]
5514         [0 0 0 0 0 5 4 3 0 0]
5515         [0 0 0 0 0 2 1 0 0 0]
5516         [0 0 0 0 0 0 0 0 8 7]
5517         [0 0 0 0 0 0 0 0 7 8]]
5518    """
5519
5520    def to_col_block(arys, i, a):
5521        return [
5522            a if idx == i else ops.zeros((ary.shape[0], a.shape[1]), ary.dtype)
5523            for idx, ary in enumerate(arys)
5524        ]
5525
5526    def to_2d(ary):
5527        if not isinstance(ary, Tensor):
5528            raise TypeError(
5529                f"For 'block_diag', each element of 'inputs' must be a tensor, but got {type(ary)}"
5530            )
5531        if ary.ndim == 0:
5532            return ops.expand_dims(ops.expand_dims(ary, 0), 0)
5533        if ary.ndim == 1:
5534            return ops.expand_dims(ary, 0)
5535        if ary.ndim == 2:
5536            return ary
5537        raise ValueError(
5538            "For 'block_diag', the dimension of each elements in 'inputs' must be 0, 1, or 2, but got "
5539            f"{ary.ndim}"
5540        )
5541
5542    if not inputs:
5543        raise RuntimeError("For 'block_diag', the input is empty.")
5544    arys = [to_2d(ary) for ary in inputs]
5545    matrix = [ops.concat(to_col_block(arys, idx, ary)) for idx, ary in enumerate(arys)]
5546    return ops.concat(matrix, 1)
5547
5548
5549def atleast_1d(inputs):
5550    r"""
5551    Reshapes Tensor in `inputs`, every Tensor has at least one dimension after this operation.
5552
5553    Scalar is converted to a 1-D Tensor, input tensor with one or more dimensions will be returned as it is.
5554
5555    Args:
5556        inputs (Union[Tensor, list[Tensor]]): One or more input tensors.
5557
5558    Returns:
5559        Tensor or list[Tensor]. If returned a list, every element `a` in that list satisfies `a.ndim >= 1`.
5560
5561    Raises:
5562        TypeError: If the `inputs` is not a tensor or a list of tensors.
5563
5564    Supported Platforms:
5565        ``Ascend`` ``GPU`` ``CPU``
5566
5567    Examples:
5568        >>> import numpy as np
5569        >>> from mindspore import Tensor, ops
5570        >>> x1 = Tensor(np.ones((2, 3)))
5571        >>> x2 = Tensor(np.ones(()))
5572        >>> x3 = Tensor(np.ones(5))
5573        >>> out = ops.atleast_1d([x1, x2, x3])
5574        >>> print(out[0].asnumpy())
5575        [[1. 1. 1.]
5576         [1. 1. 1.]]
5577        >>> print(out[1].asnumpy())
5578        [1.]
5579        >>> print(out[2].asnumpy())
5580        [1. 1. 1. 1. 1.]
5581    """
5582    if isinstance(inputs, Tensor):
5583        return _expand(inputs, 1)
5584    for tensor in inputs:
5585        if not isinstance(tensor, Tensor):
5586            raise TypeError(f"For 'atleast_1d', each element of 'inputs' must be a tensor, but got {type(tensor)}")
5587    return tuple([_expand(arr, 1) for arr in inputs])
5588
5589
5590def dstack(inputs):
5591    r"""
5592    Stacks tensors along the third axis.
5593
5594    1-D tensors :math:`(N,)` should be reshaped to :math:`(1,N,1)`.
5595    2-D tensors :math:`(M,N)` should be reshaped to :math:`(M,N,1)` before concatenation.
5596
5597    Args:
5598        inputs (Union(List[Tensor], Tuple[Tensor])): A sequence of tensors.
5599            The tensors must have the same shape along all but the third axis.
5600            1-D or 2-D tensors must have the same shape.
5601
5602    Returns:
5603        Stacked Tensor, will be at least 3-D.
5604        The output shape is similar to the output of `numpy.dstack()` function.
5605
5606    Raises:
5607        TypeError: If `inputs` is not tuple or list.
5608        ValueError: If `inputs` is empty.
5609
5610    Supported Platforms:
5611        ``Ascend`` ``GPU`` ``CPU``
5612
5613    Examples:
5614        >>> import numpy as np
5615        >>> from mindspore import Tensor, ops
5616        >>> x1 = Tensor(np.arange(1, 7).reshape(2, 3))
5617        >>> x2 = Tensor(np.arange(7, 13).reshape(2, 3))
5618        >>> out = ops.dstack([x1, x2])
5619        >>> print(out.asnumpy())
5620        [[[ 1.  7.]
5621          [ 2.  8.]
5622          [ 3.  9.]]
5623         [[ 4. 10.]
5624          [ 5. 11.]
5625          [ 6. 12.]]]
5626    """
5627    if not isinstance(inputs, (tuple, list)):
5628        raise TypeError(f"For 'dstack', 'inputs' must be list or tuple of tensors, but got {type(inputs)}")
5629    if not inputs:
5630        raise TypeError(f"For 'dstack', 'inputs' can not be empty.")
5631    trans_inputs = ()
5632    for tensor in inputs:
5633        if not isinstance(tensor, Tensor):
5634            raise TypeError(f"For 'dstack', each elements of 'inputs' must be Tensor, but got {type(tensor)}")
5635        if tensor.size == 0:
5636            raise TypeError(f"For 'dstack', each elements of 'inputs' can not be empty.")
5637        if tensor.ndim <= 1:
5638            tensor = _expand(tensor, 2)
5639        if tensor.ndim == 2:
5640            tensor = expand_dims_(tensor, 2)
5641        trans_inputs += (tensor,)
5642    if not trans_inputs:
5643        raise ValueError("For 'dstack', at least one tensor is needed to concatenate.")
5644    return _get_cache_prim(P.Concat)(2)(trans_inputs)
5645
5646
5647@_primexpr
5648def _check_is_int(arg_value, arg_name, cls_name):
5649    validator.check_is_int(arg_value, arg_name, cls_name)
5650
5651
5652def diff(x, n=1, axis=-1, prepend=None, append=None):
5653    r"""
5654    Computes the n-th discrete difference along a specified axis of a given input `x`.
5655
5656    The first difference is calculated as :math:`out[i] = x[i+1] - x[i]` along the specified `axis`.
5657    To compute higher differences, the function is called recursively
5658    using the output from the previous iteration as input.
5659
5660    Note:
5661        Zero-shaped Tensor is not supported, a value error is raised if
5662        an empty Tensor is encountered. Any dimension of a Tensor is 0, which is considered
5663        an empty Tensor. Tensor with shape of :math:`(0,)`, :math:`(1, 2, 0, 4)` are all
5664        empty Tensor.
5665
5666    Args:
5667        x (Tensor): Input tensor.
5668            Full support for signed integers, partial support for floats and complex numbers
5669        n (int, optional): The number of times values are differenced. If zero,
5670            the input is returned as-is. Currently only 1 is supported. Default: ``1`` .
5671        axis (int, optional): The axis along which the difference is taken, default
5672            is the last axis. Default: ``-1`` .
5673        prepend (Tensor, optional): Values to prepend to `x` along
5674            `axis` prior to performing the difference. Scalar values are expanded to
5675            arrays with length 1 in the direction of `axis` and the shape of the input
5676            array along all other axis. Otherwise the dimension and shape must
5677            match `x` except along `axis`. Default: ``None`` .
5678        append (Tensor, optional): Values to append to `x` along
5679            `axis` prior to performing the difference. Scalar values are expanded to
5680            arrays with length 1 in the direction of `axis` and the shape of the input
5681            array along all other axis. Otherwise the dimension and shape must
5682            match `x` except along `axis`. Default: ``None`` .
5683
5684    Returns:
5685        Tensor, the n-th differences of input. The shape of the output is the same as `x`
5686        except along `axis` where the size is reduced by `n`. The type of the output
5687        is the same as `x`.
5688
5689    Raises:
5690        TypeError: If the data type of the elementes in `x` is uint16, uint32 or uint64.
5691        TypeError: If `x` is not a tensor.
5692        ValueError: If `x` is an empty Tensor.
5693        ValueError: If the dim of `x` is less than 1.
5694        RuntimeError: If `n` is not 1.
5695
5696    Supported Platforms:
5697        ``Ascend`` ``GPU`` ``CPU``
5698
5699    Examples:
5700        >>> from mindspore import Tensor, ops
5701        >>> x = Tensor([1, 3, -1, 0, 4])
5702        >>> out = ops.diff(x)
5703        >>> print(out.asnumpy())
5704        [ 2 -4  1  4]
5705    """
5706    if not isinstance(x, Tensor):
5707        raise TypeError(f"For 'diff', 'x' must be a tensor, but got {type(x)}")
5708    if x.ndim < 1:
5709        raise TypeError(f"For 'diff', the dimension 'x' must be at least 1, but got {x.ndim}")
5710    if 0 in x.shape:
5711        raise ValueError(f"For 'diff', 'x' can not be an empty Tensor.")
5712    _check_is_int(n, 'n', 'diff')
5713    if n != 1:
5714        raise RuntimeError(f"For 'diff', 'n' must be 1, but got {n}")
5715    if x.dtype in (mstype.uint16, mstype.uint32, mstype.uint64):
5716        msg = f"For 'diff', the data type of the elements in 'x' cannot be uint16, uint32, uint64, but got {x.dtype}"
5717        raise TypeError(msg)
5718    if prepend is not None and append is not None:
5719        x = ops.Concat(axis)((prepend, x, append))
5720    elif append is not None:
5721        x = ops.Concat(axis)((x, append))
5722    elif prepend is not None:
5723        x = ops.Concat(axis)((prepend, x))
5724    a = ops.make_range(x.shape[axis])
5725    a1 = x.gather(TupleToTensor()(a[:-1], mstype.int64), axis)
5726    a2 = x.gather(TupleToTensor()(a[1:], mstype.int64), axis)
5727    return a2 - a1
5728
5729
5730def tril_indices(row, col, offset=0, *, dtype=mstype.int64):
5731    r"""
5732    Calculates the indices of the lower triangular elements in a `row` * `col` matrix
5733    and returns them as a 2-by-N Tensor. The first row of the Tensor contains
5734    row coordinates, and the second row contains column coordinates. The coordinates are
5735    sorted by row and then by column.
5736
5737    The lower triangular part of the matrix consists of all elements on and below the diagonal.
5738
5739    Note:
5740        When running on CUDA, row * col must be less than 2^59 to prevent overflow during calculation.
5741
5742    Args:
5743        row (int): number of rows in the 2-D matrix.
5744        col (int): number of columns in the 2-D matrix.
5745        offset (int, optional): diagonal offset from the main diagonal. Default: ``0`` .
5746
5747    Keyword Args:
5748        dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor.
5749            An optional data type of `mindspore.int32` and `mindspore.int64`. Default: ``mstype.int64`` .
5750
5751    Returns:
5752        - **y** (Tensor) - indices of the elements in lower triangular part of matrix. The type is specified by `dtype`.
5753          The shape of output is :math:`(2, tril\_size)`, where :math:`tril\_size` is the number of elements in the
5754          lower triangular matrix.
5755
5756    Raises:
5757        TypeError: If `row`, `col` or `offset` is not an int.
5758        TypeError: If `dtype` is neither int32 nor int64.
5759        ValueError: If `row` or `col` < 0.
5760
5761    Supported Platforms:
5762        ``Ascend`` ``GPU`` ``CPU``
5763
5764    Examples:
5765        >>> import mindspore
5766        >>> from mindspore import ops
5767        >>> output = ops.tril_indices(4, 3, -1, dtype=mindspore.int64)
5768        >>> print(output)
5769        [[1 2 2 3 3 3]
5770         [0 0 1 0 1 2]]
5771        >>> print(output.dtype)
5772        Int64
5773    """
5774
5775    tril_indices_ = TrilIndices(row=row, col=col, offset=offset, dtype=dtype)
5776    return tril_indices_()
5777
5778
5779def triu_indices(row, col, offset=0, *, dtype=mstype.int64):
5780    r"""
5781    Calculates the indices of the upper triangular elements in a `row` * `col` matrix
5782    and returns them as a 2-by-N Tensor. The first row of the Tensor contains
5783    row coordinates, and the second row contains column coordinates. The coordinates are
5784    sorted by row and then by column.
5785
5786    The upper triangular part of the matrix consists of all elements on and above the diagonal.
5787
5788    Note:
5789        When running on CUDA, row * col must be less than 2^59 to prevent overflow during calculation.
5790
5791    Args:
5792        row (int): number of rows in the 2-D matrix.
5793        col (int): number of columns in the 2-D matrix.
5794        offset (int, optional): diagonal offset from the main diagonal. Default: ``0`` .
5795
5796    Keyword Args:
5797        dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor.
5798            An optional data type of `mindspore.int32` and `mindspore.int64`. Default: ``mstype.int64``.
5799
5800    Returns:
5801        - **y** (Tensor) - indices of the elements in upper triangular part of matrix. The type is specified by `dtype`.
5802          The shape of output is :math:`(2, triu\_size)`, where :math:`triu\_size` is the number of elements in the
5803          upper triangular matrix.
5804
5805    Raises:
5806        TypeError: If `row`, `col` or `offset` is not an int.
5807        TypeError: If `dtype` is neither int32 nor int64.
5808        ValueError: If `row` or `col` < 0.
5809
5810    Supported Platforms:
5811        ``Ascend`` ``GPU`` ``CPU``
5812
5813    Examples:
5814        >>> import mindspore
5815        >>> from mindspore import ops
5816        >>> output = ops.triu_indices(4, 4, 2, dtype=mindspore.int64)
5817        >>> print(output)
5818        [[0 0 1]
5819         [2 3 3]]
5820        >>> print(output.dtype)
5821        Int64
5822    """
5823
5824    triu_indices_ = TriuIndices(row=row, col=col, offset=offset, dtype=dtype)
5825    return triu_indices_()
5826
5827
5828def atleast_2d(inputs):
5829    r"""
5830    Reshapes Tensor in `inputs`, every Tensor has at least 2 dimension after this operation.
5831
5832    Scalar or 1-D Tensor is converted to 2-D Tensor, tensor with higher dimensions will be returned as it is.
5833
5834    Args:
5835        inputs (Union[Tensor, list[Tensor]]): One or more input tensors.
5836
5837    Returns:
5838        Tensor or list[Tensor]. If returned a list, every element `a` in that list satisfies `a.ndim >= 2` .
5839
5840    Raises:
5841        TypeError: If the `inputs` is not a tensor or a list of tensors.
5842
5843    Supported Platforms:
5844        ``Ascend`` ``GPU`` ``CPU``
5845
5846    Examples:
5847        >>> import mindspore.numpy as np
5848        >>> from mindspore import ops
5849        >>> x1 = np.ones((2, 3))
5850        >>> x2 = np.ones(())
5851        >>> x3 = np.ones(5)
5852        >>> out = ops.atleast_2d([x1, x2, x3])
5853        >>> print(out)
5854        (Tensor(shape=[2, 3], dtype=Float32, value=
5855        [[ 1.00000000e+00, 1.00000000e+00, 1.00000000e+00],
5856        [ 1.00000000e+00, 1.00000000e+00, 1.00000000e+00]]), Tensor(shape=[1, 1], dtype=Float32, value=
5857        [[ 1.00000000e+00]]), Tensor(shape=[1, 5], dtype=Float32, value=
5858        [[ 1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00]]))
5859    """
5860    if isinstance(inputs, Tensor):
5861        return _expand(inputs, 2)
5862    for tensor in inputs:
5863        if not isinstance(tensor, Tensor):
5864            msg = "expect Tensor or list of tensors, but got " + f"{type(tensor)}"
5865            raise TypeError(msg)
5866    return tuple([_expand(arr, 2) for arr in inputs])
5867
5868
5869def cartesian_prod(*inputs):
5870    r"""
5871    Performs a Cartesian product for a given tensor sequence.
5872    The behavior is similar to Python's `itertools.product`.
5873
5874    Args:
5875        inputs (List[Tensor]): Tensor sequence.
5876
5877    Returns:
5878        Tensor, a Cartesian product for a given tensor sequence.
5879
5880    Raises:
5881        TypeError: If the input is not a tensor.
5882
5883    Supported Platforms:
5884        ``Ascend`` ``GPU`` ``CPU``
5885
5886    Examples:
5887        >>> from mindspore import Tensor, ops
5888        >>> x1 = Tensor([1, 2])
5889        >>> x2 = Tensor([5])
5890        >>> out = ops.cartesian_prod(x1, x2)
5891        >>> print(out.asnumpy())
5892        [[1 5]
5893         [2 5]]
5894        >>> x1 = Tensor([1, 2, 3, 4])
5895        >>> x2 = Tensor([5, 6, 7])
5896        >>> x3 = Tensor([8, 9, 0, 1, 2])
5897        >>> out = ops.cartesian_prod(x1, x2, x3)
5898        >>> print(len(out))
5899        60
5900    """
5901    meshgrid = _get_cache_prim(P.Meshgrid)(indexing="ij")
5902    meshgrid_output = meshgrid(inputs)
5903    stack = _get_cache_prim(P.Stack)(axis=-1)
5904    stack_output = stack(meshgrid_output)
5905    return reshape_(stack_output, (-1, len(inputs)))
5906
5907
5908def atleast_3d(inputs):
5909    r"""
5910    Reshapes Tensor in `inputs`, every Tensor has at least 3 dimension after this operation.
5911
5912    Scalar, 1-D or 2-D Tensor is converted to 3-D Tensor,
5913    tensor with higher dimensions will be returned as it is.
5914
5915    Args:
5916        inputs (Union[Tensor, list[Tensor]]): One or more input tensors.
5917
5918    Returns:
5919        Tensor or list[Tensor]. If returned a list, every element `a` in that list satisfies `a.ndim >= 3`.
5920        For example, a 1-D Tensor of shape :math:`(N,)` becomes a Tensor of shape :math:`(1, N, 1)`, and
5921        a 2-D Tensor of shape :math:`(M, N)` becomes a tensor of shape :math:`(M, N, 1)`.
5922
5923    Raises:
5924        TypeError: If the `inputs` is not a tensor or a list of tensors.
5925
5926    Supported Platforms:
5927        ``Ascend`` ``GPU`` ``CPU``
5928
5929    Examples:
5930        >>> import numpy as np
5931        >>> from mindspore import Tensor, ops
5932        >>> x1 = Tensor(np.ones((2, 3)))
5933        >>> x2 = Tensor(np.ones(()))
5934        >>> x3 = Tensor(np.ones(5))
5935        >>> out = ops.atleast_3d([x1, x2, x3])
5936        >>> print(out[0].asnumpy())
5937        [[[1.]
5938          [1.]
5939          [1.]]
5940        <BLANKLINE>
5941         [[1.]
5942          [1.]
5943          [1.]]]
5944        >>> print(out[1].asnumpy())
5945        [[[1.]]]
5946        >>> print(out[2].asnumpy())
5947        [[[1.]
5948          [1.]
5949          [1.]
5950          [1.]
5951          [1.]]]
5952    """
5953
5954    def _expand3(arr):
5955        ndim = rank_(arr)
5956        if ndim == 0:
5957            return reshape_(arr, (1, 1, 1))
5958        if ndim == 1:
5959            return reshape_(arr, (1, size_(arr), 1))
5960        if ndim == 2:
5961            return reshape_(arr, shape_(arr) + (1,))
5962        return arr
5963
5964    if isinstance(inputs, Tensor):
5965        return _expand3(inputs)
5966    for tensor in inputs:
5967        if not isinstance(tensor, Tensor):
5968            raise TypeError(f"For 'atleast_3d', each element of 'inputs' must be a tensor, but got {type(tensor)}")
5969    return tuple([_expand3(arr) for arr in inputs])
5970
5971
5972def view_as_real(input):
5973    r"""
5974    View a complex Tensor as a real Tensor.
5975    The size of last dimension of the returned real Tensor is 2, and the last dimension is composed of
5976    the real and imaginary components of complex numbers.
5977
5978    Args:
5979        input (Tensor): the input must be a complex Tensor.
5980
5981    Returns:
5982        A real Tensor.
5983
5984    Raises:
5985        TypeError: If the input Tensor is not a complex Tensor.
5986
5987    Supported Platforms:
5988        ``GPU`` ``CPU``
5989
5990    Examples:
5991        >>> from mindspore import Tensor, ops
5992        >>> from mindspore import dtype as mstype
5993        >>> x = Tensor([2+1j,2+3j,2-1j,2], mstype.complex64)
5994        >>> print(ops.view_as_real(x))
5995        [[ 2.  1.]
5996         [ 2.  3.]
5997         [ 2. -1.]
5998         [ 2.  0.]]
5999    """
6000    if not is_complex(input):
6001        raise TypeError("For view_as_real, the dtype of input Tensor must be complex.")
6002    real_part = input.real().expand_dims(-1)
6003    imag_part = input.imag().expand_dims(-1)
6004    con = _get_cache_prim(ops.Concat)(-1)
6005    return con((real_part, imag_part))
6006
6007
6008def vstack(inputs):
6009    r"""
6010    Stacks tensors in sequence vertically.
6011
6012    This is equivalent to concatenation along the first axis.
6013    1-D tensors :math:`(N,)` should firstly be reshaped to :math:`(1, N)`,
6014    and then be concatenated along the first axis.
6015
6016    Args:
6017        inputs (Union(List[tensor], Tuple[tensor])): A sequence of 1-D or 2-D tensors.
6018            The tensors must have the same shape along all but the first axis.
6019            1-D tensors must have the same shape.
6020
6021    Returns:
6022        Tensor, formed by stacking the given tensors, will be at least 3-D.
6023        The output shape is similar to the output of `numpy.vstack()` function.
6024
6025    Raises:
6026        TypeError: If `inputs` is not list or tuple.
6027        ValueError: If `inputs` is empty.
6028
6029    Supported Platforms:
6030        ``Ascend`` ``GPU`` ``CPU``
6031
6032    Examples:
6033        >>> import mindspore.numpy as np
6034        >>> x1 = np.array([3, 1, 4])
6035        >>> x2 = np.array([1, 5, 9])
6036        >>> out = ops.vstack([x1, x2])
6037        >>> print(out)
6038        [[3 1 4]
6039         [1 5 9]]
6040    """
6041    if not isinstance(inputs, (tuple, list)):
6042        msg = f"For 'vstack', list or tuple of tensors are required, but got {type(inputs)}"
6043        raise TypeError(msg)
6044    if not inputs:
6045        msg = "For 'vstack', inputs can not be empty"
6046        raise TypeError(msg)
6047    trans_tup = ()
6048    for tensor in inputs:
6049        if not isinstance(tensor, Tensor):
6050            msg = f"For 'vstack', Tensor is required, but got {type(tensor)}"
6051            raise TypeError(msg)
6052        if tensor.ndim <= 1:
6053            shape = shape_(tensor)
6054            if isinstance(shape, int):
6055                shape = (shape,)
6056            ndim_diff = 2 - len(shape)
6057            if ndim_diff > 0:
6058                shape = [1] * ndim_diff + [i for i in shape]
6059            tensor = reshape_(tensor, tuple(shape))
6060        trans_tup += (tensor,)
6061    if not trans_tup:
6062        raise ValueError("For 'vstack', need at least one tensor to concatenate.")
6063    out = _get_cache_prim(P.Concat)(0)(trans_tup)
6064    return out
6065
6066
6067def row_stack(tensors):
6068    """
6069    Alias for :func:`mindspore.ops.vstack` .
6070
6071    Supported Platforms:
6072        ``Ascend`` ``GPU`` ``CPU``
6073    """
6074    return vstack(tensors)
6075
6076
6077def combinations(input, r=2, with_replacement=False):
6078    r"""
6079    Returns all r-length subsequences of input Tensor.
6080
6081    When `with_replacement` is set to ``False``, it works similar to Python's
6082    `itertools.combinations`, and when `with_replacement` is set to ``True``,
6083    it behaves like `itertools.combinations_with_replacement`.
6084
6085    Args:
6086        input (Tensor): One-dimensional tensors.
6087        r (int, optional): Number of elements to perform combination. Default: ``2`` .
6088        with_replacement (bool, optional): Allow duplication or not. Default: ``False`` .
6089
6090    Returns:
6091        Tensor, contains all possible combinations of elements sampled from input Tensor.
6092
6093    Raises:
6094        TypeError: If `input` is not a tensor.
6095        TypeError: If `r` is not an int.
6096        TypeError: If `with_replacement` is not bool.
6097        ValueError: If `input` is not one-dimensional.
6098
6099    Supported Platforms:
6100        ``Ascend`` ``GPU`` ``CPU``
6101
6102    Examples:
6103        >>> from mindspore import Tensor, ops
6104        >>> input = Tensor([1, 3, -1, 0, 4])
6105        >>> output = ops.combinations(input)
6106        >>> print(output.asnumpy())
6107        [[ 1  3]
6108         [ 1 -1]
6109         [ 1  0]
6110         [ 1  4]
6111         [ 3 -1]
6112         [ 3  0]
6113         [ 3  4]
6114         [-1  0]
6115         [-1  4]
6116         [ 0  4]]
6117    """
6118
6119    def _combinations(iterable, r):
6120        lst = ops.StridedSlice()(ops.zeros(r), (0,), (0,), (1,))
6121        pool = tuple(iterable)
6122        n = len(pool)
6123        if r > n:
6124            return lst
6125        indices = list(range(r))
6126        lst = ops.concat([ops.reshape(pool[i], (1,)) for i in indices])
6127        while True:
6128            stop = True
6129            i = 0
6130            for index in range(r)[::-1]:
6131                if indices[index] != index + n - r:
6132                    stop = False
6133                    i = index
6134                    break
6135            if stop:
6136                return lst
6137            indices[i] += 1
6138            for j in range(i + 1, r):
6139                indices[j] = indices[j - 1] + 1
6140            item = ops.concat([ops.reshape(pool[i], (1,)) for i in indices])
6141            lst = ops.concat((lst, item), -1)
6142        return None
6143
6144    def _combinations_with_replacement(iterable, r):
6145        lst = Tensor_([])
6146        pool = tuple(iterable)
6147        n = len(pool)
6148        if not n and r:
6149            return lst
6150        indices = [0] * r
6151        lst = ops.concat([ops.reshape(pool[i], (1,)) for i in indices])
6152        while True:
6153            stop = True
6154            i = 0
6155            for index in range(r)[::-1]:
6156                if indices[index] != n - 1:
6157                    stop = False
6158                    i = index
6159                    break
6160            if stop:
6161                return lst
6162            indices[i:] = [indices[i] + 1] * (r - i)
6163            item = ops.concat([ops.reshape(pool[i], (1,)) for i in indices])
6164            lst = ops.concat((lst, item), -1)
6165        return None
6166
6167    if not isinstance(input, Tensor):
6168        raise TypeError(f"For 'combinations', 'x' must be a tensor, but got {type(input)}")
6169    if input.ndim != 1:
6170        raise ValueError(f"For 'combinations', the dimension 'x' must be 1, but got {input.ndim}")
6171    if not isinstance(r, int):
6172        raise TypeError(f"For 'combinations', 'r' must be an integer, but got {type(r)}")
6173    comb_func = _combinations_with_replacement if with_replacement else _combinations
6174    ret = comb_func(input, r)
6175    if ret.size == 0:
6176        return ret
6177    return ops.reshape(ret, (-1, r))
6178
6179
6180def dist(input, other, p=2):
6181    r"""
6182    Computes batched the :math:`p`-norm distance between each pair of the two collections of row vectors.
6183
6184    Note:
6185        Since only normalization for integer :math:`p`-normal form is supported in MindSpore,
6186        a type error will be raised if :math:`p` is not an integer.
6187
6188    Args:
6189        input (Tensor): The first input tensor. The dtype must be float16 or float32.
6190        other (Tensor): The second input tensor. The dtype must be float16 or float32.
6191        p (int, optional): The order of norm. `p` is greater than or equal to 0. Default: ``2`` .
6192
6193    Returns:
6194        Tensor, has the same dtype as `input`, which shape is :math:`(1)`.
6195
6196    Raises:
6197        TypeError: If `input` or `other` is not a Tensor.
6198        TypeError: If dtype of `input` or `other` is neither float16 nor float32.
6199        TypeError: If `p` is not a non-negative integer.
6200
6201    Supported Platforms:
6202        ``Ascend`` ``GPU`` ``CPU``
6203
6204    Examples:
6205        >>> from mindspore import Tensor, ops
6206        >>> input_x = Tensor([[[1.0, 1.0], [2.0, 2.0]]])
6207        >>> input_y = Tensor([[[3.0, 3.0], [3.0, 3.0]]])
6208        >>> out = ops.dist(input_x, input_y)
6209        >>> print(out.asnumpy())
6210        3.1622777
6211    """
6212    if not isinstance(input, Tensor):
6213        raise TypeError(f"For 'dist', 'input' must be a tensor, but got {type(input)}")
6214    if not isinstance(other, Tensor):
6215        raise TypeError(f"For 'dist', 'other' must be a tensor, but got {type(other)}")
6216    z = input - other
6217    if z.ndim == 0:
6218        return ops.abs(z)
6219
6220    # the types of p will expend once ops.LpNorm supports float
6221    return ops.LpNorm(axis=0, p=p)(ops.reshape(z, (-1,)))
6222
6223
6224def copysign(x, other):
6225    r"""
6226    Create a new floating-point tensor with the magnitude of `x` and the sign of `other`, element-wise.
6227
6228    Args:
6229        x (Union[Tensor]): Values to change the sign of.
6230        other (Union[int, float, Tensor]): The sign of `other` is copied to `x`. If `x.shape != other.shape`,
6231            `other` must be broadcastable to the shape of `x` (which is also the shape of the output).
6232
6233    Returns:
6234        Tensor. The dtype of the tensor is float.
6235        The values of `x` with the sign of `other`, the shape is the same as `x`.
6236
6237    Raises:
6238        TypeError: If dtype of the input is not in the given types or
6239            the input can not be converted to tensor.
6240
6241    Supported Platforms:
6242        ``Ascend`` ``GPU`` ``CPU``
6243
6244    Examples:
6245        >>> import mindspore.numpy as np
6246        >>> from mindspore import ops
6247        >>> x = np.array([[0.3, -0.7], [0.5, 0.5]])
6248        >>> other = np.array([[-0.4, 0.6], [0.4, -0.6]])
6249        >>> out = ops.copysign(x, other)
6250        >>> print(out)
6251        [[-0.3  0.7]
6252         [ 0.5 -0.5]]
6253    """
6254
6255    def _broadcast_to_shape(x, shape):
6256        """Broadcasts x from current shape to shape"""
6257        ndim_to = len(shape)
6258        x = _expand(x, ndim_to)
6259        return _broadcast_to(x, shape_(x), shape, ndim_to)
6260
6261    if not isinstance(x, Tensor):
6262        raise TypeError("Tensor is expected, but got " + f"{type(x)}")
6263    if not isinstance(other, (int, float, Tensor)):
6264        raise TypeError(
6265            "integer, float or Tensor is expected, but got " + f"{type(other)}"
6266        )
6267
6268    if not isinstance(other, Tensor):
6269        other = _type_convert(Tensor, other)
6270    other = _broadcast_to_shape(other, shape_(x))
6271
6272    if _check_same_type(dtype_(x), mstype.bool_):
6273        raise TypeError("copysign does not accept dtype bool.")
6274
6275    if _check_same_type(dtype_(x), mstype.complex64):
6276        raise TypeError("copysign does not accept dtype complex64.")
6277    if _check_same_type(dtype_(other), mstype.complex64):
6278        raise TypeError("copysign does not accept dtype complex64.")
6279
6280    if _check_same_type(dtype_(x), mstype.complex128):
6281        raise TypeError("copysign does not accept dtype complex128.")
6282    if _check_same_type(dtype_(other), mstype.complex128):
6283        raise TypeError("copysign does not accept dtype complex128.")
6284
6285    x_float = (
6286        x
6287        if x.dtype in (mstype.float16, mstype.float32, mstype.float64)
6288        else x.astype("float32")
6289    )
6290    pos_tensor = absolute_(x_float)
6291    less_zero = tensor_lt(other, 0)
6292    return select_(less_zero, neg(pos_tensor), pos_tensor)
6293
6294
6295def hann_window(window_length, periodic=True, *, dtype=None):
6296    r"""
6297    Generates a Hann Window.
6298
6299    The Hann window is defined as
6300
6301    .. math::
6302        w(n) = \frac{1}{2} - \frac{1}{2} \cos\left(\frac{2\pi{n}}{M-1}\right),\qquad 0 \leq n \leq M-1
6303
6304    Args:
6305        window_length (int): Length of window.
6306        periodic (bool, optional): When set to ``True`` , generates a periodic window for spectral analysis.
6307            When set to ``False`` , generates a symmetric window for filter design.Default: ``True`` .
6308
6309    Keyword Args:
6310        dtype (mindspore.dtype, optional): The output window data type, it must be float. Default: ``None`` .
6311
6312    Returns:
6313        Tensor, a Hann window.
6314
6315    Raises:
6316        TypeError: If `window_length` is not an integer.
6317        TypeError: If `periodic` is not a variable of Boolean type.
6318        ValueError: If `window_length` is negative.
6319
6320    Supported Platforms:
6321        ``Ascend`` ``GPU`` ``CPU``
6322
6323    Examples:
6324        >>> from mindspore import ops
6325        >>> window_length = 5
6326        >>> out = ops.hann_window(window_length)
6327        >>> print(out.asnumpy())
6328        [0.        0.3454915 0.9045085 0.9045085 0.3454915]
6329    """
6330    if not isinstance(window_length, int):
6331        raise TypeError(
6332            f"For 'hann_window', 'window_length' must be a non-negative integer, but got {type(window_length)}"
6333        )
6334    if window_length < 0:
6335        raise ValueError(
6336            f"For 'hann_window', 'window_length' must be a non-negative integer, but got {window_length}"
6337        )
6338    if window_length <= 1:
6339        return Tensor(np.ones(window_length))
6340    if not isinstance(periodic, (bool, np.bool_)):
6341        raise TypeError(
6342            f"For 'hann_window', 'periodic' must be a variable of Boolean type, but got {type(periodic)}"
6343        )
6344    if dtype is not None and dtype not in mstype.float_type:
6345        raise TypeError(f"For 'hann_window', 'dtype' must be floating point dtypes, but got {dtype}.")
6346    if periodic:
6347        window_length = window_length + 1
6348    n = np.arange(0, window_length)
6349    w = 0.5 - 0.5 * np.cos(2 * math.pi / (window_length - 1) * n)
6350
6351    if dtype is not None:
6352        w = cast_(ms.tensor(w), dtype)
6353    return Tensor(w[:-1]) if periodic else Tensor(w)
6354
6355
6356@constexpr
6357def _type_convert(force, obj):
6358    """
6359    Convert type of `obj` to `force`.
6360    """
6361    return force(obj)
6362
6363
6364def logcumsumexp(input, axis):
6365    """
6366    Compute the cumulative log-sum-exp of the input tensor `input` along `axis` .
6367    For example, if `input` is a tensor [a, b, c] and `axis` is 0, the output will be [a, log(exp(a) + exp(b)),
6368    log(exp(a) + exp(b) + exp(c))].
6369
6370    .. warning::
6371        This is an experimental API that is subject to change or deletion.
6372
6373    Args:
6374        input (Tensor) - The input tensor. Must be one of the following types: float16, float32, float64.
6375        axis (int) - Describing the dimension to compute the cumulative product.
6376            Must be in the range [-rank(input), rank(input)).
6377
6378    Returns:
6379        Tensor, has the same dtype and shape as the `input`.
6380
6381    Raises:
6382        TypeError: If `input` is not a Tensor.
6383        TypeError: If dtype of `input` is not in [float16, float32, float64].
6384        TypeError: If dtype of `axis` is not int.
6385        ValueError: If `axis` is out of range [-rank(input), rank(input)).
6386
6387    Supported Platforms:
6388        ``Ascend`` ``CPU`` ``GPU``
6389
6390    Examples:
6391        >>> import mindspore as ms
6392        >>> from mindspore import ops
6393        >>> import numpy as np
6394        >>> x = ms.Tensor(np.array([1.0, 2.0, 3.0]).astype(np.float32))
6395        >>> output = ops.logcumsumexp(x, 0)
6396        >>> print(output)
6397        [1.        2.3132617 3.407606 ]
6398    """
6399    if not isinstance(axis, int):
6400        raise TypeError(
6401            f"For 'logcumsumexp', 'axis' must be int type, but got {type(axis)}"
6402        )
6403    return cumulative_logsumexp_(input, Tensor(axis))
6404
6405
6406def logsumexp(input, axis, keep_dims=False):
6407    r"""
6408    Reduces a dimension of a tensor by calculating exponential for all elements in the dimension,
6409    then calculate logarithm of the sum.
6410
6411    .. math::
6412
6413        logsumexp(input) = \log(\sum(e^{input-input_{max}})) + input_{max}
6414
6415    Args:
6416        input (Tensor): The input tensor. With float16 or float32 data type.
6417        axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Only constant value is allowed.
6418        keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
6419            If ``False`` , don't keep these dimensions.
6420            Default : ``False`` .
6421
6422    Returns:
6423        Tensor, has the same dtype as the `input`.
6424
6425        - If axis is (), and keep_dims is False,
6426          the output is a 0-D tensor representing the sum of all elements in the input tensor.
6427        - If axis is int, set as 2, and keep_dims is False,
6428          the shape of output is :math:`(input_1, input_3, ..., input_R)`.
6429        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
6430          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
6431
6432    Supported Platforms:
6433        ``Ascend`` ``GPU`` ``CPU``
6434
6435    Examples:
6436        >>> import numpy as np
6437        >>> from mindspore import Tensor, ops
6438        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
6439        >>> output = ops.logsumexp(x, 1, keep_dims=True)
6440        >>> print(output.shape)
6441        (3, 1, 5, 6)
6442    """
6443    _reduce_sum = _get_cache_prim(P.ReduceSum)(keep_dims)
6444
6445    input_max = ops.ReduceMax(keep_dims=True)(input, axis)
6446    input_exp = tensor_exp(input - input_max)
6447    input_sumexp = _reduce_sum(input_exp, axis)
6448    input_logsumexp = log_(input_sumexp)
6449    if not keep_dims:
6450        input_max = input_max.squeeze(axis=axis)
6451    return input_logsumexp + input_max
6452
6453
6454def amin(input, axis=None, keepdims=False, *, initial=None, where=None):
6455    r"""
6456    Reduces all dimensions of a tensor by returning the minimum value in `input`, by default. And also can
6457    reduce a dimension of `input` along specified `axis`. `keepdims` determines whether the dimensions of
6458    output and input are the same.
6459
6460    Note:
6461        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
6462
6463    Args:
6464        input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
6465            :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
6466        axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` , reduce all
6467            dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
6468        keepdims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep
6469            these dimensions. Default: ``False`` .
6470
6471    Keyword Args:
6472        initial (scalar, optional): The minimum value of an output element. Must be present to allow computation
6473            on empty slice. Default: ``None`` .
6474        where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input` with the
6475            value in `initial`. If ``True`` , do not replace, otherwise replace. For the index of ``True`` in `where`,
6476            the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates ``True`` by
6477            default.
6478
6479    Returns:
6480        Tensor, has the same data type as input tensor.
6481
6482        - If `axis` is ``None`` , and `keepdims` is ``False`` ,
6483          the output is a 0-D tensor representing the product of all elements in the input tensor.
6484        - If `axis` is int, set as 1, and `keepdims` is ``False`` ,
6485          the shape of output is :math:`(x_0, x_2, ..., x_R)`.
6486        - If `axis` is tuple(int), set as (1, 2), and `keepdims` is ``False`` ,
6487          the shape of output is :math:`(x_0, x_3, ..., x_R)`.
6488        - If `axis` is 1-D Tensor, set as [1, 2], and `keepdims` is ``False`` ,
6489          the shape of output is :math:`(x_0, x_3, ..., x_R)`.
6490
6491    Raises:
6492        TypeError: If `input` is not a Tensor.
6493        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
6494        TypeError: If `keepdims` is not a bool.
6495        ValueError: If `axis` is out of range.
6496
6497    Supported Platforms:
6498        ``Ascend`` ``GPU`` ``CPU``
6499
6500    Examples:
6501        >>> import mindspore
6502        >>> import numpy as np
6503        >>> from mindspore import Tensor, ops
6504        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
6505        >>> output = ops.amin(x, 1, keepdims=True)
6506        >>> result = output.shape
6507        >>> print(result)
6508        (3, 1, 5, 6)
6509        >>> # case 1: Reduces a dimension by the minimum value of all elements in the dimension.
6510        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
6511        ...                      [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
6512        ...                      [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
6513        >>> output = ops.amin(x)
6514        >>> print(output)
6515        1.0
6516        >>> print(output.shape)
6517        ()
6518        >>> # case 2: Reduces a dimension along axis 0.
6519        >>> output = ops.amin(x, 0, True)
6520        >>> print(output)
6521        [[[1. 1. 1. 1. 1. 1.]
6522          [2. 2. 2. 2. 2. 2.]
6523          [3. 3. 3. 3. 3. 3.]]]
6524        >>> # case 3: Reduces a dimension along axis 1.
6525        >>> output = ops.amin(x, 1, True)
6526        >>> print(output)
6527        [[[1. 1. 1. 1. 1. 1.]]
6528         [[4. 4. 4. 4. 4. 4.]]
6529         [[7. 7. 7. 7. 7. 7.]]]
6530        >>> # case 4: Reduces a dimension along axis 2.
6531        >>> output = ops.amin(x, 2, True)
6532        >>> print(output)
6533        [[[1.]
6534          [2.]
6535          [3.]]
6536         [[4.]
6537          [5.]
6538          [6.]]
6539         [[7.]
6540          [8.]
6541          [9.]]]
6542    """
6543    if axis is None:
6544        axis = ()
6545    input = _init_and_select_elem(input, initial, where, ops.minimum)
6546    return _get_cache_prim(P.ReduceMin)(keepdims)(input, axis)
6547
6548
6549def _init_and_select_elem(input, initial, where, cmp_fn):
6550    """Initialize the input according to Initial, and select the element according to where."""
6551    if initial is not None:
6552        initial = ops.fill(input.dtype, input.shape, initial)
6553        input = cmp_fn(input, initial)
6554
6555    if isinstance(where, Tensor):
6556        if initial is None:
6557            raise ValueError('initial value must be provided for where masks')
6558        where = where.broadcast_to(input.shape)
6559        initial = initial.broadcast_to(input.shape)
6560        input = ops.select(where, input, initial)
6561    return input
6562
6563
6564def amax(input, axis=None, keepdims=False, *, initial=None, where=None):
6565    r"""
6566    Reduces all dimensions of a tensor by returning the maximum value in `input`, by default. And also can
6567    reduce a dimension of `input` along specified `axis`.  `keepdims` determines whether the dimensions of
6568    output and input are the same.
6569
6570    Note:
6571        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
6572
6573    Args:
6574        input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
6575            :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
6576        axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` , reduce all
6577            dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
6578        keepdims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep
6579            these dimensions. Default: ``False`` .
6580
6581    Keyword Args:
6582        initial (scalar, optional): The minimum value of an output element. Must be present to allow computation
6583            on empty slice. Default: ``None`` .
6584        where (Tensor[bool], optional): A Tensor indicating whether to replace the primitive value in `input` with the
6585            value in `initial`. If ``True`` , do not replace, otherwise replace. For the index of ``True`` in `where`,
6586            the corresponding value in `initial` must be assigned. Default: ``None`` , which indicates ``True`` by
6587            default.
6588
6589    Returns:
6590        Tensor, has the same data type as input tensor.
6591
6592        - If `axis` is ``None`` , and `keepdims` is ``False`` , the output is a 0-D tensor representing the product of
6593          all elements in the input tensor.
6594        - If `axis` is int, set as 1, and `keepdims` is ``False`` , the shape of output is :math:`(x_0, x_2, ..., x_R)`.
6595        - If `axis` is tuple(int), set as (1, 2), and `keepdims` is ``False`` , the shape of output is
6596          :math:`(x_0, x_3, ..., x_R)`.
6597        - If `axis` is 1-D Tensor, set as [1, 2], and `keepdims` is ``False`` , the shape of output is
6598          :math:`(x_0, x_3, ..., x_R)`.
6599
6600    Raises:
6601        TypeError: If `input` is not a Tensor.
6602        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
6603        TypeError: If `keepdims` is not a bool.
6604        ValueError: If `axis` is out of range.
6605
6606    Supported Platforms:
6607        ``Ascend`` ``GPU`` ``CPU``
6608
6609    Examples:
6610        >>> import mindspore
6611        >>> import numpy as np
6612        >>> from mindspore import Tensor, ops
6613        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
6614        >>> output = ops.amax(x, 1, keepdims=True)
6615        >>> result = output.shape
6616        >>> print(result)
6617        (3, 1, 5, 6)
6618        >>> # case 1: Reduces a dimension by the maximum value of all elements in the dimension.
6619        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
6620        ...                      [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
6621        ...                      [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
6622        >>> output = ops.amax(x)
6623        >>> print(output)
6624        9.0
6625        >>> print(output.shape)
6626        ()
6627        >>> # case 2: Reduces a dimension along axis 0.
6628        >>> output = ops.amax(x, 0, True)
6629        >>> print(output)
6630        [[[7. 7. 7. 7. 7. 7.]
6631          [8. 8. 8. 8. 8. 8.]
6632          [9. 9. 9. 9. 9. 9.]]]
6633        >>> # case 3: Reduces a dimension along axis 1.
6634        >>> output = ops.amax(x, 1, True)
6635        >>> print(output)
6636        [[[3. 3. 3. 3. 3. 3.]]
6637         [[6. 6. 6. 6. 6. 6.]]
6638         [[9. 9. 9. 9. 9. 9.]]]
6639        >>> # case 4: Reduces a dimension along axis 2.
6640        >>> output = ops.amax(x, 2, True)
6641        >>> print(output)
6642        [[[1.]
6643          [2.]
6644          [3.]]
6645         [[4.]
6646          [5.]
6647          [6.]]
6648         [[7.]
6649          [8.]
6650          [9.]]]
6651    """
6652    if axis is None:
6653        axis = ()
6654    input = _init_and_select_elem(input, initial, where, ops.maximum)
6655    return _get_cache_prim(P.ReduceMax)(keepdims)(input, axis)
6656
6657
6658def mean(x, axis=None, keep_dims=False):
6659    r"""
6660    Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
6661    And reduce a dimension of `x` along the specified `axis`. `keep_dims`
6662    determines whether the dimensions of the output and input are the same.
6663
6664    Note:
6665        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
6666
6667    Args:
6668        x (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
6669          :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
6670        axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` ,
6671            reduce all dimensions. Only constant value is allowed. Assume the rank of `x` is r,
6672            and the value range is [-r,r).
6673        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
6674                          If false, don't keep these dimensions. Default: ``False`` .
6675
6676    Returns:
6677        Tensor, has the same data type as input tensor.
6678
6679        - If `axis` is None, and `keep_dims` is False,
6680          the output is a 0-D tensor representing the product of all elements in the input tensor.
6681        - If `axis` is int, set as 1, and `keep_dims` is False,
6682          the shape of output is :math:`(x_0, x_2, ..., x_R)`.
6683        - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
6684          the shape of output is :math:`(x_0, x_3, ..., x_R)`.
6685
6686    Raises:
6687        TypeError: If `x` is not a Tensor.
6688        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
6689        TypeError: If `keep_dims` is not a bool.
6690        ValueError: If `axis` is out of range.
6691
6692    Supported Platforms:
6693        ``Ascend`` ``GPU`` ``CPU``
6694
6695    Examples:
6696        >>> import mindspore
6697        >>> import numpy as np
6698        >>> from mindspore import Tensor, ops
6699        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
6700        >>> output = ops.mean(x, 1, keep_dims=True)
6701        >>> result = output.shape
6702        >>> print(result)
6703        (3, 1, 5, 6)
6704        >>> # case 1: Reduces a dimension by averaging all elements in the dimension.
6705        >>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
6706        ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
6707        ... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
6708        ... mindspore.float32)
6709        >>> output = ops.mean(x)
6710        >>> print(output)
6711        5.0
6712        >>> print(output.shape)
6713        ()
6714        >>> # case 2: Reduces a dimension along the axis 0
6715        >>> output = ops.mean(x, 0, True)
6716        >>> print(output)
6717        [[[4. 4. 4. 4. 4. 4.]
6718          [5. 5. 5. 5. 5. 5.]
6719          [6. 6. 6. 6. 6. 6.]]]
6720        >>> # case 3: Reduces a dimension along the axis 1
6721        >>> output = ops.mean(x, 1, True)
6722        >>> print(output)
6723        [[[2. 2. 2. 2. 2. 2.]]
6724         [[5. 5. 5. 5. 5. 5.]]
6725         [[8. 8. 8. 8. 8. 8.]]]
6726        >>> # case 4: Reduces a dimension along the axis 2
6727        >>> output = ops.mean(x, 2, True)
6728        >>> print(output)
6729        [[[ 2.]
6730          [ 2.]
6731          [ 2.]]
6732         [[ 4.]
6733          [ 5.]
6734          [ 6.]]
6735         [[ 6.]
6736          [ 8.]
6737          [10.]]]
6738    """
6739    if axis is None:
6740        axis = ()
6741    return _get_cache_prim(P.ReduceMean)(keep_dims)(x, axis)
6742
6743
6744def mean_ext(input, axis=None, keep_dims=False, dtype=None):
6745    r"""
6746    Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
6747    And reduce a dimension of `input` along the specified `axis`. `keep_dims`
6748    determines whether the dimensions of the output and input are the same.
6749
6750    Note:
6751        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
6752
6753    Args:
6754        input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
6755            :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
6756        axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` ,
6757            reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r,
6758            and the value range is [-r,r).
6759        keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
6760            If ``False`` , don't keep these dimensions. Default: ``False`` .
6761        dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
6762
6763    Returns:
6764        Tensor, has the same data type as the `input`.
6765
6766        - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
6767          the output is a 0-D tensor representing the product of all elements in the input tensor.
6768        - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
6769          the shape of output is :math:`(input_0, input_2, ..., input_R)`.
6770        - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
6771          the shape of output is :math:`(input_0, input_3, ..., input_R)`.
6772        - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
6773          the shape of output is :math:`(input_0, input_3, ..., input_R)`.
6774
6775    Raises:
6776        TypeError: If `input` is not a Tensor.
6777        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
6778        TypeError: If `keep_dims` is not a bool.
6779        ValueError: If `axis` is out of range.
6780
6781    Supported Platforms:
6782        ``Ascend`` ``GPU`` ``CPU``
6783
6784    Examples:
6785        >>> import mindspore
6786        >>> import numpy as np
6787        >>> from mindspore import Tensor, ops
6788        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
6789        >>> output = ops.function.math_func.mean_ext(x, 1, keep_dims=True)
6790        >>> result = output.shape
6791        >>> print(result)
6792        (3, 1, 5, 6)
6793        >>> # case 1: Reduces a dimension by averaging all elements in the dimension.
6794        >>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
6795        ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
6796        ... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
6797        ... mindspore.float32)
6798        >>> output = ops.function.math_func.mean_ext(x)
6799        >>> print(output)
6800        5.0
6801        >>> print(output.shape)
6802        ()
6803        >>> # case 2: Reduces a dimension along the axis 0
6804        >>> output = ops.function.math_func.mean_ext(x, 0, True)
6805        >>> print(output)
6806        [[[4. 4. 4. 4. 4. 4.]
6807        [5. 5. 5. 5. 5. 5.]
6808        [6. 6. 6. 6. 6. 6.]]]
6809        >>> # case 3: Reduces a dimension along the axis 1
6810        >>> output = ops.function.math_func.mean_ext(x, 1, True)
6811        >>> print(output)
6812        [[[2. 2. 2. 2. 2. 2.]]
6813        [[5. 5. 5. 5. 5. 5.]]
6814        [[8. 8. 8. 8. 8. 8.]]]
6815        >>> # case 4: Reduces a dimension along the axis 2
6816        >>> output = ops.function.math_func.mean_ext(x, 2, True)
6817        >>> print(output)
6818        [[[ 2.]
6819        [ 2.]
6820        [ 2.]]
6821        [[ 4.]
6822        [ 5.]
6823        [ 6.]]
6824        [[ 6.]
6825        [ 8.]
6826        [10.]]]
6827        """
6828    return mean_ext_op(input, axis, keep_dims, dtype)
6829
6830
6831def prod(input, axis=None, keep_dims=False, dtype=None):
6832    r"""
6833    Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
6834    reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
6835    same by controlling `keep_dims`.
6836
6837    Note:
6838        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
6839
6840    Args:
6841        input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
6842            :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
6843        axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` , reduce all
6844            dimensions. Only constant value is allowed. Assume the rank of `x` is r, and the value range is [-r,r).
6845        keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
6846            If ``False`` , don't keep these dimensions. Default: ``False`` .
6847        dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
6848
6849    Returns:
6850        Tensor, has the same data type as the `input`.
6851
6852        - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
6853          the output is a 0-D tensor representing the product of all elements in the input tensor.
6854        - If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
6855          the shape of output is :math:`(input_0, input_2, ..., input_R)`.
6856        - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
6857          the shape of output is :math:`(input_0, input_3, ..., input_R)`.
6858        - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
6859          the shape of output is :math:`(input_0, input_3, ..., input_R)`.
6860
6861    Raises:
6862        TypeError: If `input` is not a Tensor.
6863        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
6864        TypeError: If `keep_dims` is not a bool.
6865        ValueError: If `axis` is out of range.
6866
6867    Supported Platforms:
6868        ``Ascend`` ``GPU`` ``CPU``
6869
6870    Examples:
6871        >>> import mindspore
6872        >>> import numpy as np
6873        >>> from mindspore import Tensor, ops
6874        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
6875        >>> output = ops.prod(x, 1, keep_dims=True)
6876        >>> result = output.shape
6877        >>> print(result)
6878        (3, 1, 5, 6)
6879        >>> # case 1: Reduces a dimension by multiplying all elements in the dimension.
6880        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
6881        ...                      [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
6882        ...                      [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
6883        >>> output = ops.prod(x)
6884        >>> print(output)
6885        2.2833798e+33
6886        >>> print(output.shape)
6887        ()
6888        >>> # case 2: Reduces a dimension along axis 0.
6889        >>> output = ops.prod(x, 0, True)
6890        >>> print(output)
6891        [[[ 28.  28.  28.  28.  28.  28.]
6892          [ 80.  80.  80.  80.  80.  80.]
6893          [162. 162. 162. 162. 162. 162.]]]
6894        >>> # case 3: Reduces a dimension along axis 1.
6895        >>> output = ops.prod(x, 1, True)
6896        >>> print(output)
6897        [[[  6.   6.   6.   6.   6.   6.]]
6898         [[120. 120. 120. 120. 120. 120.]]
6899         [[504. 504. 504. 504. 504. 504.]]]
6900        >>> # case 4: Reduces a dimension along axis 2.
6901        >>> output = ops.prod(x, 2, True)
6902        >>> print(output)
6903        [[[1.00000e+00]
6904          [6.40000e+01]
6905          [7.29000e+02]]
6906         [[4.09600e+03]
6907          [1.56250e+04]
6908          [4.66560e+04]]
6909         [[1.17649e+05]
6910          [2.62144e+05]
6911          [5.31441e+05]]]
6912    """
6913    if not isinstance(axis, (tuple, list, Tensor)):
6914        return prod_ext_op(input, axis, keep_dims, dtype)
6915    if dtype is not None:
6916        input = input.astype(dtype)
6917    return _get_cache_prim(P.ReduceProd)(keep_dims)(input, axis)
6918
6919
6920def _multi_svd_norm(x, row_axis, col_axis, op):
6921    """_multi_svd_norm for norm."""
6922    y = _moveaxis(x.astype(mstype.float32), (row_axis, col_axis), (-2, -1))
6923    svd_res = ops.svd(y, compute_uv=False)
6924    if op == 'amax':
6925        return ops.amax(svd_res, axis=-1)
6926    if op == 'amin':
6927        return ops.amin(svd_res, axis=-1)
6928    if op == 'sum':
6929        return ops.sum(svd_res, dim=-1)
6930    raise ValueError(f"For svd_norm, the op input must be one of ['amax', 'amin', 'sum'], but got f{op}")
6931
6932
6933def _normalize_axis_index(axis, ndim):
6934    """normalize_axis_index for norm."""
6935    # pylint: disable=chained-comparison
6936    if axis >= 0 and axis < ndim:
6937        return axis
6938    # pylint: disable=chained-comparison
6939    if axis < 0 and axis >= -ndim:
6940        return ndim + axis
6941    raise ValueError('For norm, the dim is out of range.')
6942
6943
6944@_primexpr
6945def _get_perm_for_norm(x_ndim, source, destination):
6946    destination = tuple([_normalize_axis_index(ax, x_ndim) for ax in destination])
6947    source = tuple([_normalize_axis_index(ax, x_ndim) for ax in source])
6948    perm = [n for n in range(x_ndim) if n not in source]
6949    for dest, src in sorted(zip(destination, source)):
6950        perm.insert(dest, src)
6951    perm = tuple(perm)
6952    return perm
6953
6954
6955def _moveaxis(x, source, destination):
6956    perm = _get_perm_for_norm(x.ndim, source, destination)
6957    return ops.transpose(x, perm)
6958
6959
6960@_primexpr
6961def _check_axis(axis, ord, ndim):
6962    """axis check"""
6963    if axis is None:
6964        axis = tuple(range(ndim))
6965        if (ord is None) or (ord == 'fro' and ndim == 2) or (ord == 2 and ndim == 1):
6966            return axis, True
6967        return axis, False
6968    if isinstance(axis, int):
6969        axis = (axis,)
6970    elif isinstance(axis, tuple):
6971        if len(axis) > 2:
6972            raise ValueError("For norm, the dimensions is out of range.")
6973    else:
6974        raise TypeError(f'For norm, the dim should be int or tuple of int, but got {type(axis)}')
6975    return axis, False
6976
6977
6978@_primexpr
6979def _check_ord(ord, axis):
6980    if len(axis) == 1:
6981        if isinstance(ord, str):
6982            raise TypeError(f"For norm, ord mode can not be str for vectors, but got {ord}.")
6983    elif len(axis) == 2:
6984        if ord not in [2, -2, 1, -1, float('inf'), -float('inf'), 'fro', 'nuc', None]:
6985            raise ValueError(f"For norm, the ord mode must be in "
6986                             f"[2, -2, 1, -1, float('inf'), -float('inf'), 'fro', 'nuc', None] for matrices, "
6987                             f"but got {ord}.")
6988
6989
6990def _check_dtype(d1, d2):
6991    if mstype.float32 in (d1, d2):
6992        return mstype.float32
6993    if d1 == d2:
6994        return d1
6995    raise ValueError('the dtype is not supported.')
6996
6997
6998@_primexpr
6999def _check_last_dim_shape_eq(a, b):
7000    if a.shape[-1] != b.shape[-1]:
7001        raise ValueError('shapes are not aligned')
7002
7003
7004def _complex_square(A):
7005    """calculate square with complex or not"""
7006    if ops.is_complex(A):
7007        return ops.conj(A) * A
7008    return ops.square(A)
7009
7010
7011def norm(A, ord=None, dim=None, keepdim=False, *, dtype=None):
7012    r"""
7013    Returns the matrix norm or vector norm of a given tensor.
7014
7015    `ord` is the calculation mode of norm. The following norm modes are supported.
7016
7017    ====================== ================================ ==========================================
7018    `ord`                   norm for matrices               norm for vectors
7019    ====================== ================================ ==========================================
7020    `None` (default)        Frobenius norm                   `2`-norm (see below)
7021    `'fro'`                 Frobenius norm                   -- not supported --
7022    `'nuc'`                 nuclear norm                     -- not supported --
7023    `inf`                   :math:`max(sum(abs(x), dim=1))`  :math:`max(abs(x))`
7024    `-inf`                  :math:`min(sum(abs(x), dim=1))`  :math:`min(abs(x))`
7025    `0`                     -- not supported --              :math:`sum(x != 0)`
7026    `1`                     :math:`max(sum(abs(x), dim=0))`  as below
7027    `-1`                    :math:`min(sum(abs(x), dim=0))`  as below
7028    `2`                     largest singular value           as below
7029    `-2`                    smallest singular value          as below
7030    other `int` or `float`  -- not supported --              :math:`sum(abs(x)^{ord})^{(1 / ord)}`
7031    ====================== ================================ ==========================================
7032
7033    Args:
7034        A (Tensor): Tensor of shape :math:`(*, n)` or :math:`(*, m, n)` where * is zero or more batch dimensions.
7035        ord (Union[int, float, inf, -inf, 'fro', 'nuc'], optional): norm's mode. refer to the table above for
7036            behavior. Default: ``None`` .
7037        dim (Union[int, Tuple(int)], optional): calculate the dimension of vector norm or matrix norm.
7038            Default: ``None`` .
7039
7040            - When `dim` is int, it will be calculated by vector norm.
7041
7042            - When `dim` is a 2-tuple, it will be calculated by matrix norm.
7043
7044            - If `dim` is None and `ord` is None, `A` will be flattened to 1D and the 2-norm
7045              of the vector will be calculated.
7046
7047            - If `dim` is None and `ord` is not None, `A` must be 1D or 2D.
7048
7049        keepdim (bool): whether the output Tensor retains the original dimension. Default: ``False`` .
7050
7051    Keyword Args:
7052        dtype (:class:`mindspore.dtype`, optional): When set, `A` will be converted to the specified type,
7053            `dtype`, before execution, and dtype of returned Tensor will also be `dtype`. Default: ``None`` .
7054
7055    Returns:
7056        Tensor, the result of norm calculation on the specified dimension, `dim`, has the same dtype as `A`.
7057
7058    Raises:
7059        ValueError: If `dim` is out of range.
7060        TypeError: If `dim` is neither an int nor a tuple of int.
7061        TypeError: If `A` is a vector and `ord` is a str.
7062        ValueError: If `A` is a matrices and `ord` is not in valid mode.
7063        ValueError: If `A` is a matrices and `ord` is an integer but not in [1, -1, 2, -2].
7064        ValueError: If two elements of `dim` is same after normalize.
7065        ValueError: If any elements of `dim` is out of range.
7066
7067    Supported Platforms:
7068        ``Ascend`` ``GPU`` ``CPU``
7069
7070    Note:
7071        Currently, complex numbers are not supported.
7072
7073    Examples:
7074        >>> import mindspore as ms
7075        >>> from mindspore import ops
7076        >>> data_range = ops.arange(-13, 13, dtype=ms.float32)
7077        >>> # Exclude 0 from original data for 0 is invalid input when `ord` is negative.
7078        >>> x = data_range[data_range != 0]
7079        >>> y = x.reshape(5, 5)
7080        >>> print(ops.norm(x))
7081        38.327538
7082        >>> print(ops.norm(x, float('inf')))
7083        13.0
7084        >>> print(ops.norm(x, float('-inf')))
7085        1.0
7086        >>> print(ops.norm(x, 0))
7087        25.0
7088        >>> print(ops.norm(x, 1))
7089        169.0
7090        >>> print(ops.norm(x, -1))
7091        0.15915091
7092        >>> print(ops.norm(x, 2))
7093        38.327538
7094        >>> print(ops.norm(x, -2))
7095        0.5647041
7096        >>> print(ops.norm(x, 3))
7097        24.309084
7098        >>> print(ops.norm(x, -3))
7099        0.74708974
7100        >>> print(ops.norm(y))
7101        38.327538
7102        >>> print(ops.norm(y, 'fro'))
7103        38.327538
7104        >>> print(ops.norm(y, 'nuc'))
7105        45.56681
7106        >>> print(ops.norm(y, float('inf')))
7107        55.0
7108        >>> print(ops.norm(y, float('-inf')))
7109        9.0
7110        >>> print(ops.norm(y, 1))
7111        35.0
7112        >>> print(ops.norm(y, -1))
7113        33.0
7114        >>> print(ops.norm(y, 2))
7115        37.57774
7116        >>> print(ops.norm(y, -2))
7117        1.590545e-07
7118        >>> m = ms.Tensor([[1., -1., 2.], [-2., 3., -4.]])
7119        >>> print(ops.norm(m, dim=0))
7120        [2.236068  3.1622777 4.472136 ]
7121        >>> print(ops.norm(m, dim=1))
7122        [2.4494898 5.3851647]
7123        >>> print(ops.norm(m, ord=1, dim=1))
7124        [4. 9.]
7125        >>> print(ops.norm(m, ord=-2, dim=0))
7126        [0.8944272  0.94868326 1.7888544 ]
7127        >>> print(ops.norm(m, ord=2, dim=1))
7128        [2.4494898 5.3851647]
7129        >>> n = ops.arange(27, dtype=ms.float32).reshape(3, 3, 3)
7130        >>> print(ops.norm(n, dim=(1, 2)))
7131        [14.282857 39.76179  66.45299 ]
7132        >>> print(ops.norm(n[0, :, :]), ops.norm(n[1, :, :]), ops.norm(n[2, :, :]))
7133        14.282857 39.76179 66.45299
7134    """
7135    ndim = A.ndim
7136    dim, immediate = _check_axis(dim, ord, ndim)
7137    _check_ord(ord, dim)
7138    if dtype is not None:
7139        A = ops.cast(A, dtype)
7140    # Immediately handle some default, simple, fast, and common cases.
7141    if immediate:
7142        ret = ops.sqrt(ops.reduce_sum(_complex_square(A), dim))
7143        if keepdim:
7144            ret = ret.reshape(ndim * [1])
7145        return ret
7146
7147    if isinstance(ord, int):
7148        if len(dim) == 2:
7149            row_axis, col_axis = dim
7150            row_axis = _normalize_axis_index(row_axis, ndim)
7151            col_axis = _normalize_axis_index(col_axis, ndim)
7152            if ord == 1:
7153                if col_axis > row_axis:
7154                    col_axis -= 1
7155                ret = ops.max(A.abs().sum(row_axis), axis=col_axis)[0]
7156            elif ord == -1:
7157                if col_axis > row_axis:
7158                    col_axis -= 1
7159                ret = ops.min(A.abs().sum(row_axis), axis=col_axis)[0]
7160            elif ord == 2:
7161                ret = _multi_svd_norm(A, row_axis, col_axis, 'amax')
7162            elif ord == -2:
7163                ret = _multi_svd_norm(A, row_axis, col_axis, 'amin')
7164            else:
7165                raise ValueError(f"For norm, the ord {ord} are not support for matrices.")
7166            if keepdim:
7167                ret_shape = list(A.shape)
7168                ret_shape[dim[0]] = 1
7169                ret_shape[dim[1]] = 1
7170                ret = ret.reshape(ret_shape)
7171            return ret
7172        if len(dim) == 1:
7173            if ord == 0:
7174                return (A != 0).astype(A.dtype).sum(axis=dim, keepdims=keepdim)
7175            if ord > 0:
7176                _lp_norm = _get_cache_prim(ops.LpNorm)(dim, ord, keepdim)
7177                return _lp_norm(A)
7178            return ops.sum(ops.abs(A).pow(ord), dim=dim, keepdim=keepdim).pow(1.0 / ord)
7179    if len(dim) == 1:
7180        if ord == float('inf'):
7181            return ops.max(ops.abs(A), axis=dim[0], keepdims=keepdim)[0]
7182        if ord == -float('inf'):
7183            return ops.min(ops.abs(A), axis=dim[0], keepdims=keepdim)[0]
7184        if ord == 0:
7185            # Zero norm
7186            return (A != 0).astype(A.dtype).sum(axis=dim, keepdims=keepdim)
7187        if ord is None:
7188            # special case for speedup
7189            s = _complex_square(A)
7190            reduce_sum = _get_cache_prim(ops.ReduceSum)(keepdim)
7191            return ops.sqrt(reduce_sum(s, dim))
7192        # None of the str-type keywords for ord ('fro', 'nuc')
7193        # are valid for vectors
7194        absx = ops.abs(A)
7195        absx **= ord
7196        reduce_sum = _get_cache_prim(ops.ReduceSum)(keepdim)
7197        ret = reduce_sum(absx, dim)
7198        if isinstance(ord, Tensor):
7199            ret **= ops.reciprocal(ord)
7200        else:
7201            ret **= 1 / ord
7202        return ret
7203    if len(dim) == 2:
7204        row_axis, col_axis = dim
7205        row_axis = _normalize_axis_index(row_axis, ndim)
7206        col_axis = _normalize_axis_index(col_axis, ndim)
7207        if row_axis == col_axis:
7208            raise ValueError('For norm, the elements of dim can not be duplicate.')
7209
7210        if ord == float('inf'):
7211            if row_axis > col_axis:
7212                row_axis -= 1
7213            ret = ops.max(ops.reduce_sum(abs(A), col_axis), axis=row_axis)[0]
7214        elif ord == -float('inf'):
7215            if row_axis > col_axis:
7216                row_axis -= 1
7217            ret = ops.min(ops.reduce_sum(abs(A), col_axis), axis=row_axis)[0]
7218        elif ord == 'fro':
7219            ret = ops.sqrt(ops.reduce_sum(_complex_square(A), dim))
7220        elif ord == 'nuc':
7221            ret = _multi_svd_norm(A, row_axis, col_axis, 'sum')
7222        else:
7223            ret = ops.sqrt(ops.reduce_sum(_complex_square(A), dim))
7224        if keepdim:
7225            ret_shape = list(A.shape)
7226            ret_shape[dim[0]] = 1
7227            ret_shape[dim[1]] = 1
7228            ret = ret.reshape(ret_shape)
7229        return ret
7230    return None
7231
7232
7233@_primexpr
7234def _check_vector_norm_axis(axis, ndim):
7235    """vector_norm axis check"""
7236    if (not isinstance(axis, int)) and (not isinstance(axis, tuple)) and (axis is not None):
7237        raise TypeError(f'For vector_norm , the dim must be tuple or int, but got {type(axis)}')
7238
7239    if axis is None:
7240        axis = tuple(range(ndim))
7241    if isinstance(axis, int):
7242        axis = (axis,)
7243
7244    dim = []
7245    for elem_dim in axis:
7246        elem_dim = _normalize_axis_index(elem_dim, ndim)
7247        if elem_dim in dim:
7248            raise ValueError('For vector_norm, the elements of axis can not be duplicate.')
7249        dim.append(elem_dim)
7250    tuple_dim = tuple(dim)
7251    return tuple_dim
7252
7253
7254@_primexpr
7255def _check_vector_norm_ord(ord):
7256    """vector_norm ord check"""
7257    if ord not in [0, 2, float('inf'), -float('inf')] and not isinstance(ord, (int, float)):
7258        raise ValueError(f"For vector_norm, the ord mode must be in [0, 2, float('inf'), -float('inf')] "
7259                         f"or must be int or float, but got {ord}.")
7260
7261
7262def _compute_vector_norm_inf(x, dim, keepdims, norm_func):
7263    """compute vector norm of `x` when ord is ``inf`` or ``-inf`` """
7264    if len(dim) == 1:
7265        ret_norm = norm_func(ops.abs(x), axis=dim[0], keepdims=keepdims)[0]
7266    else:
7267        start_dim = min(dim)
7268        end_dim = max(dim)
7269        flatten_x = ops.flatten(x, start_dim=start_dim, end_dim=end_dim)
7270        ret_norm = norm_func(ops.abs(flatten_x), axis=start_dim, keepdims=False)[0]
7271        if keepdims is True:
7272            ret_shape = list(x.shape)
7273            for i in dim:
7274                ret_shape[i] = 1
7275            ret_norm = ret_norm.reshape(ret_shape)
7276    return ret_norm
7277
7278
7279def norm_ext(A, ord=None, dim=None, keepdim=False, *, dtype=None):
7280    r"""
7281    Returns the matrix norm or vector norm of a given tensor.
7282
7283    `ord` is the calculation mode of norm. The following norm modes are supported.
7284
7285    ====================== ================================ ==========================================
7286    `ord`                   norm for matrices               norm for vectors
7287    ====================== ================================ ==========================================
7288    `None` (default)        Frobenius norm                   `2`-norm (see below)
7289    `'fro'`                 Frobenius norm                   -- not supported --
7290    `'nuc'`                 nuclear norm                     -- not supported --
7291    `inf`                   :math:`max(sum(abs(x), dim=1))`  :math:`max(abs(x))`
7292    `-inf`                  :math:`min(sum(abs(x), dim=1))`  :math:`min(abs(x))`
7293    `0`                     -- not supported --              :math:`sum(x != 0)`
7294    `1`                     :math:`max(sum(abs(x), dim=0))`  as below
7295    `-1`                    :math:`min(sum(abs(x), dim=0))`  as below
7296    `2`                     largest singular value           as below
7297    `-2`                    smallest singular value          as below
7298    other `int` or `float`  -- not supported --              :math:`sum(abs(x)^{ord})^{(1 / ord)}`
7299    ====================== ================================ ==========================================
7300
7301    Args:
7302        A (Tensor): Tensor of shape :math:`(*, n)` or :math:`(*, m, n)` where * is zero or more batch dimensions.
7303        ord (Union[int, float, inf, -inf, 'fro', 'nuc'], optional): norm's mode. refer to the table above for
7304            behavior. Default: ``None`` .
7305        dim (Union[int, Tuple(int)], optional): calculate the dimension of vector norm or matrix norm.
7306            Default: ``None`` .
7307
7308            - When `dim` is int, it will be calculated by vector norm.
7309
7310            - When `dim` is a 2-tuple, it will be calculated by matrix norm.
7311
7312            - If `dim` is None and `ord` is None, `A` will be flattened to 1D and the 2-norm
7313              of the vector will be calculated.
7314
7315            - If `dim` is None and `ord` is not None, `A` must be 1D or 2D.
7316
7317        keepdim (bool): whether the output Tensor retains the original dimension. Default: ``False`` .
7318
7319    Keyword Args:
7320        dtype (:class:`mindspore.dtype`, optional): When set, `A` will be converted to the specified type,
7321            `dtype`, before execution, and dtype of returned Tensor will also be `dtype`. Default: ``None`` .
7322
7323    Returns:
7324        Tensor, the result of norm calculation on the specified dimension, `dim`, has the same dtype as `A`.
7325
7326    Raises:
7327        ValueError: If `dim` is out of range.
7328        TypeError: If `dim` is neither an int nor a tuple of int.
7329        TypeError: If `A` is a vector and `ord` is a str.
7330        ValueError: If `A` is a matrices and `ord` is not in valid mode.
7331        ValueError: If `A` is a matrices and `ord` is an integer but not in [1, -1, 2, -2].
7332        ValueError: If two elements of `dim` is same after normalize.
7333        ValueError: If any elements of `dim` is out of range.
7334
7335    Supported Platforms:
7336        ``Ascend``
7337
7338    Note:
7339        Currently, it only support `ops.function.math_func.norm_ext(A)`.
7340
7341    Examples:
7342        >>> import mindspore as ms
7343        >>> from mindspore import ops
7344        >>> data_range = ops.arange(-13, 13, dtype=ms.float32)
7345        >>> # Exclude 0 from original data for 0 is invalid input when `ord` is negative.
7346        >>> x = data_range[data_range != 0]
7347        >>> y = x.reshape(5, 5)
7348        >>> print(ops.function.math_func.norm_ext(x))
7349        38.327538
7350        >>> print(ops.norm(x, 0))
7351        25.0
7352    """
7353    norm_ext_op = Norm()
7354    return norm_ext_op(A, ord, dim, keepdim, dtype)
7355
7356
7357def vector_norm(x, ord=2, axis=None, keepdims=False, *, dtype=None):
7358    r"""
7359    Returns the vector norm of the given tensor on the specified dimensions.
7360
7361    `ord` is the calculation mode of norm. The following norm modes are supported.
7362
7363    ==========================      ==========================================
7364    `ord`                           norm for vectors
7365    ==========================      ==========================================
7366    ``2`` (Default)                 ``2``-norm (see below)
7367    ``inf``                         :math:`max(abs(x))`
7368    ``-inf``                        :math:`min(abs(x))`
7369    ``0``                           :math:`sum(x!=0)`
7370    other ``int`` or ``float``      :math:`sum(abs(x)^{ord})^{(1 / ord)}`
7371    ==========================      ===========================================
7372
7373    Args:
7374        x (Tensor): Tensor of shape :math:`(*, n)` where * is zero s more batch dimensions.
7375        ord (Union[int, float, inf, -inf], optional): norm's mode. refer to the table above for
7376            behavior. Default: ``2`` .
7377        axis (Union[int, Tuple(int)], optional): The dimensions along which to perform the vector norm calculation.
7378            Default: ``None`` .
7379
7380            - When `axis` is int or a tuple, the norm calculation will be performed across these specified dimensions,
7381              while the remaining dimensions will be considered as batch dimensions.
7382
7383            - When `dim` is None, the norm will be calculated after flattening the Tensor `x` .
7384
7385        keepdims (bool): whether the output Tensor retains the original dimension. Default: ``False`` .
7386
7387    Keyword Args:
7388        dtype (:class:`mindspore.dtype`, optional): When set, `x` will be converted to the specified type,
7389            `dtype` before execution, and dtype of returned Tensor will also be `dtype`.
7390            When `dtype` is ``None`` , the dtype of `A` is preserved. Default: ``None`` .
7391
7392    Returns:
7393        Tensor, the result of norm calculation on the specified dimension, `axis`, has the same dtype as `x`.
7394
7395    Raises:
7396        TypeError: If `axis` is not an int or tuple.
7397        ValueError: If `ord` is not in [int, float, inf, -inf].
7398        ValueError: The elements of `axis` are duplicate.
7399        ValueError: If any elements of `axis` is out of range.
7400
7401    Supported Platforms:
7402        ``Ascend`` ``GPU`` ``CPU``
7403
7404    Examples:
7405        >>> import mindspore as ms
7406        >>> x = ms.ops.arange(0, 12, dtype=ms.float32) - 6
7407        >>> print(ms.ops.vector_norm(x, ord=2))
7408        12.083046
7409        >>> print(ms.ops.vector_norm(x, ord=float('inf')))
7410        6.0
7411        >>> print(ms.ops.vector_norm(x, ord=float('-inf')))
7412        0.0
7413        >>> print(ms.ops.vector_norm(x, ord=0))
7414        11.0
7415        >>> print(ms.ops.vector_norm(x, ord=4.5))
7416        7.2243643
7417    """
7418    ndim = x.ndim
7419    dim = _check_vector_norm_axis(axis, ndim)
7420    _check_vector_norm_ord(ord)
7421
7422    if dtype is not None:
7423        x = ops.cast(x, dtype)
7424
7425    if ord == 2:
7426        s = _complex_square(x)
7427        reduce_sum = _get_cache_prim(ops.ReduceSum)(keepdims)
7428        return ops.sqrt(reduce_sum(s, dim))
7429    if ord == float('inf'):
7430        inf_norm = _compute_vector_norm_inf(x, dim, keepdims, ops.max)
7431        return inf_norm
7432    if ord == float('-inf'):
7433        inf_norm = _compute_vector_norm_inf(x, dim, keepdims, ops.min)
7434        return inf_norm
7435    if ord == 0:
7436        return (x != 0).astype(x.dtype).sum(axis=dim, keepdims=keepdims)
7437    # ord is other int or float
7438    abs_x = ops.abs(x)
7439    abs_x **= ord
7440    reduce_sum = _get_cache_prim(ops.ReduceSum)(keepdims)
7441    ret = reduce_sum(abs_x, dim)
7442    ret **= 1 / ord
7443    return ret
7444
7445
7446@_primexpr
7447def _check_matrix_norm_axis(axis, ndim):
7448    """matrix_norm axis check"""
7449    if not isinstance(axis, tuple):
7450        raise TypeError(f'For matrix_norm , the axis should be tuple of int, but got {type(axis)}')
7451    if len(axis) != 2:
7452        raise ValueError(f'For matrix_norm, the length of axis should be 2, but got {len(axis)}.')
7453
7454    row_axis, col_axis = axis
7455    row_axis = _normalize_axis_index(row_axis, ndim)
7456    col_axis = _normalize_axis_index(col_axis, ndim)
7457    if row_axis == col_axis:
7458        raise ValueError('For matrix_norm, the elements of axis can not be duplicate.')
7459    return row_axis, col_axis
7460
7461
7462@_primexpr
7463def _check_matrix_norm_ord(ord):
7464    """matrix_norm ord check"""
7465    if ord not in [2, -2, 1, -1, float('inf'), float('-inf'), 'fro', 'nuc']:
7466        raise ValueError(f"For matrix_norm, the ord mode must be in "
7467                         f"[2, -2, 1, -1, float('inf'), float('-inf'), 'fro', 'nuc'] "
7468                         f"but got {ord}.")
7469
7470
7471def matrix_norm(A, ord='fro', axis=(-2, -1), keepdims=False, *, dtype=None):
7472    r"""
7473    Returns the matrix norm of a given tensor on the specified dimensions.
7474
7475    `ord` is the calculation mode of norm. The following norm modes are supported.
7476
7477    ====================== ================================
7478    `ord`                  norm for matrix
7479    ====================== ================================
7480    ``'fro'`` (Default)    Frobenius norm
7481    ``'nuc'``              nuclear norm
7482    ``inf``                :math:`max(sum(abs(x), dim=1))`
7483    ``-inf``               :math:`min(sum(abs(x), dim=1))`
7484    ``1``                  :math:`max(sum(abs(x), dim=0))`
7485    ``-1``                 :math:`min(sum(abs(x), dim=0))`
7486    ``2``                  largest singular value
7487    ``-2``                 smallest singular value
7488    ====================== ================================
7489
7490    Args:
7491        A (Tensor): Tensor of shape :math:`(*, m, n)` where * is zero or more batch dimensions.
7492        ord (Union[int, inf, -inf, 'fro', 'nuc'], optional): norm's mode. refer to the table above for
7493            behavior. Default: ``'fro'`` .
7494        axis (Tuple(int, int), optional): calculate the dimension of the matrix norm.
7495            Default: ``(-2, -1)`` .
7496        keepdims (bool): whether the output Tensor retains the original dimension. Default: ``False`` .
7497
7498    Keyword Args:
7499        dtype (:class:`mindspore.dtype`, optional): When set, `A` will be converted to the specified type,
7500            `dtype`, before execution, and dtype of returned Tensor will also be `dtype`.
7501            When `dtype` is ``None`` , the dtype of `A` is preserved. Default: ``None`` .
7502
7503    Returns:
7504        Tensor, the result of norm calculation on the specified dimension, `axis`, has the same dtype as `A`.
7505
7506    Raises:
7507        TypeError: If `axis` is not a tuple of int.
7508        ValueError: If the length of `axis` is not equal to 2.
7509        ValueError: If `ord` is not in [2, -2, 1, -1, float('inf'), float('-inf'), 'fro', 'nuc'].
7510        ValueError: If two elements of `axis` is same after normalize.
7511        ValueError: If any elements of `axis` is out of range.
7512
7513    Supported Platforms:
7514        ``GPU`` ``CPU``
7515
7516    Examples:
7517        >>> import mindspore as ms
7518        >>> A = ms.ops.arange(0, 12, dtype=ms.float32).reshape(3, 4)
7519        >>> print(ms.ops.matrix_norm(x, ord='fro'))
7520        22.494444
7521        >>> print(ms.ops.matrix_norm(x, ord='nuc'))
7522        24.364643
7523        >>> print(ms.ops.matrix_norm(x, ord=float('inf')))
7524        38.0
7525        >>> print(ms.ops.matrix_norm(x, ord=float('-inf')))
7526        6.0
7527        >>> print(ms.ops.vector_norm(x, ord=1))
7528        21.0
7529        >>> print(ms.ops.vector_norm(x, ord=-1))
7530        12.0
7531        >>> print(ms.ops.vector_norm(x, ord=2))
7532        22.409302
7533        >>> print(ms.ops.vector_norm(x, ord=-2))
7534        1.672928e-07
7535    """
7536    ndim = A.ndim
7537    row_axis, col_axis = _check_matrix_norm_axis(axis, ndim)
7538    _check_matrix_norm_ord(ord)
7539    if dtype is not None:
7540        A = ops.cast(A, dtype)
7541
7542    ret = None
7543    if ord == 'fro':
7544        ret = ops.sqrt(ops.reduce_sum(_complex_square(A), axis))
7545    if ord == 'nuc':
7546        ret = _multi_svd_norm(A, row_axis, col_axis, 'sum')
7547    if ord == float('inf'):
7548        if row_axis > col_axis:
7549            row_axis -= 1
7550        ret = ops.max(ops.reduce_sum(abs(A), col_axis), axis=row_axis)[0]
7551    if ord == float('-inf'):
7552        if row_axis > col_axis:
7553            row_axis -= 1
7554        ret = ops.min(ops.reduce_sum(abs(A), col_axis), axis=row_axis)[0]
7555    if ord == 1:
7556        if col_axis > row_axis:
7557            col_axis -= 1
7558        ret = ops.max(A.abs().sum(row_axis), axis=col_axis)[0]
7559    if ord == -1:
7560        if col_axis > row_axis:
7561            col_axis -= 1
7562        ret = ops.min(A.abs().sum(row_axis), axis=col_axis)[0]
7563    if ord == 2:
7564        ret = _multi_svd_norm(A, row_axis, col_axis, 'amax')
7565    if ord == -2:
7566        ret = _multi_svd_norm(A, row_axis, col_axis, 'amin')
7567    if keepdims:
7568        ret_shape = list(A.shape)
7569        ret_shape[axis[0]] = 1
7570        ret_shape[axis[1]] = 1
7571        ret = ret.reshape(ret_shape)
7572    return ret
7573
7574
7575def lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True):
7576    r"""
7577    Converts `LU_data` and `LU_pivots` back into P, L and U matrices, where
7578    P is a permutation matrix, L is a lower triangular matrix, and U is an
7579    upper triangular matrix. Typically, `LU_data` and `LU_pivots` are generated
7580    from the LU decomposition of a matrix.
7581
7582    Args:
7583        LU_data (Tensor): The packed LU factorization data. A Tensor of shape :math:`(*, M, N)`, where :math:`*` is
7584            batch dimensions. The dim of `LU_data` must be equal to or greater than 2.
7585        LU_pivots (Tensor): The packed LU factorization pivots. A Tensor of shape :math:`(*, min(M, N))`,
7586            where :math:`*` is
7587            batch dimensions, with data type int8, uint8, int16, int32, int64.
7588        unpack_data (bool, optional): A flag indicating if the `LU_data` should be unpacked. If ``False`` ,
7589            then the returned L and U are None. Default: ``True`` .
7590        unpack_pivots (bool, optional): A flag indicating if the `LU_pivots` should be unpacked into
7591            a permutation matrix P. If ``False`` , then the returned P is None. Default: ``True`` .
7592
7593    Returns:
7594        - pivots(Tensor) - The permutation matrix of LU factorization.
7595          The shape is :math:`(*, M, M)`, the dtype is same as `LU_data`.
7596        - L (Tensor) - The L matrix  of LU factorization. The dtype is same as `LU_data`.
7597        - U (Tensor) - The U matrix  of LU factorization. The dtype is same as `LU_data`.
7598
7599    Raises:
7600        TypeError: If the dtype of `LU_data` is int, uint or float.
7601        TypeError: If the dtype of `LU_pivots` is not one of the following: int8, uint8, int16, int32, int64.
7602        ValueError: If the dimension of `LU_data` is less than 2.
7603        ValueError: If the dimension of `LU_pivots` is less than 1.
7604        ValueError: If the size of the last dimension of LU_pivots is not equal to the minimum of the sizes of the last
7605                    two dimensions of LU_data.
7606        ValueError: If the batch dimensions of LU_data's does not match LU_pivots's batch dimensions.
7607        ValueError: On the CPU platform, if the value of `LU_pivots` are out of range :math:`[1, LU\_data.shape[-2])`.
7608        RuntimeError: On the Ascend platform, if the value of `LU_pivots` are
7609                    out of range :math:`[1, LU\_data.shape[-2])`.
7610
7611    Supported Platforms:
7612        ``GPU`` ``CPU``
7613
7614    Examples:
7615        >>> import numpy as np
7616        >>> from mindspore import Tensor, ops
7617        >>> from mindspore import dtype as mstype
7618        >>> LU_data = Tensor(np.array([[[-0.3806, -0.4872,  0.5536],
7619        ...                             [-0.1287,  0.6508, -0.2396],
7620        ...                             [ 0.2583,  0.5239,  0.6902]],
7621        ...                            [[ 0.6706, -1.1782,  0.4574],
7622        ...                             [-0.6401, -0.4779,  0.6701],
7623        ...                             [ 0.1015, -0.5363,  0.6165]]]), mstype.float64)
7624        >>> LU_pivots = Tensor(np.array([[1, 3, 3],
7625        ...                              [2, 3, 3]]), mstype.int32)
7626        >>> pivots, L, U = ops.lu_unpack(LU_data, LU_pivots)
7627        >>> print(pivots)
7628        [[[1. 0. 0.]
7629          [0. 0. 1.]
7630          [0. 1. 0.]]
7631         [[0. 0. 1.]
7632          [1. 0. 0.]
7633          [0. 1. 0.]]]
7634        >>> print(L)
7635        [[[ 1.       0.       0.]
7636          [-0.1287   1.       0.]
7637          [ 0.2583   0.5239   1.]]
7638         [[ 1.0000   0.       0.]
7639          [-0.6401   1.       0.]
7640          [ 0.1015  -0.5363   1.]]]
7641        >>> print(U)
7642        [[[-0.3806  -0.4872   0.5536]
7643          [ 0.       0.6508  -0.2396]
7644          [ 0.       0.       0.6902]]
7645         [[ 0.6706  -1.1782   0.4574]
7646          [ 0.      -0.4779   0.6701]
7647          [ 0.       0.       0.6165]]]
7648    """
7649    pivots, l, u = lu_unpack_(LU_data, LU_pivots)
7650    if unpack_data:
7651        if unpack_pivots:
7652            return pivots, l, u
7653        return None, l, u
7654    if unpack_pivots:
7655        return pivots, None, None
7656    return None, None, None
7657
7658
7659def renorm(input, p, axis, maxnorm):
7660    """
7661    Renormalizes the sub-tensors along dimension `axis`, and each sub-tensor's p-norm should not exceed the
7662    `maxnorm`. The values of current sub-tensor don't need change if the p-norm of the sub-tensor is less than
7663    `maxnorm`. Otherwise the sub-tensor needs to be modified to the original value of the corresponding position
7664    divided by the p-norm of the substensor and then multiplied by `maxnorm`.
7665
7666    Args:
7667        input (Tensor): A Tensor, types: float32 or float16.
7668        p (int): Power of norm calculation.
7669        axis (int): The dimension that expected to get the slice-tensor.
7670        maxnorm (float32): Max norm.
7671
7672    Returns:
7673        Tensor, has the same dtype and shape as input.
7674
7675    Raises:
7676        TypeError: If dtype of `p` is not int.
7677        TypeError: If dtype of `axis` is not int.
7678        TypeError: If dtype of `maxnorm` is not float32.
7679        ValueError: If the value of `p` less than 1.
7680
7681    Supported Platforms:
7682        ``Ascend`` ``GPU`` ``CPU``
7683
7684    Examples:
7685        >>> import mindspore
7686        >>> import numpy as np
7687        >>> from mindspore import Tensor, ops
7688        >>> x = Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]), mindspore.float32)
7689        >>> y = ops.renorm(x, p=1, axis=0, maxnorm=5.)
7690        >>> print(y)
7691        [[1.       1.        1.        ]
7692        [1.6666666 1.6666666 1.6666666 ]
7693        [1.6666667 1.6666667 1.6666667 ]]
7694    """
7695    renorm_ = _get_cache_prim(Renorm)(p, axis, maxnorm)
7696    return renorm_(input)
7697
7698
7699@constexpr
7700def _check_attr_dtype(param_name, input_dtype, allow_dtypes, cls_name):
7701    validator.check_value_type(param_name, input_dtype, allow_dtypes, cls_name)
7702
7703
7704@_primexpr
7705def _check_positive_float(arg_value, arg_name, cls_name):
7706    validator.check_positive_float(arg_value, arg_name, cls_name)
7707
7708
7709@_primexpr
7710def _check_int_range(arg_value, lower_limit, upper_limit, arg_name=None, prim_name=None):
7711    validator.check_int_range(arg_value, lower_limit,
7712                              upper_limit, validator.INC_LEFT, arg_name, prim_name)
7713
7714
7715def _check_logits_tensor(logits):
7716    if not isinstance(logits, (Tensor, Tensor_)):
7717        raise TypeError("The input logits must be tensor")
7718
7719
7720def _check_logits_shape(logits):
7721    if not logits.shape:
7722        raise ValueError("For gumbel_softmax, the 0-D input is not supported.")
7723
7724
7725def gumbel_softmax(logits, tau=1.0, hard=False, dim=-1):
7726    r"""
7727    Returns the samples from the Gumbel-Softmax distribution and optionally discretizes. If `hard = True`, the returned
7728    samples will be one-hot, otherwise it will be probability distributions that sum to 1 across `dim`.
7729
7730    Args:
7731        logits (Tensor): Unnormalized log probabilities. The data type must be float16 or float32.
7732        tau (float): The scalar temperature, which is a positive number. Default: ``1.0`` .
7733        hard (bool): if `True`, the returned samples will be discretized as one-hot vectors, but will be differentiated
7734          as if it is the soft sample in autograd. Default: ``False`` .
7735        dim (int): Dim for softmax to compute. Default: ``-1`` .
7736
7737    Returns:
7738        Tensor, has the same dtype and shape as `logits`.
7739
7740    Raises:
7741        TypeError: If `logits` is not a Tensor.
7742        TypeError: If dtype of `logits` is not one of: float16, float32.
7743        TypeError: If `tau` is not a float.
7744        TypeError: If `hard` is not a bool.
7745        TypeError: If `dim` is not an int.
7746        ValueError: If If `tau` is not positive.
7747
7748    Supported Platforms:
7749        ``Ascend`` ``GPU`` ``CPU``
7750
7751    Examples:
7752        >>> import mindspore
7753        >>> import numpy as np
7754        >>> from mindspore import Tensor, ops
7755        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
7756        >>> output = ops.gumbel_softmax(input_x, 1.0, True, -1)
7757        >>> print(output.shape)
7758        (2, 3)
7759    """
7760    _check_logits_tensor(logits)
7761    _check_logits_shape(logits)
7762    logits_dtype = dtype_(logits)
7763    _check_input_dtype("logits", logits_dtype, [mstype.float16, mstype.float32], "gumbel_softmax")
7764    valid_types = [mstype.float16, mstype.float32]
7765    if logits_dtype not in valid_types:
7766        names = [t.__name__ if hasattr(t, "__name__") else t for t in valid_types]
7767        logits_dtype = logits_dtype.__name__ if hasattr(logits_dtype, '__name__') else repr(logits_dtype)
7768        raise TypeError(f"For 'gumbel_softmax', the 'logits' should be one of '{names}', but got type '{logits_dtype}'")
7769    _check_attr_dtype("tau", tau, [float], "gumbel_softmax")
7770    _check_attr_dtype("hard", hard, [bool], "gumbel_softmax")
7771    _check_attr_dtype("dim", dim, [int], "gumbel_softmax")
7772    _check_positive_float(tau, "tau", "gumbel_softmax")
7773    if hard:
7774        _check_int_range(dim, -1, len(logits.shape), 'dim', "gumbel_softmax")
7775    else:
7776        _check_int_range(dim, -len(logits.shape),
7777                         len(logits.shape), 'dim', "gumbel_softmax")
7778
7779    sample_shape = shape_(logits)
7780    uniform = C.uniform(sample_shape, scalar_to_tensor_(
7781        0.0, mstype.float32), scalar_to_tensor_(1.0, mstype.float32))
7782    uniform = cast_(uniform, logits_dtype)
7783    gumbel = neg(log_(neg(log_(uniform))))
7784    gumbel = (logits + gumbel) / tau
7785    y_soft = _get_cache_prim(P.Softmax)(dim)(gumbel)
7786    if hard:
7787        index = y_soft.argmax(axis=dim)
7788        y_hard = _get_cache_prim(P.OneHot)(dim)(index, sample_shape[dim], Tensor(1, logits_dtype),
7789                                                Tensor(0, logits_dtype))
7790        ret = ops.stop_gradient(y_hard - y_soft) + y_soft
7791    else:
7792        ret = y_soft
7793    return ret
7794
7795
7796def kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None):
7797    r"""
7798    Generates a Kaiser window, which is also known as the Kaiser-Bessel window.
7799
7800    The Kaiser window is defined as
7801
7802    .. math::
7803        w(n) = \frac{I_{0}\left( \beta\sqrt{1 - \frac{4n^{2}}{(M - 1)^{2}}} \right)}{I_{0}(\beta)}
7804
7805    with
7806
7807    .. math::
7808        - \frac{M - 1}{2} \leq n \leq \frac{M - 1}{2}
7809
7810    where :math:`I_0` is the modified zeroth-order Bessel function.
7811
7812    Args:
7813        window_length (int): Length of window.
7814        periodic (bool, optional): When set to ``True`` , generates a periodic window for spectral analysis.
7815            When set to ``False`` , generates a symmetric window for filter design. Default: ``True`` .
7816        beta (float, optional): Shape parameter, when `beta` gets large, the window narrows. Default: ``12.0`` .
7817
7818    Keyword Args:
7819        dtype (mindspore.dtype, optional): The output window data type, it must be float. Default: ``None`` .
7820
7821    Returns:
7822        Tensor, a Kaiser window.
7823
7824    Raises:
7825        TypeError: If `window_length` or `beta` is not an integer.
7826        TypeError: If `periodic` is not a variable of Boolean type.
7827        ValueError: If `window_length` is negative.
7828
7829    Supported Platforms:
7830        ``Ascend`` ``GPU`` ``CPU``
7831
7832    Examples:
7833        >>> from mindspore import ops
7834        >>> window_length = 5
7835        >>> out = ops.kaiser_window(window_length)
7836        >>> print(out.asnumpy())
7837        [5.27734413e-05 1.01719688e-01 7.92939834e-01 7.92939834e-01
7838         1.01719688e-01]
7839    """
7840    if not isinstance(window_length, int):
7841        raise TypeError(
7842            f"For 'kaiser_window', 'window_length' must be a non-negative integer, but got {type(window_length)}"
7843        )
7844    if window_length < 0:
7845        raise ValueError(
7846            f"For 'kaiser_window', 'window_length' must be a non-negative integer, but got {window_length}"
7847        )
7848    if window_length <= 1:
7849        return Tensor(np.ones(window_length))
7850    if not isinstance(periodic, bool):
7851        raise TypeError(
7852            f"For 'kaiser_window', 'periodic' must be a variable of Boolean type, but got {type(periodic)}"
7853        )
7854    if dtype is not None and dtype not in mstype.float_type:
7855        raise TypeError(f"For 'kaiser_window', 'dtype' must be floating point dtypes, but got {dtype}.")
7856    if periodic:
7857        window_length = window_length + 1
7858    n = np.arange(0, window_length)
7859    alpha = (window_length - 1) / 2.0
7860    w = np.i0(
7861        beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)
7862    ) / np.i0(float(beta))
7863    if dtype is not None:
7864        w = cast_(ms.tensor(w), dtype)
7865    out = Tensor(w[:-1]) if periodic else Tensor(w)
7866    return out
7867
7868
7869def stft(x, n_fft, hop_length=None, win_length=None, window=None, center=True,
7870         pad_mode="REFLECT", normalized=False, onesided=None, return_complex=None):
7871    r"""
7872    STFT segments the signal into narrow time intervals and takes the Fourier transform
7873    of each segment to quantify the change of a nonstationary signal's frequency
7874    and phase content over time.
7875
7876    Ignoring the optional batch dimension, this operation computes the following expression:
7877
7878    .. math::
7879
7880        X[\omega, m]=\sum_{k=0}^{\text {win_length-1 }}
7881        \text { window }[k] \text { input }[m \times \text { hop_length }+
7882        k] \exp \left(-j \frac{2 \pi \cdot \omega k}{\text { win_length }}\right)
7883
7884    where :math:`m` is the index of the sliding window, and
7885    :math:`ω` is the frequency in range :math:`0 \leq \omega < \text{n\_fft}0≤ω<n\_fft`.
7886
7887    Args:
7888        x (Tensor): Time sequences of stft, must be either a 1-D time tensor or a 2-D tensor.
7889        n_fft (int): The size of Fourier transform.
7890        hop_length (int, optional): The distance between neighboring sliding window
7891            frames. Default: ``None``(treated as equal to :math:`floor(n\_fft / 4)`).
7892        win_length (int, optional): the size of window frame and STFT filter.
7893            Default: ``None``(treated as equal to `n_fft`).
7894        window (Tensor, optional): the optional window function, 1-D tensor of size `win_length`.
7895            Default: ``None``(treated as window of all :math:`1` s). If `win_length` < `n_fft`,
7896            `window` will be padded on both sides with ones to length `n_fft` before it takes effect.
7897        center (bool, optional): whether to pad `x` on both sides. Default: ``True``.
7898        pad_mode (str, optional): controls the padding method used when
7899            `center` is True. Default: 'REFLECT'.
7900        normalized (bool, optional): controls whether to return the normalized STFT results
7901             Default: ``False``.
7902        onesided (bool, optional): controls whether to return half of results to
7903            avoid redundancy for real inputs.
7904            Default: ``None``. True for real `x` and `window`, False otherwise.
7905        return_complex (bool, optional): whether to return a complex tensor, or
7906            a real tensor with an extra last dimension for the real and
7907            imaginary components.
7908            Default: ``None``. True for complex `x` or `window`, False otherwise.
7909
7910    Returns:
7911        - **output** (Tensor) - A tensor containing the STFT result.
7912            If `return_complex` is True, it returns a complex Tensor with shape :math:`(*, N, T)`.
7913            If `return_complex` is False, it returns a real Tensor with shape :math:`(*, N, T, 2)`.
7914
7915            `N` is size of Fourier transform, it depends on parameter `onesided`:
7916            - If `onesided` is False, :math:`N = n\_fft`.
7917            - If `onesided` is True, :math:`N = n\_fft // 2 + 1`.
7918
7919            `T` is the total number of frames used, calculated by this formula:
7920            :math:`T = 1 + (len - n\_fft) / hop\_length`, where `len` depends on parameter `center`:
7921            - If `center` is False, :math:`len = signal\_length`.
7922            - If `center` is True, :math:`len = signal\_length + (n\_fft // 2) * 2`.
7923            where :math:`signal\_length` is the signal length, it equals to :math:`x.shape[-1]`.
7924
7925    Raises:
7926        TypeError: If `x` is not a 1-D or 2-D tensor.
7927        TypeError: If `window` is not a 1-D tensor.
7928        TypeError: If any one of `center` , `normalized` , `onesided`
7929            and `return_complex` is assigned a nonboolean value.
7930        TypeError: If `pad_mode` is is assigned a value that is not string.
7931        TypeError: If `n_fft` , `hop_length` or `hop_length` is not an int.
7932
7933    Supported Platforms:
7934        ``Ascend`` ``CPU``
7935
7936    Examples:
7937        >>> import mindspore as ms
7938        >>> from mindspore import ops
7939        >>> import numpy as np
7940        >>> x = ms.Tensor(np.random.rand(2,7192), ms.float32)
7941        >>> output = ops.stft(n_fft=64, x=x)
7942        >>> print(output.shape)
7943        (2, 33, 450, 2)
7944    """
7945    if hop_length is None:
7946        hop_length = int(n_fft // 4)
7947    if win_length is None:
7948        win_length = int(n_fft // 1)
7949    if window is None:
7950        window = ops.ones(win_length, mstype.float32)
7951
7952    def _is_complex(x):
7953        return dtype_(x) in [mstype.complex64, mstype.complex128]
7954
7955    if onesided is None:
7956        onesided = (not _is_complex(x)) and (not _is_complex(window))
7957    if return_complex is None:
7958        return_complex = _is_complex(x) or _is_complex(window)
7959    if center:
7960        _check_attr_dtype("center", center, [bool], "stft")
7961        signal_dim = len(x.shape)
7962        pad = n_fft // 2
7963        if signal_dim == 1:
7964            x = layer.Pad(((pad, pad),), pad_mode)(x)
7965        elif signal_dim == 2:
7966            x = layer.Pad(((0, 0), (pad, pad)), pad_mode)(x)
7967        else:
7968            raise ValueError(
7969                f"Expected a 1-D tensor or a 2-D tensor, but got {signal_dim}")
7970    stft_ = STFT(n_fft, hop_length, win_length,
7971                 normalized, onesided, return_complex)
7972    return stft_(x, window)
7973
7974
7975def _check_same_type(dtype1, dtype2):
7976    return dtype1 == dtype2
7977
7978
7979@constexpr
7980def _max(*args):
7981    """Returns the maximum value."""
7982    return max(*args)
7983
7984
7985@constexpr
7986def _min(*args):
7987    """Returns the minimum value."""
7988    return min(*args)
7989
7990
7991@_primexpr
7992def _infer_shape_rem(shape1, shape2, ndim1, ndim2, transpose_b):
7993    """Infers the shape of the last two dimensions after performing matmul."""
7994    shape_rem = []
7995    if ndim1 >= 2:
7996        shape_rem.append(shape1[-2])
7997    if transpose_b:
7998        if ndim2 >= 2:
7999            shape_rem.append(shape2[-2])
8000    else:
8001        if ndim1 >= 1:
8002            shape_rem.append(shape2[-1])
8003    return tuple(shape_rem)
8004
8005
8006def _check_value(items, max_size, msg_prefix, shape1, shape2):
8007    for item in items:
8008        if item not in (1, max_size):
8009            raise ValueError(f"{msg_prefix} operands could not be broadcast together with shape1 {shape1} and "
8010                             f"shape2 {shape2}.")
8011
8012
8013@_primexpr
8014def _check_matmul_shapes(shape1, shape2, prim_name=None):
8015    """Checks shape1 and shape2 are valid to perform matmul, and returns output shape after broadcasting."""
8016    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
8017    shape_out = list()
8018    r_shape1 = shape1[:-2]
8019    r_shape2 = shape2[:-2]
8020    max_len = max(len(r_shape1), len(r_shape2))
8021    for i in range(max_len):
8022        items = [it[i - max_len + len(it)] if i - max_len + len(it) >= 0 else 1 for it in (r_shape1, r_shape2)]
8023        max_size = max(items)
8024        _check_value(items, max_size, msg_prefix, shape1, shape2)
8025        shape_out.append(max_size)
8026    return tuple(shape_out)
8027
8028
8029@_primexpr
8030def _check_need_broadcast(shape1, shape2):
8031    """Returns True if broadcast is necessary for batchmatmul."""
8032    return shape1[:-2] != shape2[:-2]
8033
8034
8035@_primexpr
8036def _expand(x, ndim):
8037    """Expand x to ndim from axis, which can be 0 or -1."""
8038    while rank_(x) < ndim:
8039        x = expand_dims_(x, 0)
8040    return x
8041
8042
8043def _broadcast_to(x, shape_cur, shape_to, ndim_to):
8044    """Broadcasts x from shape_cur to shape_to."""
8045    size = tile_size_(shape_cur, shape_to, ndim_to)
8046    F.stop_gradient(size)
8047    return tile_(x, size)
8048
8049
8050def matmul(input, other):
8051    """
8052    Returns the matrix product of two tensors.
8053
8054    Note:
8055        Numpy arguments `out`, `casting`, `order`, `subok`, `signature`, and `extobj` are
8056        not supported.
8057        The dtype of `input` and `other` must be same.
8058        On Ascend, the rank of `input` or `other` must be between 1 and 6.
8059
8060    Args:
8061        input (Tensor): Input tensor, scalar not allowed.
8062            The last dimension of `input` must be the same size as the second last dimension of `other`.
8063            And the shape of input and other could be broadcast.
8064        other (Tensor): Input tensor, scalar not allowed.
8065            The last dimension of `input` must be the same size as the second last dimension of `other`.
8066            And the shape of input and other could be broadcast.
8067
8068    Returns:
8069        Tensor or scalar, the matrix product of the inputs. This is a scalar only
8070        when both `input`, `other` are 1-d vectors.
8071
8072    Raises:
8073        TypeError: If the dtype of `input` and the dtype of `other` are not the same.
8074        ValueError: If the last dimension of `input` is not the same size as the
8075            second-to-last dimension of `other`, or if a scalar value is passed in.
8076        ValueError: If the shape of `input` and `input` could not broadcast together.
8077        RuntimeError: If the rank of `input` or `other` is less than 1 or greater than 6 on the Ascend platform.
8078
8079    Supported Platforms:
8080        ``Ascend`` ``GPU`` ``CPU``
8081
8082    Examples:
8083        >>> import mindspore
8084        >>> import numpy as np
8085        >>> from mindspore import Tensor, ops
8086        >>> # case 1 : Reasonable application of broadcast mechanism
8087        >>> input = Tensor(np.arange(2*3*4).reshape(2, 3, 4), mindspore.float32)
8088        >>> other = Tensor(np.arange(4*5).reshape(4, 5), mindspore.float32)
8089        >>> output = ops.matmul(input, other)
8090        >>> print(output)
8091        [[[  70.   76.   82.   88.   94.]
8092        [ 190.  212.  234.  256.  278.]
8093        [ 310.  348.  386.  424.  462.]]
8094        [[ 430.  484.  538.  592.  646.]
8095        [ 550.  620.  690.  760.  830.]
8096        [ 670.  756.  842.  928. 1014.]]]
8097        >>> print(output.shape)
8098        (2, 3, 5)
8099        >>> # case 2 : the rank of `input` is 1
8100        >>> input = Tensor(np.ones([1, 2]), mindspore.float32)
8101        >>> other = Tensor(np.ones([2,]), mindspore.float32)
8102        >>> output = ops.matmul(input, other)
8103        >>> print(output)
8104        [2.]
8105        >>> print(output.shape)
8106        (1,)
8107    """
8108    return auto_generate.matmul_ext(input, other)
8109
8110
8111def inner(input, other):
8112    r"""
8113    Returns the inner product of two tensors.
8114
8115    For 1-D tensors (without complex conjugation), returns the ordinary inner product of vectors.
8116
8117    For higher dimensions, returns a sum product over the last axis.
8118
8119    Note:
8120         If `input` or `other` is a Tensor scalar, :func:`mindspore.ops.inner` will be the same as
8121         :func:`mindspore.ops.mul` .
8122
8123    Args:
8124        input (Tensor): First input.
8125        other (Tensor): Second input.
8126
8127    Returns:
8128        Tensor, the result of the inner product.
8129
8130    Raises:
8131        ValueError: If neither `input` nor `other` is scalar, and the last dimension of the two input tensors do not
8132            match.
8133
8134    Supported Platforms:
8135        ``Ascend`` ``GPU`` ``CPU``
8136
8137    Examples:
8138        >>> import mindspore as ms
8139        >>> from mindspore import Tensor, ops
8140        >>> from mindspore import dtype as mstype
8141        >>> # case1: 2 1D tensors
8142        >>> input = ms.Tensor([1, 2, 3], mstype.float32)
8143        >>> y = ms.Tensor([4, 5, 6], mstype.float32)
8144        >>> output = ops.inner(input, y)
8145        >>> print(output)
8146        32
8147        >>> # case2: Tensor scalar and tensor
8148        >>> input = ms.Tensor([[[1, 2, 3], [3, 2, 1]], [[4, 5, 6], [4, 5, 6]]], mstype.float32)
8149        >>> y = ms.Tensor(2, mstype.float32)
8150        >>> output = ops.inner(input, y)
8151        >>> print(output)
8152        [[[ 2.  4.  6.]
8153          [ 6.  4.  2.]]
8154         [[ 8. 10. 12.]
8155          [ 8. 10. 12.]]]
8156        >>> # case3: Two tensors
8157        >>> input = ms.Tensor([[[1, 2, 3], [3, 2, 1]], [[4, 5, 6], [4, 5, 6]]], mstype.float32)
8158        >>> y = ms.Tensor([[2, 3, 4], [4, 3, 2]], mstype.float32)
8159        >>> output = ops.inner(input, y)
8160        >>> print(output)
8161        [[[20. 16.]
8162          [16. 20.]]
8163         [[47. 43.]
8164          [47. 43.]]]
8165    """
8166    x_dim = input.ndim
8167    other_dim = other.ndim
8168
8169    if x_dim == 0 or other_dim == 0:
8170        output = input * other
8171        return output
8172
8173    x_shape = input.shape
8174    other_shape = other.shape
8175    if x_shape[-1] != other_shape[-1]:
8176        raise ValueError(f"For 'inner', the last dimension of 'input' and 'other' must be the same, \
8177                         but got input.shape: {x_shape} and other.shape: {other_shape}.")
8178    return ops.tensor_dot(input, other, axes=(-1, -1))
8179
8180
8181def bmm(input_x, mat2):
8182    r"""
8183    Computes matrix multiplication between two tensors by batch.
8184
8185    .. math::
8186
8187        \text{output}[..., :, :] = \text{matrix}(input_x[..., :, :]) * \text{matrix}(mat2[..., :, :])
8188
8189    The dim of `input_x` can not be less than `3` and the dim of `mat2` can not be less than `2`.
8190
8191    Args:
8192        input_x (Tensor): The first tensor to be multiplied. The shape of the tensor is :math:`(*B, N, C)`,
8193            where :math:`*B` represents the batch size which can be multidimensional, :math:`N` and :math:`C` are the
8194            size of the last two dimensions.
8195        mat2 (Tensor): The second tensor to be multiplied. The shape of the tensor is :math:`(*B, C, M)`.
8196
8197    Returns:
8198        Tensor, the shape of the output tensor is :math:`(*B, N, M)`.
8199
8200    Raises:
8201        ValueError: If dim of `input_x` is less than `3` or dim of `mat2` is less than `2`.
8202        ValueError: If the length of the third dim of `input_x` is not equal to
8203            the length of the second dim of `mat2`.
8204
8205    Supported Platforms:
8206        ``Ascend`` ``GPU`` ``CPU``
8207
8208    Examples:
8209        >>> import mindspore as ms
8210        >>> from mindspore import Tensor, ops
8211        >>> import numpy as np
8212        >>> input_x = Tensor(np.arange(24).reshape((2, 4, 1, 3)), ms.float32)
8213        >>> mat2 = Tensor(np.arange(72).reshape((2, 4, 3, 3)), ms.float32)
8214        >>> output = ops.bmm(input_x, mat2)
8215        >>> print(output)
8216        [[[[  15.   18.   21.]]
8217          [[ 150.  162.  174.]]
8218          [[ 447.  468.  489.]]
8219          [[ 906.  936.  966.]]]
8220         [[[1527. 1566. 1605.]]
8221          [[2310. 2358. 2406.]]
8222          [[3255. 3312. 3369.]]
8223          [[4362. 4428. 4494.]]]]
8224    """
8225    return batch_matmul_(input_x, mat2)
8226
8227
8228def quantile(input, q, axis=None, keepdims=False):
8229    r"""
8230    Computes the q-th quantiles of all elements in `input`, when the
8231    q-th quantile lies between two data points, a linear interpolation is implemented between them.
8232
8233    Args:
8234        input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
8235            Supported dtypes: float32, float64.
8236        q (Union[float, Tensor]): A scalar or 1D tensor of quantile values in the range [0, 1].
8237            Supported dtypes: float32, float64.
8238        axis (int, optional): The dimension to reduce. By default, `axis` is None resulting in the
8239            input tensor being flattened before computation. Default: ``None``.
8240        keepdims (bool, optional): Whether the output tensor has dim retained or not. Default: ``False``.
8241
8242    Returns:
8243        Tensor, has the same dtype as the `input`.
8244
8245        Suppose the shape of `input` is :math:`(m, x_0, x_1, ..., x_i, ..., X_R)`, `axis` = :math:`i` and m is
8246        the element count of input `q`.
8247
8248        - If `q` is scalar and `keepdims` is True, the shape of output is :math:`(x_0, x_1, ..., 1, ..., X_R)`.
8249        - If `q` is scalar and `keepdims` is False, the shape of output is :math:`(x_0, x_1, ..., X_R)`.
8250        - If `q` is 1D Tensor and `keepdims` is True, the shape of output is :math:`(m, x_0, x_1, ..., 1, ..., X_R)`.
8251        - If `q` is 1D Tensor and `keepdims` is False, the shape of output is :math:`(m, x_0, x_1, ..., X_R)`.
8252
8253    Raises:
8254        TypeError: If `input` is not a Tensor.
8255        TypeError: If `q` is not a Tensor or float.
8256        TypeError: If dtype of `input` is not float32 or float64.
8257        TypeError: If dtype of `q` is not float32 or float64.
8258        TypeError: If dtype of `input` and the dtype of `q` is different.
8259        ValueError: If the `q` values not in the range [0, 1].
8260        ValueError: If the `axis` values out of range.
8261
8262    Supported Platforms:
8263
8264
8265    Examples:
8266        >>> import mindspore
8267        >>> import numpy as np
8268        >>> from mindspore import Tensor, ops
8269        >>> x = Tensor(np.array([-0.7832, 0.8003, 0.8111]), mindspore.float32)
8270        >>> q = Tensor(np.array([0.1, 0.7, 0.9]), mindspore.float32)
8271        >>> output = ops.quantile(x, q)
8272        >>> print(output.asnumpy())
8273        [-0.4665 0.80462 0.80894]
8274    """
8275
8276    if axis is not None:
8277        _check_attr_dtype("axis", axis, [int], "quantile")
8278    if keepdims is not None:
8279        _check_attr_dtype("keepdims", keepdims, [bool], "quantile")
8280
8281    quantile_ = _get_cache_prim(Quantile)(dim=axis, keep_dims=keepdims)
8282    return quantile_(input, q)
8283
8284
8285def nanquantile(input, q, axis=None, keepdims=False):
8286    r"""
8287    This operator is derived from mindspore.ops.quantile() that 'ignores' NaN values.
8288    It computes quantiles as though the input has no NaN values. If all values in a
8289    reduced dimension are NaN then the quantiles for that reduction will be NaN.
8290
8291    Refer to :func:`mindspore.ops.quantile` for more details.
8292
8293    Args:
8294        input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
8295            Supported dtypes: float32, float64.
8296        q (Union[float, Tensor]): A scalar or 1D tensor of quantile values in the range [0, 1].
8297            Supported dtypes: float32, float64.
8298        axis (int, optional): The dimension to reduce. By default, `axis` is None resulting in the
8299            input tensor being flattened before computation. Default: ``None``.
8300        keepdims (bool, optional): Whether the output tensor has dim retained or not. Default: ``False``.
8301
8302    Returns:
8303        Tensor, has the same dtype as the `input`.
8304
8305        Suppose the shape of `input` is :math:`(m, x_0, x_1, ..., x_i, ..., X_R)`, `axis` = :math:`i` and m is
8306        the element count of input `q`.
8307
8308        - If `q` is scalar and `keepdims` is True, the shape of output is :math:`(x_0, x_1, ..., 1, ..., X_R)`.
8309        - If `q` is scalar and `keepdims` is False, the shape of output is :math:`(x_0, x_1, ..., X_R)`.
8310        - If `q` is 1D Tensor and `keepdims` is True, the shape of output is :math:`(m, x_0, x_1, ..., 1, ..., X_R)`.
8311        - If `q` is 1D Tensor and `keepdims` is False, the shape of output is :math:`(m, x_0, x_1, ..., X_R)`.
8312
8313    Raises:
8314        TypeError: If `input` is not a Tensor.
8315        TypeError: If `q` is not a Tensor or float.
8316        TypeError: If dtype of `input` is not float32 or float64.
8317        TypeError: If dtype of `q` is not float32 or float64.
8318        TypeError: If dtype of `input` and the dtype of `q` is different.
8319        ValueError: If the `q` values not in the range [0, 1].
8320        ValueError: If the `axis` values out of range.
8321
8322    Supported Platforms:
8323
8324
8325    Examples:
8326        >>> import mindspore
8327        >>> import numpy as np
8328        >>> from mindspore import Tensor, ops
8329        >>> x = Tensor(np.array([float('nan'), 0.8003, 0.8111]), mindspore.float32)
8330        >>> q = Tensor(np.array([0.1, 0.7, 0.9]), mindspore.float32)
8331        >>> output = ops.nanquantile(x, q)
8332        >>> print(output.asnumpy())
8333        [0.80138 0.80786 0.81002]
8334    """
8335
8336    if axis is not None:
8337        _check_attr_dtype("axis", axis, [int], "nanquantile")
8338    if keepdims is not None:
8339        _check_attr_dtype("keepdims", keepdims, [bool], "nanquantile")
8340
8341    quantile_ = _get_cache_prim(Quantile)(dim=axis, keep_dims=keepdims, ignore_nan=True)
8342    return quantile_(input, q)
8343
8344
8345def baddbmm(input, batch1, batch2, beta=1, alpha=1):
8346    r"""
8347    The result is the sum of the input and a batch matrix-matrix product of matrices in batch1 and batch2.
8348    The formula is defined as follows:
8349
8350    .. math::
8351        \text{out}_{i} = \beta \text{input}_{i} + \alpha (\text{batch1}_{i} \mathbin{@} \text{batch2}_{i})
8352
8353    Args:
8354        input (Tensor): The input Tensor. When batch1 is a :math:`(C, W, T)` Tensor and batch2 is a
8355            :math:`(C, T, H)` Tensor, input must be broadcastable with :math:`(C, W, H)` Tensor.
8356        batch1 (Tensor): :math:`batch1` in the above formula. Must be 3-D Tensor, dtype is same as input.
8357        batch2 (Tensor): :math:`batch2` in the above formula. Must be 3-D Tensor, dtype is same as input.
8358        beta (Union[float, int], optional): multiplier for input. Default: ``1`` .
8359        alpha (Union[float, int], optional): multiplier for :math:`batch1 @ batch2`. Default: ``1`` .
8360            Arguments beta and alpha must be integers when inputs of type not FloatTensor, otherwise they should
8361            be a real number.
8362
8363    Returns:
8364        Tensor, has the same dtype as input, shape will be :math:`(C, W, H)`.
8365
8366    Raises:
8367        TypeError: The type of `input`, `batch1`, `batch2` is not Tensor.
8368        TypeError: The types of `input`, `batch1`, `batch2` are different.
8369        TypeError: For inputs of type FloatTensor or DoubleTensor, \
8370                    arguments beta and alpha not be real numbers, otherwise not be integers.
8371        TypeError: For Baddbmm, attributes alpha and beta are not real numbers
8372        ValueError: If `batch1` and `batch2` are not 3-D tensors.
8373
8374    Supported Platforms:
8375        ``Ascend`` ``GPU`` ``CPU``
8376
8377    Examples:
8378        >>> import numpy as np
8379        >>> from mindspore import Tensor, ops
8380        >>> input = Tensor(np.ones([1, 3, 3]).astype(np.float32))
8381        >>> batch1 = Tensor(np.ones([1, 3, 4]).astype(np.float32))
8382        >>> batch2 = Tensor(np.ones([1, 4, 3]).astype(np.float32))
8383        >>> output = ops.baddbmm(input, batch1, batch2)
8384        >>> print(output)
8385        [[[5. 5. 5.]
8386          [5. 5. 5.]
8387          [5. 5. 5.]]]
8388    """
8389    bmmop = _get_cache_prim(BatchMatMul)(False, False)
8390    if not (isinstance(input, Tensor) and isinstance(batch1, Tensor) and isinstance(batch2, Tensor)):
8391        raise TypeError("For Baddbmm, inputs must be all tensors.")
8392    input_dtype = dtype_(input)
8393    if not (input_dtype == dtype_(batch1) and input_dtype == dtype_(batch2)):
8394        raise TypeError("For Baddbmm, the inputs should be the same dtype.")
8395    if input_dtype in (mstype.float16, mstype.float32, mstype.float64):
8396        if not (isinstance(alpha, (int, float)) and isinstance(beta, (int, float))):
8397            raise TypeError("For attributes alpha and beta should be real numbers.")
8398        check_is_number(alpha, (int, float))
8399        check_is_number(beta, (int, float))
8400    else:
8401        if not (isinstance(alpha, int) and isinstance(beta, int)):
8402            raise TypeError("For inputs of type not FloatTensor or DoubleTensor, "
8403                            "arguments beta and alpha must be integers.")
8404    y = beta * input + alpha * (bmmop(batch1, batch2))
8405    return y
8406
8407
8408def log2(input):
8409    r"""
8410    Returns a new Tensor by taking the base 2 logarithm of the elements in the input Tensor.
8411
8412    .. math::
8413        y_i = \log_2(input_i)
8414
8415    .. warning::
8416        If the input value of operator log2 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy may
8417        be affected.
8418
8419    Args:
8420        input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
8421
8422    Returns:
8423        Tensor, has the same shape and dtype as the `input`.
8424
8425    Raises:
8426        TypeError: If `input` is not a Tensor.
8427        TypeError: If dtype of `input` is not float16 or float32 or float64 on CPU and GPU, if dtype of `input`
8428            is not float16 or float32 on Ascend.
8429
8430    Supported Platforms:
8431        ``Ascend`` ``GPU`` ``CPU``
8432
8433    Examples:
8434        >>> import numpy as np
8435        >>> from mindspore import Tensor, ops
8436        >>> x = Tensor(np.array([2, 4, 8]).astype(np.float16))
8437        >>> output = ops.log2(x)
8438        >>> print(output)
8439        [1. 2. 3.]
8440    """
8441    x_dtype = dtype_(input)
8442    denominator = log_(_make_tensor(2, x_dtype))
8443    frac_log = log_(input)
8444    output = frac_log / denominator
8445    return output
8446
8447
8448def arrange(x):
8449    lists = []
8450    for i in range(0, x):
8451        lists.append(i)
8452    return lists
8453
8454
8455def rot90(input, k, dims):
8456    """
8457    Rotate a n-D tensor by 90 degrees in the plane specified by dims axis.
8458    Rotation direction is from the first towards the second axis if k > 0,
8459    and from the second towards the first for k < 0.
8460
8461    Args:
8462        input (Tensor): Input tensor.
8463        k (int): Number of times to rotate.
8464        dims (Union[list(int), tuple(int)]): Axis to rotate.
8465
8466    Returns:
8467        Tensor.
8468
8469    Raises:
8470        TypeError: If `input` is not a Tensor.
8471        TypeError: If `k` is not integer.
8472        TypeError: If `dims` is not tuple of integers or list of ints.
8473        ValueError: If the length of `dims` is not `2`.
8474        ValueError: If any dims is out of Tensor's range [-input.ndim, input.ndim).
8475        RuntimeError: If rotation dims are not different.
8476
8477    Supported Platforms:
8478        ``Ascend`` ``GPU`` ``CPU``
8479
8480    Examples:
8481        >>> import numpy as np
8482        >>> from mindspore import Tensor, ops
8483        >>> x = Tensor(np.array([[0, 1], [2, 3]])).astype(np.float32)
8484        >>> k = 1
8485        >>> dims = [0, 1]
8486        >>> output = ops.rot90(x, k, dims)
8487        >>> print(output)
8488        [[1. 3.]
8489        [0. 2.]]
8490    """
8491
8492    if not isinstance(input, (Tensor, Tensor_)):
8493        raise TypeError(f"For `rot90`, the `input` must be Tensor!, but get {type(input)}.")
8494    if not isinstance(k, int):
8495        raise TypeError(f"For `rot90`, the `k` must be int!, but get {type(k)}.")
8496    if not isinstance(dims, (list, tuple)):
8497        raise TypeError(f"For `rot90`, the `dims` must be list or tuple!, but get {type(dims)}.")
8498
8499    total_dims = input.ndim
8500    total_rot_dims = len(dims)
8501
8502    if total_rot_dims != 2:
8503        raise ValueError(f"For `rot90`, total rotation dims must be 2, but get {total_rot_dims}.")
8504    if dims[0] == dims[1] or (dims[0] - dims[1]) == total_dims or (dims[1] - dims[0]) == total_dims:
8505        raise RuntimeError(f"For `rot90`, rotation dims must be different, but get dim0={dims[0]}, dim1={dims[1]}.")
8506    if dims[0] >= total_dims or dims[0] < -total_dims:
8507        raise ValueError(f"For `rot90`, rotation dim0 is out of range, dim0={dims[0]}.")
8508    if dims[1] >= total_dims or dims[1] < -total_dims:
8509        raise ValueError(f"For `rot90`, rotation dim1 is out of range, dim1={dims[1]}.")
8510
8511    k = (4 + (k % 4)) % 4
8512
8513    if k == 0:
8514        out = input
8515        return out
8516    if k == 2:
8517        op1 = P.ReverseV2(axis=[dims[0]])
8518        output = op1(input)
8519        op2 = P.ReverseV2(axis=[dims[1]])
8520        out = op2(output)
8521        return out
8522
8523    axes_list = arrange(total_dims)
8524    (axes_list[dims[0]], axes_list[dims[1]]) = (axes_list[dims[1]],
8525                                                axes_list[dims[0]])
8526
8527    if k == 1:
8528        op = P.ReverseV2(axis=[dims[1]])
8529        output = op(input)
8530        out = output.transpose(axes_list)
8531    else:
8532        output = input.transpose(axes_list)
8533        op = P.ReverseV2(axis=[dims[1]])
8534        out = op(output)
8535    return out
8536
8537
8538def roll(input, shifts, dims=None):
8539    """
8540    Rolls the elements of a tensor along an axis.
8541
8542    Args:
8543        input (Tensor): Input tensor.
8544        shifts (Union[list(int), tuple(int), int]): Specifies the number of places by which elements are shifted
8545            positively (towards larger indices) along the specified dimension. Negative shifts will roll the elements
8546            in the opposite direction.
8547        dims (Union[list(int), tuple(int), int], optional): Specifies the dimension indexes of shape to be rolled.
8548            Default: ``None``. If dims is None, the Tensor will be flattened before rolling and then restored to the
8549            original shape.
8550
8551    Returns:
8552        Tensor, has the same shape and type as `input`.
8553
8554    Raises:
8555        TypeError: If `shifts` is not an int, a tuple or a list.
8556        TypeError: If `dims` is not an int, a tuple or a list.
8557
8558    Supported Platforms:
8559        ``GPU``
8560
8561    Examples:
8562        >>> import numpy as np
8563        >>> import mindspore as ms
8564        >>> from mindspore import ops
8565        >>> from mindspore import Tensor
8566        >>> input_x = Tensor(np.array([0, 1, 2, 3, 4]).astype(np.float32))
8567        >>> output = ops.roll(input_x, shifts=2, dims=0)
8568        >>> print(output)
8569        [3. 4. 0. 1. 2.]
8570    """
8571    _shape = input.shape
8572    if dims is None:
8573        flatten_x = input.reshape(-1)
8574        return Roll(shifts, 0)(flatten_x).reshape(_shape)
8575    return Roll(shifts, dims)(input)
8576
8577
8578def xdivy(x, y):
8579    """
8580    Divides the first input tensor by the second input tensor element-wise. Returns zero when `x` is zero.
8581
8582    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
8583    When the inputs are two tensors,
8584    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
8585    If one of the inputs is scalar, the scalar could only be a constant.
8586
8587    .. note::
8588        When `x` and `y` are both of datatype complex, they should be both complex64 or complex128 at the same time.
8589
8590    Args:
8591        x (Union[Tensor, Number, bool]):  Tensor of datatype number.Number or bool, or it can be a bool or number.
8592        y (Union[Tensor, Number, bool]): Tensor of datatype number.Number or bool, or it can be a bool or number.
8593            `x` and `y` can not be both bool at the same time.
8594
8595    Returns:
8596        Tensor, the shape is the same as the one after broadcasting,
8597        and the data type is the one with higher precision or higher digits among the two inputs.
8598
8599    Raises:
8600        TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
8601        TypeError: If dtype of `x` and `y` is not in [float16, float32, float64, complex64, complex128, bool].
8602        ValueError: If `x` could not be broadcast to a tensor with shape of `y`.
8603        RuntimeError: If the data type of `x`, `y` conversion of Parameter is given
8604                      but data type conversion of Parameter is not supported.
8605
8606    Supported Platforms:
8607        ``Ascend`` ``GPU`` ``CPU``
8608
8609    Examples:
8610        >>> import mindspore
8611        >>> import numpy as np
8612        >>> from mindspore import Tensor, ops
8613        >>> x = Tensor(np.array([2, 4, -1]), mindspore.float32)
8614        >>> y = Tensor(np.array([2, 2, 2]), mindspore.float32)
8615        >>> output = ops.xdivy(x, y)
8616        >>> print(output)
8617        [ 1.   2.  -0.5]
8618    """
8619    return xdivy_(x, y)
8620
8621
8622def log10(input):
8623    r"""
8624    Returns a new Tensor by taking the base 10 logarithm of the elements in the input Tensor.
8625
8626    .. math::
8627        y_i = \log_{10}(input_i)
8628
8629    .. warning::
8630        If the input value of operator log10 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy may
8631        be affected.
8632
8633    Args:
8634        input (Tensor): Input Tensor of any dimension. The each element in Tensor must be greater than 0.
8635
8636    Returns:
8637        Tensor, has the same shape and dtype as the `input`.
8638
8639    Raises:
8640        TypeError: If `input` is not a Tensor.
8641        TypeError: If dtype of `input` is not float16 or float32 or float64 on CPU and GPU, if dtype of `input`
8642            is not float16 or float32 on Ascend.
8643
8644    Supported Platforms:
8645        ``Ascend`` ``GPU`` ``CPU``
8646
8647    Examples:
8648        >>> import numpy as np
8649        >>> from mindspore import Tensor, ops
8650        >>> x = Tensor(np.array([2, 4, 10]).astype(np.float16))
8651        >>> output = ops.log10(x)
8652        >>> print(output)
8653        [0.301 0.602 1.   ]
8654    """
8655    x_dtype = dtype_(input)
8656    denominator = log_(_make_tensor(10, x_dtype))
8657    frac_log = log_(input)
8658    output = frac_log / denominator
8659    return output
8660
8661
8662def kron(input, other):
8663    """
8664    Computes the Kronecker product :math:`input ⊗ other`, denoted by ⊗, of `input` and `other`.
8665
8666    If `input` is a :math:`(a_{0}` input :math:`a_{1}` input ... input :math:`a_{n})` Tensor
8667    and `other` is a :math:`(b_{0}` input :math:`b_{1}` input ... input :math:`b_{n})` Tensor,
8668    the result will be a :math:`(a_{0}*b_{0}` input :math:`a_{1}*b_{1}` input ... input :math:`a_{n}*b_{n})`
8669    Tensor with the following entries:
8670
8671    .. math::
8672        (input ⊗ other)_{k_{0},k_{1},...k_{n}} =
8673        input_{i_{0},i_{1},...i_{n}} * other_{j_{0},j_{1},...j_{n}},
8674
8675    where :math:`k_{t} = i_{t} * b_{t} + j_{t}` for 0 ≤ `t` ≤ `n`. If one
8676    Tensor has fewer dimensions than the other it is unsqueezed
8677    until it has the same number of dimensions.
8678
8679    Note:
8680        Supports real-valued and complex-valued inputs.
8681
8682    Args:
8683        input (Tensor): Input Tensor, has the shape :math:`(r0, r1, ... , rN)`.
8684        other (Tensor): Input Tensor, has the shape :math:`(s0, s1, ... , sN)`.
8685
8686    Returns:
8687        Tensor, has the shape :math:`(r0 * s0, r1 * s1, ... , rN * sN)`.
8688
8689    Raises:
8690        TypeError: If `input` is not a Tensor.
8691        TypeError: If `other` is not a Tensor.
8692
8693    Supported Platforms:
8694        ``Ascend`` ``GPU`` ``CPU``
8695
8696    Examples:
8697        >>> import mindspore
8698        >>> import numpy as np
8699        >>> from mindspore import Tensor, nn
8700        >>> from mindspore import ops
8701        >>> input = Tensor(np.array([[0, 1, 2], [3, 4, 5]])).astype(np.float32)
8702        >>> other = Tensor(np.array([[-1, -2, -3], [-4, -6, -8]])).astype(np.float32)
8703        >>> output = ops.kron(input, other)
8704        >>> print(output)
8705        [[  0.   0.   0.  -1.  -2.  -3.  -2.  -4.  -6.]
8706         [  0.   0.   0.  -4.  -6.  -8.  -8. -12. -16.]
8707         [ -3.  -6.  -9.  -4.  -8. -12.  -5. -10. -15.]
8708         [-12. -18. -24. -16. -24. -32. -20. -30. -40.]]
8709    """
8710
8711    if not isinstance(input, (Tensor, Tensor_)):
8712        raise TypeError("the input must be Tensor!")
8713    if not isinstance(other, (Tensor, Tensor_)):
8714        raise TypeError("the other must be Tensor!")
8715    if input is None or other is None:
8716        return None
8717    if input.ndim == 0 or other.ndim == 0:
8718        return input * other
8719
8720    if input.ndim >= other.ndim:
8721        maxdim = input.ndim
8722    else:
8723        maxdim = other.ndim
8724    pad_x = maxdim - input.ndim
8725    pad_y = maxdim - other.ndim
8726    x_reshape = [0 for _ in range(2 * maxdim)]
8727    y_reshape = [0 for _ in range(2 * maxdim)]
8728    result_shape = [0 for _ in range(maxdim)]
8729
8730    for i in range(maxdim):
8731        if i >= pad_x:
8732            x_reshape[2 * i] = input.shape[i - pad_x]
8733        else:
8734            x_reshape[2 * i] = 1
8735        x_reshape[2 * i + 1] = 1
8736        y_reshape[2 * i] = 1
8737        if i >= pad_y:
8738            y_reshape[2 * i + 1] = other.shape[i - pad_y]
8739        else:
8740            y_reshape[2 * i + 1] = 1
8741        result_shape[i] = x_reshape[2 * i] * y_reshape[2 * i + 1]
8742
8743    input = input.reshape(x_reshape)
8744    other = other.reshape(y_reshape)
8745    result = (input * other).reshape(result_shape)
8746    return result
8747
8748
8749def _check_is_tensor(param_name, input, cls_name):
8750    """Returns True if input is Tensor."""
8751    if not isinstance(input, Tensor):
8752        raise TypeError(f"For {cls_name}, {param_name} must be a Tensor, but got {type(input)}.")
8753
8754
8755
8756def any(input, axis=None, keep_dims=False):
8757    r"""
8758    Reduces a dimension of `input` by the "logical OR" of all elements in the dimension, by default. And also can
8759    reduce a dimension of `input` along the `axis`. Determine whether the dimensions of the output and input are the
8760    same by controlling `keep_dims`.
8761
8762    Note:
8763        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
8764
8765    Args:
8766        input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
8767            any number of additional dimensions.
8768        axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
8769            Suppose the rank of `input` is r, `axis` must be in the range [-rank(input), rank(input)).
8770            Default: ``None`` , all dimensions are reduced.
8771        keep_dims (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
8772            If ``False`` , don't keep these dimensions. Default : ``False`` .
8773
8774    Returns:
8775        Tensor, the dtype is bool.
8776
8777        - If `axis` is ``None`` , and `keep_dims` is ``False`` ,
8778          the output is a 0-D Tensor representing the "logical OR" of all elements in the input Tensor.
8779        - If `axis` is int, such as 2, and `keep_dims` is ``False`` ,
8780          the shape of output is :math:`(input_1, input_3, ..., input_R)`.
8781        - If `axis` is tuple(int), such as (2, 3), and `keep_dims` is ``False`` ,
8782          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
8783        - If `axis` is 1-D Tensor, such as [2, 3], and `keep_dims` is ``False`` ,
8784          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
8785
8786    Raises:
8787        TypeError: If `keep_dims` is not a bool.
8788        TypeError: If `input` is not a Tensor.
8789        TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
8790
8791    Supported Platforms:
8792        ``Ascend`` ``GPU`` ``CPU``
8793
8794    Examples:
8795        >>> import numpy as np
8796        >>> from mindspore import Tensor, ops
8797        >>> x = Tensor(np.array([[True, False], [True, True]]))
8798        >>> # case 1: Reduces a dimension by the "logical OR" of all elements in the dimension.
8799        >>> output = ops.any(x, keep_dims=True)
8800        >>> print(output)
8801        [[ True]]
8802        >>> print(output.shape)
8803        (1, 1)
8804        >>> # case 2: Reduces a dimension along axis 0.
8805        >>> output = ops.any(x, axis=0)
8806        >>> print(output)
8807        [ True True]
8808        >>> # case 3: Reduces a dimension along axis 1.
8809        >>> output = ops.any(x, axis=1)
8810        >>> print(output)
8811        [ True True]
8812    """
8813    if axis is None:
8814        axis = ()
8815    return _get_cache_prim(P.ReduceAny)(keep_dims)(input, axis)
8816
8817
8818def remainder(input, other):
8819    r"""
8820    Computes the remainder of dividing the first input tensor by the second input tensor element-wise.
8821
8822    Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
8823    The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors,
8824    both dtypes cannot be bool, and the shapes of them could be broadcast. When the inputs are one tensor
8825    and one scalar, the scalar could only be a constant.
8826
8827    .. math::
8828
8829        remainder(input, other) = input - input.div(other, rounding\_mode="floor") * other
8830
8831    .. warning::
8832        - When the elements of input exceed 2048, there might be accuracy problems.
8833        - The calculation results of this operator on Ascend and CPU might be inconsistent.
8834        - If shape is expressed as (D1,D2... ,Dn), then D1\*D2... \*DN<=1000000,n<=8.
8835
8836    Args:
8837        input (Union[Tensor, numbers.Number, bool]): The first input is a number, a bool
8838            or a tensor whose data type is number.
8839        other (Union[Tensor, numbers.Number, bool]): When the first input is a tensor, The second input
8840            could be a number, a bool or a tensor whose data type is number.
8841
8842    Returns:
8843        Tensor, the shape is the same as the one after broadcasting,
8844        and the data type is the one with higher precision.
8845
8846    Raises:
8847        TypeError: If neither `input` nor `other` is one of the following: Tensor, number, bool.
8848        ValueError: If the shape `input` and `other` cannot be broadcasted to each other.
8849
8850    Supported Platforms:
8851        ``Ascend`` ``GPU`` ``CPU``
8852
8853    Examples:
8854        >>> import numpy as np
8855        >>> from mindspore import Tensor, ops
8856        >>> x = Tensor(np.array([-4.0, 5.0, 6.0]).astype(np.float16))
8857        >>> y = Tensor(np.array([3.0, 2.0, 3.0]).astype(np.float16))
8858        >>> output = ops.remainder(x, y)
8859        >>> print(output)
8860        [2.  1.  0.]
8861    """
8862
8863    out = input - tensor_floordiv(input, other) * other
8864    return out
8865
8866
8867def accumulate_n(x):
8868    r"""
8869    Computes accumulation of all input tensors element-wise.
8870
8871    :func:`mindspore.ops.accumulate_n` is similar to :func:`mindspore.ops.addn`,
8872    but there is a significant difference between them: accumulate_n will not wait
8873    for all of its inputs to be ready before summing. That is to say, accumulate_n is able to save memory when inputs
8874    are ready at different time since the minimum temporary storage is proportional to the output size rather than the
8875    input size.
8876
8877    Args:
8878        x (Union(tuple[Tensor], list[Tensor])): The input tuple or list is made up of multiple tensors whose dtype is
8879            number to be added together. Each element of tuple or list should have the same shape.
8880
8881    Returns:
8882        Tensor, has the same shape and dtype as each entry of `x`.
8883
8884    Raises:
8885        TypeError: If `x` is neither tuple nor list.
8886        ValueError: If there is an input element with a different shape.
8887
8888    Supported Platforms:
8889        ``Ascend`` ``GPU``
8890
8891    Examples:
8892        >>> import mindspore
8893        >>> import numpy as np
8894        >>> from mindspore import Tensor, ops
8895        >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
8896        >>> y = Tensor(np.array([4, 5, 6]), mindspore.float32)
8897        >>> output = ops.accumulate_n([x, y, x, y])
8898        >>> print(output)
8899        [10. 14. 18.]
8900    """
8901    return accumulate_(x)
8902
8903
8904def iou(anchor_boxes, gt_boxes, mode='iou'):
8905    r"""
8906    Calculates intersection over union for boxes.
8907
8908    Computes the intersection over union (IOU) or the intersection over foreground (IOF) based on the ground-truth and
8909    predicted regions.
8910
8911    .. math::
8912        \text{IOU} = \frac{\text{Area of Overlap}}{\text{Area of Union}}
8913
8914        \text{IOF} = \frac{\text{Area of Overlap}}{\text{Area of Ground Truth}}
8915
8916    .. warning::
8917        In Ascend, only computation of float16 data is supported. To avoid overflow, the input length
8918        and width are scaled by 0.2 internally.
8919
8920    Args:
8921        anchor_boxes (Tensor): Anchor boxes, tensor of shape :math:`(N, 4)` . :math:`N` indicates the number of
8922            anchor boxes, and the value :math:`4` refers to four boundary coordinates of the predicted area
8923            "x0", "y0", "x1", and "y1". Data type must be either float16, float32 or float64.
8924        gt_boxes (Tensor): Ground truth boxes, tensor of shape :math:`(M, 4)` . :math:`M` indicates the number
8925            of ground truth boxes, and the value :math:`4` refers to four boundary coordinates of the truth
8926            area "x0", "y0", "x1", and "y1". Data type must be either float16, float32 or float64.
8927        mode (string): The mode is used to specify the calculation method,
8928            now supporting 'iou' (intersection over union) or 'iof' (intersection over foreground) mode.
8929            Default: ``'iou'`` .
8930
8931    Returns:
8932        Tensor, the IOU/IOF values, tensor of shape :math:`(M, N)` , with the same data type as `anchor_boxes`.
8933
8934    Raises:
8935        KeyError: When `mode` is not ``'iou'`` or ``'iof'``.
8936
8937    Supported Platforms:
8938        ``Ascend`` ``GPU`` ``CPU``
8939
8940    Examples:
8941        >>> import mindspore
8942        >>> import numpy as np
8943        >>> from mindspore import Tensor, ops
8944        >>> anchor_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16)
8945        >>> gt_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16)
8946        >>> mode = 'iou'
8947        >>> output = ops.iou(anchor_boxes, gt_boxes, mode)
8948        >>> print(output.shape)
8949        (3, 3)
8950    """
8951    return _get_cache_prim(P.IOU)(mode)(anchor_boxes, gt_boxes)
8952
8953
8954def _check_is_float(dtype):
8955    return dtype in (mstype.float16, mstype.float32)
8956
8957
8958def _list_comprehensions(obj, item):
8959    return tuple([item for _ in range(obj)])
8960
8961
8962def _tuple_setitem(tup, idx, value):
8963    tup = list(tup)
8964    tup[idx] = value
8965    return tuple(tup)
8966
8967
8968@_primexpr
8969def _check_dim_in_range(dim, ndim):
8970    def _check(dim, ndim):
8971        if not isinstance(dim, int):
8972            raise TypeError(f'axes should be integers, not {type(dim)}')
8973        if -ndim > dim or dim >= ndim:
8974            raise ValueError(f'dim {dim} is out of bounds for array of dimension {ndim}')
8975
8976    _check(dim, ndim)
8977    return dim % ndim
8978
8979
8980def dotrapezoid(y, dx, dim):
8981    y_left = _select(y, dim, 0)
8982    y_right = _select(y, dim, -1)
8983    y_sum = y.sum(dim)
8984    return (y_sum - (y_left + y_right) * 0.5) * dx
8985
8986
8987def dotrapezoid_tensor(y, dx, dim):
8988    y_start_dim_left = [0 for _ in range(dim)]
8989    y_start_dim_left = tuple(y_start_dim_left)
8990    y_start_dim_right = [0 for _ in range(y.ndim - dim - 1)]
8991    y_start_dim_right = tuple(y_start_dim_right)
8992    y_slice_size = _tuple_setitem(shape_(y), dim, shape_(y)[dim] - 1)
8993    y_slice_left = slice_(y, y_start_dim_left + (0,) + y_start_dim_right, y_slice_size)
8994    y_slice_right = slice_(y, y_start_dim_left + (1,) + y_start_dim_right, y_slice_size)
8995    return (tensor_add(y_slice_left, y_slice_right) * dx).sum(dim) / 2.
8996
8997
8998def add_padding_to_shape(curr_shape, target_n_dim):
8999    curr_size = len(curr_shape)
9000    if curr_size >= target_n_dim:
9001        target_n_dim = curr_size
9002    new_shape = [1 for _ in range(target_n_dim)]
9003    for i in range(curr_size):
9004        new_shape[target_n_dim - i - 1] = curr_shape[curr_size - i - 1]
9005    return new_shape
9006
9007
9008def zeros_like_except(y, dim):
9009    _check_dim_in_range(dim, y.ndim)
9010    dim = dim + y.ndim if dim < 0 else dim
9011    sizes = y.shape[:dim] + y.shape[dim + 1:]
9012    zeros = F.zeros(sizes, y.dtype)
9013    return zeros
9014
9015
9016def trapezoid_tensor(y, x, dim):
9017    r"""
9018    add trapezoid implementation when x is not None.
9019    """
9020    if y.shape[dim] == 0:
9021        return zeros_like_except(y, dim)
9022    if x.ndim < y.ndim and x.ndim != 1:
9023        x_start_dim_left = [0 for _ in range(dim)]
9024        x_start_dim_left = tuple(x_start_dim_left)
9025        x_start_dim_right = [0 for _ in range(x.ndim - dim - 1)]
9026        x_start_dim_right = tuple(x_start_dim_right)
9027        x_slice_size = _tuple_setitem(x.shape, dim, x.shape[dim] - 1)
9028        x_left = slice_(x, x_start_dim_left + (0,) + x_start_dim_right, x_slice_size)
9029        x_right = slice_(x, x_start_dim_left + (1,) + x_start_dim_right, x_slice_size)
9030        dx = x_right - x_left
9031        new_sizes = add_padding_to_shape(dx.shape, y.ndim)
9032        dx = dx.view(tuple(new_sizes))
9033        return dotrapezoid_tensor(y, dx, dim)
9034    if x.ndim == 1:
9035        if x.shape[0] != y.shape[dim]:
9036            raise RuntimeError("There must be one `x` value for each sample point")
9037        new_sizes = [1 for _ in range(y.ndim)]
9038        new_sizes[dim] = x.shape[0]
9039        x_viewed = x.view(tuple(new_sizes))
9040    else:
9041        x_viewed = x
9042    x_start_dim_left = [0 for _ in range(dim)]
9043    x_start_dim_left = tuple(x_start_dim_left)
9044    x_start_dim_right = [0 for _ in range(x_viewed.ndim - dim - 1)]
9045    x_start_dim_right = tuple(x_start_dim_right)
9046    x_slice_size = _tuple_setitem(x_viewed.shape, dim, x_viewed.shape[dim] - 1)
9047    x_left = slice_(x_viewed, x_start_dim_left + (0,) + x_start_dim_right, x_slice_size)
9048    x_right = slice_(x_viewed, x_start_dim_left + (1,) + x_start_dim_right, x_slice_size)
9049    dx = x_right - x_left
9050    return dotrapezoid_tensor(y, dx, dim)
9051
9052
9053def trapezoid(y, dx, dim):
9054    if y.shape[dim] == 0:
9055        return zeros_like_except(y, dim)
9056    return dotrapezoid(y, dx, dim)
9057
9058
9059def get(ts, depth, dim, index, r):
9060    if depth == dim:
9061        r.append(ts[index])
9062        return 0
9063    for item in ts:
9064        return get(item, depth + 1, dim, index, r)
9065
9066
9067def _select(feat, dim, index):
9068    select_shape = feat.shape
9069    select_shape = list(select_shape)
9070    select_shape[dim] = 1
9071    new_shape = feat.shape[:dim] + feat.shape[dim + 1:]
9072    indexes = ones_(tuple(select_shape), mstype.int32) * (index)
9073    return feat.gather_elements(dim, indexes).reshape(new_shape)
9074
9075
9076def trapz(y, x=None, *, dx=1.0, dim=-1):
9077    r"""
9078    Integrates `y(x)` along given dim using trapezoidal rule.
9079    By default x-dim distances between points will be 1.0,
9080    alternatively they can be provided with `x` array or with `dx` scalar.
9081
9082    .. math::
9083
9084        \mathop{ \int }\nolimits_{{}}^{{}}{y}{ \left( {x} \right) } \text{d} x
9085
9086    Args:
9087        y (Tensor): Input tensor to integrate.
9088        x (Tensor, optional): The sample points corresponding to the `y` values. If `x` is None,
9089            the sample points are assumed to be evenly spaced `dx` apart. Default: ``None`` .
9090            If `x` is not None, after subtracting 1 from the axis specified by `dim`, the shape of `x`
9091            should be same as `y` or can broadcast to `y`.
9092
9093    Keyword Args:
9094        dx (float, optional): The spacing between sample points when `x` is None. If `x` is specified,
9095            `dx` does not take effect. Default: ``1.0`` .
9096        dim (int, optional): The dim along which to integrate. Default: ``-1`` .
9097
9098    Returns:
9099        Tensor of float, definite integral as approximated by trapezoidal rule.
9100        If `y` is a one-dimensional array, the result is a floating-point number. If `y` is
9101        an n-dimensional array, the result is an N-1 dimensional array.
9102
9103    Raises:
9104        RuntimeError: If dim of `x` is 1, and x.shape[0] is not equal to y.shape[dim].
9105        ValueError: If `dim` is out of range of :math:`[-y.ndim, y.ndim)`.
9106        TypeError: If `y` is not a Tensor.
9107        TypeError: If `x` is not None and is not a Tensor.
9108        TypeError: If `dx` is not a float number.
9109        TypeError: If `dim` is not a Integer.
9110
9111    Supported Platforms:
9112        ``Ascend`` ``GPU`` ``CPU``
9113
9114    Examples:
9115        >>> import numpy as np
9116        >>> from mindspore import Tensor, ops
9117        >>> y = Tensor(np.array([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]).astype(np.float32))
9118        >>> x = Tensor(np.array([[1, 2, 3], [1, 3, 5], [1, 4, 7]]).astype(np.float32))
9119        >>> output = ops.trapz(y, x)
9120        >>> print(output)
9121        [2. 4. 6.]
9122    """
9123
9124    if not isinstance(y, (Tensor, Tensor_)):
9125        raise TypeError(f"For `trapz`, the input `y` must be Tensor, but get {type(y)}.")
9126    if not isinstance(dx, float):
9127        raise TypeError(f"For `trapz`, the input `dx` must be float, but get f{type(dx)}.")
9128    if not isinstance(dim, int):
9129        raise TypeError(f"For `trapz`, the input `dim` must be int, but get {type(dim)}.")
9130    if not _check_is_float(y.dtype):
9131        y = cast_(y, mstype.float32)
9132    _check_dim_in_range(dim, y.ndim)
9133    dim = dim + y.ndim if dim < 0 else dim
9134    if x is None:
9135        return trapezoid(y, dx, dim)
9136    if not isinstance(x, (Tensor, Tensor_)):
9137        raise TypeError(f"For `trapz`, the input `x` must be Tensor, but get {type(x)}.")
9138    x = cast_(x, mstype.float32)
9139    return trapezoid_tensor(y, x, dim)
9140
9141
9142def cholesky(input_x, upper=False):
9143    r"""
9144    Returns the Cholesky decomposition of zero or more batch dimensions consisting of symmetric positive-definite
9145    matrices.
9146
9147    If `upper` is `True`, returns an upper-triangular matrix, :math:`U`, and the decomposition has the form:
9148
9149    .. math::
9150        A = U^TU
9151
9152    If `upper` is `False`, returns a lower-triangular matrix, :math:`L`, and the decomposition has the form:
9153
9154    .. math::
9155        A = LL^T
9156
9157    where `A` is the symmetric positive-definite matrix.
9158
9159    Args:
9160        input_x (Tensor): Tensor of shape :math:`(*, N, N)`, where :math:`*` is zero or more batch dimensions
9161            consisting of symmetric positive-definite matrices, with float32 or float64 data type.
9162        upper (bool): If `upper` is `True`, returns an upper-triangular matrix. If `upper` is `False`, returns
9163            a lower-triangular matrix. Default: ``False`` .
9164
9165    Returns:
9166        Tensor, has the same shape and data type as `input_x`.
9167
9168    Raises:
9169        TypeError: If `upper` is not a bool.
9170        TypeError: If dtype of `input_x` is not one of: float64, float32.
9171        TypeError: If `input_x` is not a Tensor.
9172        ValueError: If `input_x` is not a or a batch of square matrix.
9173        ValueError: If `input_x` is not symmetric positive definite.
9174
9175    Supported Platforms:
9176        ``GPU`` ``CPU``
9177
9178    Examples:
9179        >>> import mindspore
9180        >>> import numpy as np
9181        >>> from mindspore import Tensor, ops
9182        >>> input_x = Tensor(np.array([[1.0, 1.0], [1.0, 2.0]]), mindspore.float32)
9183        >>> output = ops.cholesky(input_x, upper=False)
9184        >>> print(output)
9185        [[1. 0.]
9186         [1. 1.]]
9187    """
9188    cholesky_op = _get_cache_prim(P.Cholesky)(upper=upper)
9189    return cholesky_op(input_x)
9190
9191
9192def cholesky_inverse(input_x, upper=False):
9193    r"""
9194    Returns the inverse of the positive definite matrix using cholesky matrix factorization by its Cholesky factor.
9195
9196    If `upper` is `True`, :math:`U` is an upper triangular such that the output tensor is
9197
9198    .. math::
9199
9200        inv = (U^{T}U)^{-1}
9201
9202    If `upper` is `False`, :math:`U` is a lower triangular such that the output tensor is
9203
9204    .. math::
9205
9206        inv = (UU^{T})^{-1}
9207
9208    Note:
9209        The input must be either an upper-triangular matrix or a lower-triangular matrix from Cholesky decomposition.
9210
9211    Args:
9212        input_x (Tensor): The input tensor with a rank of 2. Supported dtypes: float32, float64.
9213        upper (bool): If `upper` is `True`, return an upper triangular matrix. If `upper` is `False`, return
9214            a lower-triangular matrix. Default: ``False``.
9215
9216    Returns:
9217        Tensor, has the same shape and dtype as `input_x`.
9218
9219    Raises:
9220        TypeError: If `input_x` is not a Tensor.
9221        TypeError: If dtype of `input_x` is not one of: float32, float64.
9222        ValueError: If the dimension of `input_x` is not equal to 2.
9223
9224    Supported Platforms:
9225        ``Ascend`` ``CPU``
9226
9227    Examples:
9228        >>> import mindspore
9229        >>> import numpy as np
9230        >>> from mindspore import Tensor, ops
9231        >>> input_x = Tensor(np.array([[2,0,0], [4,1,0], [-1,1,2]]), mindspore.float32)
9232        >>> output = ops.cholesky_inverse(input_x)
9233        >>> print(output)
9234        [[ 5.8125 -2.625   0.625 ]
9235         [-2.625   1.25   -0.25  ]
9236         [ 0.625  -0.25    0.25  ]]
9237    """
9238    cholesky_inv_op = _get_cache_prim(P.CholeskyInverse)(upper=upper)
9239    return cholesky_inv_op(input_x)
9240
9241
9242def cholesky_solve(input, input2, upper=False):
9243    r"""
9244    Computes the solution of a set of linear equations with a positive definite matrix,
9245    according to its Cholesky decomposition factor `input2` .
9246
9247    If `upper` is set to ``True`` and `input2` is upper triangular, the output tensor is that:
9248
9249    .. math::
9250        output = (input2^{T} * input2)^{{-1}}input
9251
9252    If `upper` is set to ``False`` and `input2` is lower triangular, the output is that:
9253
9254    .. math::
9255        output = (input2 * input2^{T})^{{-1}}input
9256
9257    .. warning::
9258        This is an experimental API that is subject to change or deletion.
9259
9260    Args:
9261        input (Tensor): Tensor of shape :math:`(*, N, M)`, indicating 2D or 3D matrices,
9262            with float32 or float64 data type.
9263        input2 (Tensor): Tensor of shape :math:`(*, N, N)`, indicating 2D or 3D square matrices composed of
9264            upper or lower triangular Cholesky factor, with float32 or float64 data type.
9265            `input` and `input2` must have the same type.
9266        upper (bool, optional): A flag indicates whether to treat the Cholesky factor
9267            as an upper or a lower triangular matrix. Default: ``False``, treating the Cholesky factor
9268            as a lower triangular matrix.
9269
9270    Returns:
9271        Tensor, has the same shape and data type as `input`.
9272
9273    Raises:
9274        TypeError: If `upper` is not a bool.
9275        TypeError: If dtype of `input` and `input2` is not float64 or float32.
9276        TypeError: If `input` is not a Tensor.
9277        TypeError: If `input2` is not a Tensor.
9278        ValueError: If `input` and `input2` have different batch size.
9279        ValueError: If `input` and `input2` have different row numbers.
9280        ValueError: If `input` is not 2D or 3D matrices.
9281        ValueError: If `input2` is not 2D or 3D square matrices.
9282
9283    Supported Platforms:
9284        ``Ascend`` ``GPU`` ``CPU``
9285
9286    Examples:
9287        >>> import mindspore
9288        >>> import numpy as np
9289        >>> from mindspore import Tensor, ops
9290        >>> input1 = Tensor(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), mindspore.float32)
9291        >>> input2 = Tensor(np.array([[2, 0, 0], [4, 1, 0], [-1, 1, 2]]), mindspore.float32)
9292        >>> out = ops.cholesky_solve(input1, input2, upper=False)
9293        >>> print(out)
9294        [[ 5.8125 -2.625   0.625 ]
9295         [-2.625   1.25   -0.25  ]
9296         [ 0.625  -0.25    0.25  ]]
9297    """
9298    return _get_cache_prim(P.CholeskySolve)(upper)(input, input2)
9299
9300
9301def cross(input, other, dim=None):
9302    r"""
9303    Computes the cross product of `input` and `other` in dimension `dim`.
9304    `input` and `other` must have the same shape, and the size of their `dim` dimension should be `3`.
9305    If `dim` is not specified, it is set to be the first dimension found with the size `3`.
9306
9307    Args:
9308        input (Tensor): input is a tensor.
9309        other (Tensor):  The other Tensor, `other` must have the same shape and type as input `input`, and
9310            the size of their `dim` dimension should be `3`.
9311        dim (int, optional): dimension to apply cross product in. if `dim` is None, it is set to be the first dimension
9312            found with the size `3`. Default: ``None``.
9313
9314    Returns:
9315        Tensor, has the same shape and type as input `input`.
9316
9317    Raises:
9318        TypeError: If `input` is not a Tensor.
9319        TypeError: If `other` is not a Tensor.
9320        TypeError: If the type of `input` is not the same as that of `other`.
9321        ValueError: If `input` and `other` not have the same size, and the size of their `dim` dimension not be `3`.
9322        ValueError: If `input` and `other` not have the same shape.
9323        ValueError: If `dim` is out of range, `dim` should be [-len(input.shape), len(input.shape)-1].
9324
9325    Supported Platforms:
9326        ``Ascend`` ``CPU``
9327
9328    Examples:
9329        >>> from mindspore import Tensor, ops
9330        >>> # case 1: dim=None.
9331        >>> x = Tensor([[1, 2, 3], [1, 2, 3]])
9332        >>> other = Tensor([[4, 5, 6], [4, 5, 6]])
9333        >>> output = ops.cross(x, other)
9334        >>> print(output)
9335        [[-3  6 -3]
9336         [-3  6 -3]]
9337        >>> # case 2: dim=1.
9338        >>> x = Tensor([[1, 2, 3], [1, 2, 3]])
9339        >>> other = Tensor([[4, 5, 6], [4, 5, 6]])
9340        >>> output = ops.cross(x, other, dim=1)
9341        >>> print(output)
9342        [[-3  6 -3]
9343         [-3  6 -3]]
9344    """
9345    if dim is None:
9346        dim = -65530
9347    cross_op = _get_cache_prim(P.Cross)(dim=dim)
9348    return cross_op(input, other)
9349
9350
9351def _einsum_convert_num_to_char(num):
9352    """For einsum, convert number into char."""
9353    if [num] == [Ellipsis]:
9354        return '...'
9355    # pylint: disable=chained-comparison
9356    if num >= 0 and num < 26:
9357        return chr(num + ord('A'))
9358    # pylint: disable=chained-comparison
9359    if num >= 26 and num < 52:
9360        return chr(num - 26 + ord('a'))
9361    raise ValueError(f"For Einsum, the number in sublist should be in range [0, 52), but got {num}")
9362
9363
9364def einsum(equation, *operands):
9365    r"""
9366    According to the Einstein summation Convention (Einsum),
9367    the product of the input tensor elements is summed along the specified dimension.
9368    You can use this operator to perform diagonal, reducesum, transpose, matmul, mul, inner product operations, etc.
9369
9370    Note:
9371        The sublist format is also supported. For example, ops.einsum(op1, sublist1, op2, sublist2, ..., sublist_out).
9372        In this format, equation can be derived by the sublists which are made up of Python's Ellipsis and list of
9373        integers in [0, 52). Each operand is followed by a sublist and an output sublist is at the end.
9374
9375    Args:
9376        equation (str): Notation based on the Einstein summation convention, represent the operation you want to do.
9377            the value can contain only letters, commas, ellipsis and arrow.
9378            The letters represent input tensor dimension, commas represent separate tensors, ellipsis indicates
9379            the tensor dimension that you do not care about, the left of the arrow indicates the input tensors,
9380            and the right of it indicates the desired output dimension.
9381        operands (Tensor): Input tensor used for calculation. The dtype of the tensor must be the same.
9382
9383    Returns:
9384        Tensor, the shape of it can be obtained from the `equation` , and the dtype is the same as input tensors.
9385
9386    Raises:
9387        TypeError: If `equation` is invalid, or the `equation` does not match the input tensor.
9388        ValueError: If the number in sublist is not in [0, 52) in sublist format.
9389
9390    Supported Platforms:
9391        ``GPU``
9392
9393    Examples:
9394        >>> import mindspore
9395        >>> import numpy as np
9396        >>> from mindspore import Tensor, ops
9397        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
9398        >>> equation = "i->"
9399        >>> output = ops.einsum(equation, x)
9400        >>> print(output)
9401        [7.]
9402        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
9403        >>> y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
9404        >>> equation = "i,i->i"
9405        >>> output = ops.einsum(equation, x, y)
9406        >>> print(output)
9407        [ 2. 8. 12.]
9408        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
9409        >>> y = Tensor(np.array([[2.0, 3.0], [1.0, 2.0], [4.0, 5.0]]), mindspore.float32)
9410        >>> equation = "ij,jk->ik"
9411        >>> output = ops.einsum(equation, x, y)
9412        >>> print(output)
9413        [[16. 22.]
9414         [37. 52.]]
9415        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
9416        >>> equation = "ij->ji"
9417        >>> output = ops.einsum(equation, x)
9418        >>> print(output)
9419        [[1. 4.]
9420         [2. 5.]
9421         [3. 6.]]
9422        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
9423        >>> equation = "ij->j"
9424        >>> output = ops.einsum(equation, x)
9425        >>> print(output)
9426        [5. 7. 9.]
9427        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
9428        >>> equation = "...->"
9429        >>> output = ops.einsum(equation, x)
9430        >>> print(output)
9431        [21.]
9432        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
9433        >>> y = Tensor(np.array([2.0, 4.0, 1.0]), mindspore.float32)
9434        >>> equation = "j,i->ji"
9435        >>> output = ops.einsum(equation, x, y)
9436        >>> print(output)
9437        [[ 2. 4. 1.]
9438         [ 4. 8. 2.]
9439         [ 6. 12. 3.]]
9440        >>> x = mindspore.Tensor([1, 2, 3, 4], mindspore.float32)
9441        >>> y = mindspore.Tensor([1, 2], mindspore.float32)
9442        >>> output = ops.einsum(x, [..., 1], y, [..., 2], [..., 1, 2])
9443        >>> print(output)
9444        [[1. 2.]
9445         [2. 4.]
9446         [3. 6.]
9447         [4. 8.]]
9448    """
9449    if isinstance(equation, Tensor):
9450        equ_tmp = ''
9451        for i, lst in enumerate(operands):
9452            if i % 2 == 0:
9453                for _, num in enumerate(lst):
9454                    equ_tmp += _einsum_convert_num_to_char(num)
9455                if i in (len(operands) - 1, len(operands) - 2):
9456                    continue
9457                equ_tmp += ','
9458        if len(operands) % 2 == 0:
9459            equ_tmp += '->'
9460            for _, num in enumerate(operands[-1]):
9461                equ_tmp += _einsum_convert_num_to_char(num)
9462            operands_tmp = list([equation]) + list(operands[1:-1:2])
9463        else:
9464            operands_tmp = list([equation]) + list(operands[1::2])
9465        equation = equ_tmp
9466        operands = tuple(operands_tmp)
9467    return _get_cache_prim(P.Einsum)(equation)(operands)
9468
9469
9470def cumprod(input, dim, dtype=None):
9471    r"""
9472    Computes the cumulative product of the `input` tensor along dimension `dim`.
9473    For example, if `input` is a vector of size `N`, the result will also be a vector of size `N`, with elements.
9474
9475    .. math::
9476
9477        y_i = x_1 * x_2 * x_3 * ... * x_i
9478
9479    Args:
9480        input (Tensor[Number]): The input tensor.
9481            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
9482        dim (int): The dimensions to compute the cumulative product. Only constant value is allowed.
9483        dtype (:class:`mindspore.dtype`, optional): The desired data type of output.
9484            If not specified, remains the same as the original Tensor. Default: ``None`` .
9485
9486    Returns:
9487        Tensor, has the same shape and dtype as the `input` unless `dtype` is specified.
9488
9489    Raises:
9490        TypeError: If `dim` is not an int.
9491        TypeError: If `dtype` conversion is not acceptable.
9492        ValueError: If `dim` is None.
9493
9494    Supported Platforms:
9495        ``Ascend`` ``GPU`` ``CPU``
9496
9497    Examples:
9498        >>> import numpy as np
9499        >>> from mindspore import Tensor, ops
9500        >>> x = Tensor(np.array([1, 2, 3], np.float32))
9501        >>> output = ops.cumprod(x, 0)
9502        >>> print(output)
9503        [1. 2. 6.]
9504    """
9505    output = cumprod_(input, dim)
9506    if dtype:
9507        output = cast_(output, dtype)
9508    return output
9509
9510
9511def igamma(input, other):
9512    r"""
9513    Calculates lower regularized incomplete Gamma function.
9514
9515    If we define `input` as `a` and `other` as `x`, the lower regularized incomplete Gamma function is defined as:
9516
9517    .. math::
9518        P(a, x) = Gamma(a, x) / Gamma(a) = 1 - Q(a, x)
9519
9520    where
9521
9522    .. math::
9523        Gamma(a, x) = \int_0^x t^{a-1} \exp^{-t} dt
9524
9525    is the lower incomplete Gamma function.
9526
9527    Above :math:`Q(a, x)` is the upper regularized complete Gamma function.
9528
9529    .. warning::
9530        This is an experimental API that is subject to change or deletion.
9531
9532    Args:
9533        input (Tensor): The first input tensor. With type of float32 or float64.
9534        other (Tensor): The second input tensor. With float32 or float64 type. `other` should have
9535          the same dtype with `input`.
9536
9537    Returns:
9538        Tensor, has the same dtype as `input` and `other`.
9539
9540    Raises:
9541        TypeError: If `input` or `other` is not a Tensor.
9542        TypeError: If dtype of input `other` and a is not float32 nor float64.
9543        TypeError: If `other` has different dtype with `input`.
9544        ValueError: If `input` could not be broadcast to a tensor with shape of `other`.
9545
9546    Supported Platforms:
9547        ``Ascend`` ``GPU`` ``CPU``
9548
9549    Examples:
9550        >>> import numpy as np
9551        >>> from mindspore import Tensor, ops
9552        >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
9553        >>> x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32))
9554        >>> output = ops.igamma(a, x)
9555        >>> print(output)
9556        [0.593994 0.35276785 0.21486944 0.13337152]
9557    """
9558    return igamma_(input, other)
9559
9560
9561def igammac(input, other):
9562    r"""
9563    Calculates upper regularized incomplete Gamma function.
9564
9565    If we define `input` as `a` and `other` as `x`, the upper regularized incomplete Gamma function is defined as:
9566
9567    .. math::
9568        Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)
9569
9570    where
9571
9572    .. math::
9573        Gamma(a, x) = \int_{x}^{\infty} t^{a-1} exp(-t) dt
9574
9575    is the upper incomplete Gama function.
9576
9577    Above :math:`P(a, x)` is the lower regularized complete Gamma function.
9578
9579    .. warning::
9580        This is an experimental API that is subject to change or deletion.
9581
9582    Args:
9583        input (Tensor): The first input tensor. With type of float32 or float64.
9584        other (Tensor): The second input tensor. With float32 or float64 type. `other` should have
9585            the same dtype with `input`.
9586
9587    Returns:
9588        Tensor, has the same dtype as `input` and `other`.
9589
9590    Raises:
9591        TypeError: If `input` or `other` is not a Tensor.
9592        TypeError: If dtype of input `other` and a is not float32 nor float64.
9593        TypeError: If `other` has different dtype with `input`.
9594        ValueError: If `input` could not be broadcast to a tensor with shape of `other`.
9595
9596    Supported Platforms:
9597        ``Ascend`` ``GPU`` ``CPU``
9598
9599    Examples:
9600        >>> import numpy as np
9601        >>> from mindspore import Tensor, ops
9602        >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
9603        >>> x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32))
9604        >>> output = ops.igammac(a, x)
9605        >>> print (output)
9606        [0.40600586 0.6472318 0.7851304 0.8666283]
9607    """
9608    return igammac_(input, other)
9609
9610
9611def lgamma(input):
9612    r"""
9613    Computes the natural logarithm of the absolute value of the gamma function on input.
9614
9615    .. math::
9616        \text{out}_{i} = \ln \Gamma(|\text{input}_{i}|)
9617
9618    Args:
9619        input (Tensor): The input tensor. With type of float16 or float32 or float64.
9620
9621    Returns:
9622        Tensor, has the same dtype as `input`.
9623
9624    Raises:
9625        TypeError: If `input` is not a Tensor.
9626        TypeError: If dtype of `input` is not float16 or float32 or float64.
9627
9628    Supported Platforms:
9629        ``GPU`` ``CPU``
9630
9631    Examples:
9632        >>> import mindspore
9633        >>> import numpy as np
9634        >>> from mindspore import Tensor, ops
9635        >>> x = Tensor(np.array([0.5, 3.2, 8.5]), mindspore.float32)
9636        >>> output = ops.lgamma(x)
9637        >>> print(output)
9638        [0.5723649 0.8854049 9.549267 ]
9639        >>> x = Tensor(2.1, mindspore.float32)
9640        >>> output = ops.lgamma(x)
9641        >>> print(output)
9642        0.045437694
9643    """
9644    return lgamma_(input)
9645
9646
9647def digamma(input):
9648    r"""
9649    Computes the derivative of the lgamma function on input.
9650
9651    .. math::
9652        P(x) = \frac{d}{dx}(\ln (\Gamma(x)))
9653
9654    .. warning::
9655        This is an experimental API that is subject to change or deletion.
9656
9657    Args:
9658        input (Tensor): The input tensor. With type of float16 or float32 or float64.
9659
9660    Returns:
9661        Tensor, has the same dtype as `input`.
9662
9663    Raises:
9664        TypeError: If `input` is not a Tensor.
9665        TypeError: If dtype of `input` is not float16 or float32 or float64.
9666
9667    Supported Platforms:
9668        ``GPU`` ``CPU``
9669
9670    Examples:
9671        >>> import numpy as np
9672        >>> from mindspore import Tensor, ops
9673        >>> x = Tensor(np.array([1.5, 0.5, 9]).astype(np.float16))
9674        >>> output = ops.digamma(x)
9675        >>> print(output)
9676        [ 0.0365 -1.964   2.14  ]
9677    """
9678    return digamma_(input)
9679
9680
9681def polygamma(n, input):
9682    r"""
9683    Computes the :math:`n`-th derivative of the polygamma function on `input`.
9684
9685    .. math::
9686        \psi^{(a)}(x) = \frac{d^{(a)}}{dx^{(a)}} \psi(x)
9687
9688    where :math:`\psi(x)` is the digamma function.
9689
9690    Args:
9691        n (Tensor): The order of the polygamma function.
9692            Supported dtypes: int32, int64. The shape of `n` is :math:`()`.
9693        input (Tensor): The tensor to compute the :math:`n`-th derivative of the polygamma function with.
9694
9695    Returns:
9696        Tensor, has the same dtype as `input`.
9697
9698    Raises:
9699        TypeError: If `input` is not a Tensor.
9700        TypeError: If dtype of `input` is not one of: float16, float32, float64.
9701        TypeError: If dtype of `n` is not one of: int32, int64.
9702        TypeError: If shape of `n` is not :math:`()`.
9703
9704    Supported Platforms:
9705        ``GPU`` ``CPU``
9706
9707    Examples:
9708        >>> import mindspore
9709        >>> import numpy as np
9710        >>> from mindspore import Tensor, ops
9711        >>> x = Tensor(np.array([3.14, -2.71]), mindspore.float64)
9712        >>> a = Tensor(np.array(1), mindspore.int64)
9713        >>> output = ops.polygamma(a, x)
9714        >>> print(output)
9715        [ 0.37446456 15.49884838]
9716    """
9717    return poly_gamma_(n, input)
9718
9719
9720def isinf(input):
9721    r"""
9722    Determines which elements are inf or -inf for each position.
9723
9724    .. math::
9725
9726        out_i = \begin{cases}
9727          & \ True,\ \text{ if } x_{i} = \text{Inf} \\
9728          & \ False,\ \text{ if } x_{i} \ne  \text{Inf}
9729        \end{cases}
9730
9731    where :math:`Inf` means not a number.
9732
9733    Args:
9734        input (Tensor): The input tensor.
9735
9736    Returns:
9737        Tensor, has the same shape of input, and the dtype is bool.
9738
9739    Raises:
9740        TypeError: If `input` is not a Tensor.
9741
9742    Supported Platforms:
9743        ``Ascend`` ``GPU`` ``CPU``
9744
9745    Examples:
9746        >>> import mindspore
9747        >>> import numpy as np
9748        >>> from mindspore import Tensor, ops
9749        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
9750        >>> output = ops.isinf(x)
9751        >>> print(output)
9752        [False False True]
9753        >>> x = Tensor(2.1, mindspore.float64)
9754        >>> output = ops.isinf(x)
9755        >>> print(output)
9756        False
9757    """
9758    return isinf_(input)
9759
9760
9761def _is_sign_inf(x, fn):
9762    """Tests element-wise for infinity with sign."""
9763    shape = x.shape
9764    zeros_tensor = zeros_(shape, mstype.float32)
9765    ones_tensor = ones_(shape, mstype.float32)
9766    is_inf = isinf_(x)
9767    is_sign = fn(x, zeros_tensor)
9768    res = ops.select(is_inf, ones_tensor, zeros_tensor)
9769    res = ops.select(is_sign, res, zeros_tensor)
9770    return cast_(res, mstype.bool_)
9771
9772
9773def isposinf(input):
9774    """
9775    Tests element-wise for positive infinity.
9776
9777    Args:
9778        input (Tensor): Input values.
9779
9780    Returns:
9781       Tensor, true where `input` is positive infinity, false otherwise.
9782
9783    Raises:
9784        TypeError: If the input is not a tensor.
9785
9786    Supported Platforms:
9787        ``Ascend`` ``GPU`` ``CPU``
9788
9789    Examples:
9790        >>> from mindspore import ops, Tensor
9791        >>> from mindspore import dtype as mstype
9792        >>> output = ops.isposinf(Tensor([[-float("inf"), float("inf")], [1, float("inf")]], mstype.float32))
9793        >>> print(output)
9794        [[False  True]
9795         [False  True]]
9796    """
9797    _check_is_tensor("input", input, "isposinf")
9798    return _is_sign_inf(input, tensor_gt)
9799
9800
9801def isneginf(input):
9802    """
9803    Tests element-wise for negative infinity.
9804
9805    Args:
9806        input (Tensor): Input Tensor.
9807
9808    Returns:
9809       Tensor, true where `input` is negative infinity, false otherwise.
9810
9811    Raises:
9812        TypeError: If the input is not a tensor.
9813
9814    Supported Platforms:
9815        ``Ascend`` ``GPU`` ``CPU``
9816
9817    Examples:
9818        >>> from mindspore import ops, Tensor
9819        >>> from mindspore import dtype as mstype
9820        >>> output = ops.isneginf(Tensor([[-float("inf"), float("inf")], [1, -float("inf")]], mstype.float32))
9821        >>> print(output)
9822        [[ True False]
9823         [False  True]]
9824    """
9825    _check_is_tensor("input", input, "isneginf")
9826    return _is_sign_inf(input, tensor_lt)
9827
9828
9829def logical_xor(input, other):
9830    r"""
9831    Computes the "logical XOR" of two tensors element-wise.
9832
9833    .. math::
9834
9835        out_{i} = input_{i} \oplus other_{i}
9836
9837    Args:
9838        input (Tensor): The first input is a tensor whose data type can be implicitly converted to bool.
9839        other (Tensor): The second input is a tensor whose data type can be implicitly converted to bool
9840            to compute XOR with the first input.
9841
9842    Returns:
9843        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
9844
9845    Raises:
9846        TypeError: If the dtype of `input` or `other` is not bool or can not be implicitly converted to bool.
9847        ValueError: If the shape of two inputs cannot be broadcast.
9848
9849    Supported Platforms:
9850        ``Ascend`` ``CPU``
9851
9852    Examples:
9853        >>> import mindspore
9854        >>> import numpy as np
9855        >>> from mindspore import Tensor, ops
9856        >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
9857        >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
9858        >>> output = ops.logical_xor(x, y)
9859        >>> print(output)
9860        [False True True]
9861        >>> x = Tensor(1, mindspore.bool_)
9862        >>> y = Tensor(0, mindspore.bool_)
9863        >>> output = ops.logical_xor(x, y)
9864        >>> print(output)
9865        True
9866        >>> x = True
9867        >>> y = Tensor(0, mindspore.bool_)
9868        >>> output = ops.logical_xor(x, y)
9869        >>> print(output)
9870        True
9871        >>> x = True
9872        >>> y = Tensor(np.array([True, False]), mindspore.bool_)
9873        >>> output = ops.logical_xor(x, y)
9874        >>> print(output)
9875        [False True]
9876    """
9877    if isinstance(input, Tensor) and input.dtype != mstype.bool_:
9878        input = input.astype(mstype.bool_)
9879    if isinstance(other, Tensor) and other.dtype != mstype.bool_:
9880        other = other.astype(mstype.bool_)
9881    return logical_xor_(input, other)
9882
9883
9884def imag(input):
9885    r"""
9886    Returns a new tensor containing imaginary value of the `input`.
9887    If `input` is real, it will return zeros.
9888
9889    Args:
9890        input (Tensor): The input tensor to compute to.
9891
9892    Returns:
9893        Tensor, the shape is the same as the `input`.
9894
9895    Raises:
9896       TypeError: If `input` is not a Tensor.
9897
9898    Supported Platforms:
9899        ``Ascend`` ``GPU`` ``CPU``
9900
9901    Examples:
9902        >>> import mindspore
9903        >>> import numpy as np
9904        >>> from mindspore import Tensor, ops
9905        >>> x = Tensor(np.asarray(np.complex(1.3 + 0.4j)), mindspore.complex64)
9906        >>> output = ops.imag(x)
9907        >>> print(output)
9908        0.4
9909    """
9910    return imag_(input)
9911
9912
9913@_primexpr
9914def _check_repeat_in_axis(axis, x_ndim, prim_name):
9915    """check repeat dim in axis"""
9916    if isinstance(axis, (list, tuple)):
9917        axis_deal = [dim + x_ndim if dim < 0 else dim for dim in axis]
9918        for dim in axis_deal:
9919            if axis_deal.count(dim) > 1:
9920                raise RuntimeError(f"For {prim_name}, dim {dim} appears multiple times in axis.")
9921
9922
9923def nansum(input, axis=None, keepdims=False, *, dtype=None):
9924    """
9925    Computes sum of `input` over a given dimension, treating NaNs as zero.
9926
9927    Args:
9928        input (Tensor): The input Tensor.
9929        axis (Union[int, tuple(int)], optional): The dimensions to reduce. Supposed the rank of `input` is r,
9930            axis must be in the range [-rank(input), rank(input)). Default: ``None``, all dimensions are reduced.
9931        keepdims (bool, optional): Whether the output Tensor keeps dimensions or not. Default: ``False``.
9932
9933    Keyword Args:
9934        dtype (:class:`mindspore.dtype`, optional): The dtype of output Tensor. Default: ``None``.
9935
9936    Returns:
9937        Tensor, the sum of input `input` in the given dimension dim, treating NaNs as zero.
9938
9939        - If axis is None, keepdims is False,
9940          the output is a 0-D Tensor representing the sum of all elements in the input Tensor.
9941        - If axis is int, set as 2, and keepdims is False,
9942          the shape of output is :math:`(input_1, input_3, ..., input_R)`.
9943        - If axis is tuple(int) or list(int), set as (2, 3), and keepdims is False,
9944          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
9945
9946    Raises:
9947        TypeError: If `input` is not Tensor.
9948        TypeError: If `keepdims` is not a bool.
9949        TypeError: If the dtype of `input` or `dtype` is complex type.
9950        ValueError: If 'axis' not in [-rank(`input`), rank(`input`)).
9951
9952    Supported Platforms:
9953        ``Ascend`` ``GPU`` ``CPU``
9954
9955    Examples:
9956        >>> import mindspore
9957        >>> import numpy as np
9958        >>> from mindspore import Tensor, ops
9959        >>> x = Tensor(np.array([[float("nan"), 2, 3], [1, 2, float("nan")]]), mindspore.float32)
9960        >>> output1 = ops.nansum(x, axis=0, keepdims=False, dtype=mindspore.float32)
9961        >>> output2 = ops.nansum(x, axis=0, keepdims=True, dtype=mindspore.float32)
9962        >>> print(output1)
9963        [1. 4. 3.]
9964        >>> print(output2)
9965        [[1. 4. 3.]]
9966    """
9967    _check_is_tensor("input", input, "nansum")
9968    _check_repeat_in_axis(axis, input.ndim, "nansum")
9969    if input.is_complex():
9970        raise TypeError(f'For nansum, input are not supported complex type, but got {input.dtype}.')
9971    if dtype is not None and dtype in mstype.complex_type:
9972        raise TypeError(f'For nansum, dtype not supported complex type, but got {dtype}.')
9973    if axis is None:
9974        axis = ()
9975    if input.dtype == mstype.bool_:
9976        input = input.astype(mstype.int64)
9977    is_nan = isnan_(input)
9978    input = ops.masked_fill(input, is_nan, ops.cast(0, input.dtype))
9979    input = _get_cache_prim(P.ReduceSum)(keepdims)(input, axis)
9980    if dtype is not None and input.dtype != dtype:
9981        input = input.astype(dtype)
9982    return input
9983
9984
9985def diag_embed(input, offset=0, dim1=-2, dim2=-1):
9986    r"""
9987    Creates a tensor with diagonals filled by `input`. The remaining elements are filled by 0.
9988    If the shape of `input` is :math:`[x_{0}, x_{1}, ..., x_{n-1}, x_{n}]`, the output shape is: the vector obtained
9989    by inserting :math:`x_{n}+|offset|` into the vector :math:`[x_{0}, x_{1}, ..., x_{n-1}]`
9990    at position `dim1` and `dim2`.
9991
9992    Args:
9993        input (Tensor): Values to fill diagonal.
9994        offset (int, optional): Offset of the diagonal. :math:`offset=0` refers to the main diagonal. Default: ``0`` .
9995
9996            - If :math:`offset>0`, fill the diagonals that are `offset` units upward from the main diagonal.
9997            - If :math:`offset<0`, fill the diagonals that are `|offset|` units downward from the main diagonal.
9998
9999        dim1 (int, optional): The first dimension in `input` with respect to which to fill diagonal. Default: ``-2`` .
10000        dim2 (int, optional): The second dimension in `input` with respect to which to fill diagonal. Default: ``-1`` .
10001
10002    Returns:
10003        Tensor, has the same dtype as `input`, but the shape of output is one dimension higher than the `input`.
10004
10005    Raises:
10006        TypeError: If `input` is not a Tensor.
10007        TypeError: If dtype of `input` is not supported.
10008        TypeError: If `offset` is not an int.
10009        TypeError: If `dim1` or `dim2` is not an int.
10010        ValueError: If the dimension of `input` is not 1D-6D.
10011        ValueError: If `dim1` is not in range of [-len(input.shape) - 1, len(input.shape)].
10012        ValueError: If `dim2` is not in range of [-len(input.shape) - 1, len(input.shape)].
10013        ValueError: If `dim1` and `dim2` are identical.
10014
10015    Supported Platforms:
10016        ``Ascend`` ``GPU`` ``CPU``
10017
10018    Examples:
10019        >>> import mindspore
10020        >>> import numpy as np
10021        >>> from mindspore import Tensor, ops
10022        >>> x = Tensor(np.array([2,3,4]), mindspore.float32)
10023        >>> output = ops.diag_embed(x)
10024        >>> print(output)
10025        [[2. 0. 0.]
10026         [0. 3. 0.]
10027         [0. 0. 4.]]
10028    """
10029
10030    transpose_op = Transpose()
10031    matrix_set_diag_op = MatrixSetDiagV3(align="LEFT_RIGHT")
10032    zeros = ops.Zeros()
10033    if not isinstance(input, (Tensor, Tensor_)):
10034        raise TypeError("For 'diag_embed', 'input' must be Tensor.")
10035
10036    input_dtype = dtype_(input)
10037    if not (input_dtype in (mstype.int8, mstype.int16, mstype.int32, mstype.int64, mstype.uint8, mstype.uint16,
10038                            mstype.uint32, mstype.uint64, mstype.float16, mstype.float32, mstype.float64)):
10039        raise TypeError("For 'diag_embed', the dtype of 'input' must be int8, int16, int32, int64, "
10040                        f"uint8, uint16, uint32, uint64, float16, float32 or float64, but got '{input_dtype}'.")
10041    _check_attr_dtype("offset", offset, [int], "diag_embed")
10042    _check_attr_dtype("dim1", dim1, [int], "diag_embed")
10043    _check_attr_dtype("dim2", dim2, [int], "diag_embed")
10044    if len(input.shape) > 6:
10045        raise ValueError("For 'diag_embed', the dimension of 'input' must be 1-6D.")
10046    x_shape = input.shape
10047    output_dim = len(x_shape) + 1
10048    if dim1 < -output_dim or dim1 > (output_dim - 1):
10049        raise ValueError(f"For 'diag_embed', 'dim1' must be in range of [{-output_dim}, {output_dim - 1}], "
10050                         f"but got {dim1}.")
10051    if dim2 < -output_dim or dim2 > (output_dim - 1):
10052        raise ValueError(f"For 'diag_embed', 'dim2' must be in range of [{-output_dim}, {output_dim - 1}], "
10053                         f"but got {dim2}.")
10054    if dim1 < 0:
10055        dim1_ = dim1 + output_dim
10056    else:
10057        dim1_ = dim1
10058    if dim2 < 0:
10059        dim2_ = dim2 + output_dim
10060    else:
10061        dim2_ = dim2
10062    if dim1_ == dim2_:
10063        raise ValueError("For 'diag_embed', 'dim1' must not be identical to 'dim2'.")
10064    batch_shape = x_shape[:-1]
10065    if offset > 0:
10066        dsize = x_shape[-1] + offset
10067    else:
10068        dsize = x_shape[-1] - offset
10069    diag_plane = (dsize, dsize)
10070    output_shape_trans = batch_shape + diag_plane
10071    output = zeros(output_shape_trans, input.dtype)
10072    k = cast_(offset, mstype.int32)
10073    output = matrix_set_diag_op(output, input, k)
10074    dim = 0
10075    perm = ()
10076    for i in range(output_dim):
10077        if i == dim1_:
10078            perm = perm + (output_dim - 2,)
10079        elif i == dim2_:
10080            perm = perm + (output_dim - 1,)
10081        else:
10082            perm = perm + (dim,)
10083            dim = dim + 1
10084    return transpose_op(output, perm)
10085
10086
10087def sum(input, dim=None, keepdim=False, *, dtype=None):
10088    """
10089    Calculate sum of Tensor elements over a given dim.
10090
10091    Note:
10092        The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
10093
10094    Args:
10095        input (Tensor): The input tensor.
10096        dim (Union[None, int, tuple(int), list(int), Tensor]): Dimensions along which a sum is performed.
10097            If ``None`` , sum all the elements of the input tensor.
10098            If the `dim` is a tuple or list of ints, a sum is performed on all the dimensions specified in the tuple.
10099            Must be in the range :math:`[-input.ndim, input.ndim)` . Default: ``None`` .
10100        keepdim (bool): Whether the output tensor has dim retained or not.
10101            If ``True`` , keep these reduced dimensions and the length is 1.
10102            If ``False`` , don't keep these dimensions. Default: ``False`` .
10103
10104    Keyword Args:
10105        dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
10106
10107    Returns:
10108        A Tensor, sum of elements over a given dim in `input`.
10109
10110    Raises:
10111        TypeError: If `input` is not a Tensor.
10112        TypeError: If `dim` is not an int, tulpe(int), list(int), Tensor or None.
10113        ValueError: If `dim` is not in the range :math:`[-input.ndim, input.ndim)` .
10114        TypeError: If `keepdim` is not a bool.
10115
10116    Supported Platforms:
10117        ``Ascend`` ``GPU`` ``CPU``
10118
10119    Examples:
10120        >>> import numpy as np
10121        >>> from mindspore import Tensor, ops
10122        >>> from mindspore import dtype as mstype
10123        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
10124        ...                      [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
10125        ...                      [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mstype.float32)
10126        >>> out = ops.sum(x)
10127        >>> print(out)
10128        270.0
10129        >>> out = ops.sum(x, dim=2)
10130        >>> print(out)
10131        [[ 6. 12. 18.]
10132         [24. 30. 36.]
10133         [42. 48. 54.]]
10134        >>> out = ops.sum(x, dim=2, keepdim=True)
10135        >>> print(out)
10136        [[[ 6.]
10137         [12.]
10138         [18.]]
10139        [[24.]
10140         [30.]
10141         [36.]]
10142        [[42.]
10143         [48.]
10144         [54.]]]
10145    """
10146    return sum_ext_op(input, dim, keepdim, dtype)
10147
10148
10149def tanhshrink(input):
10150    '''
10151    Tanhshrink Activation, :math:`Tanhshrink(x)=x-Tanh(x)` , where :math:`x` corresponds to `input` .
10152    See :class:`mindspore.nn.Tanhshrink` for more details.
10153
10154    Supported Platforms:
10155        ``Ascend`` ``GPU`` ``CPU``
10156
10157    Examples:
10158        >>> import mindspore as ms
10159        >>> from mindspore import ops
10160        >>> from mindspore import Tensor
10161        >>> import numpy as np
10162        >>> input = Tensor(np.array([1, 2, 3, 2, 1]), ms.float16)
10163        >>> output = ops.tanhshrink(input)
10164        >>> print(output)
10165        [0.2383 1.036  2.004  1.036  0.2383]
10166    '''
10167    if not isinstance(input, Tensor):
10168        raise TypeError(f"For tanhshrink, the input must be a Tensor, but got {type(input)}.")
10169
10170    if input.dtype in mstype.int_type + mstype.uint_type:
10171        input = input.astype(mstype.float32)
10172    return input - tanh_(input)
10173
10174
10175def zeta(input, other):
10176    r"""
10177    Elemental-wise compute the Hurwitz zeta output.
10178
10179    .. math::
10180
10181        \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x}
10182
10183    .. warning::
10184        This is an experimental API that is subject to change or deletion.
10185
10186    Args:
10187        input (Union[Tensor, int, float]): Input Tensor. Represented as :math:`x` in the formula. If it's a Tensor, its
10188            dtype must be either float32 or float64.
10189        other (Union[Tensor, int, float]): Input Tensor must have the same dtype as `input`.
10190            Represented as :math:`q` in the formula.
10191
10192    Returns:
10193        Tensor, The result of Hurwitz zeta function.
10194
10195    Raises:
10196        TypeError: If neither `input` nor `other` is not tensor.
10197        TypeError: If dtype of `input` is neither float32 nor float64.
10198        TypeError: If dtype of `other` is neither float32 nor float64.
10199
10200    Supported Platforms:
10201        ``Ascend`` ``GPU`` ``CPU``
10202
10203    Examples:
10204        >>> import mindspore
10205        >>> import numpy as np
10206        >>> from mindspore import Tensor, ops
10207        >>> x = Tensor(np.array([10.]), mindspore.float32)
10208        >>> q = Tensor(np.array([1.]), mindspore.float32)
10209        >>> z = ops.zeta(x, q)
10210        >>> print(z)
10211        [1.0009946]
10212    """
10213    if isinstance(input, (int, float)):
10214        if not isinstance(other, Tensor):
10215            raise TypeError(f"For 'zeta', at least one of the inputs should be Tensor.")
10216        _dtype = other.dtype
10217        input = cast_(input, _dtype)
10218    if isinstance(other, (int, float)):
10219        if not isinstance(input, Tensor):
10220            raise TypeError(f"For 'zeta', at least one of the inputs should be Tensor.")
10221        _dtype = input.dtype
10222        other = cast_(other, _dtype)
10223    if input.size < other.size:
10224        input = _get_cache_prim(P.BroadcastTo)(other.shape)(input)
10225    elif input.size > other.size:
10226        other = _get_cache_prim(P.BroadcastTo)(input.shape)(other)
10227    output = zeta_(input, other)
10228    return output
10229
10230
10231def matrix_power(input, n):
10232    """
10233    Raises a square matrix to the (integer) power `n` .
10234
10235    - When :math:`n=0` , returns the identity matrix, which has the same shape as `input` .
10236    - When :math:`n<0` and `input` is invertible, returns the inverse of `input` to the power of :math:`-n` .
10237
10238    .. warning::
10239        This is an experimental API that is subject to change or deletion.
10240
10241    Args:
10242        input (Tensor): A 3-D Tensor. Supported data types are float16 and float32.
10243            The shape is :math:`(b, m, m)` , represents b m-D square matrices.
10244        n (int): The exponent, a required int.
10245
10246    Returns:
10247        A 3-D Tensor. Data type and shape are the same as `input` 's.
10248
10249    Raises:
10250        TypeError: If the data type of `n` is not int.
10251        TypeError: If the data type of `input` is neither float32 nor float16.
10252        TypeError: If `input` is not a Tensor.
10253        ValueError: If `input` is not a 3-D tensor.
10254        ValueError: If shape[1] and shape[2] of `input` are not the same.
10255        ValueError: If `n` is negative but got input `input` has singular matrices.
10256
10257    Supported Platforms:
10258        ``CPU``
10259
10260    Examples:
10261        >>> import mindspore as ms
10262        >>> from mindspore import Tensor, ops
10263        >>> input = Tensor([[[0, 1], [-1, 0]], [[1, 0], [0, -1]]], dtype=ms.float32)
10264        >>> y = ops.matrix_power(input, 2)
10265        >>> print(y)
10266        [[[-1.  0.]
10267          [-0. -1.]]
10268         [[ 1.  0.]
10269          [ 0.  1.]]]
10270    """
10271    matrix_power_ops = _get_cache_prim(P.MatrixPower)(n=n)
10272    return matrix_power_ops(input)
10273
10274
10275def _maybe_wrap_dims_n(ret_dim, input_dim):
10276    """Check the dim"""
10277    if input_dim <= 0:
10278        input_dim = 1
10279
10280    min = -input_dim
10281    max = input_dim - 1
10282    for i, value in enumerate(ret_dim):
10283        dim = value
10284        if dim < min or dim > max:
10285            raise ValueError(f"Dimension out of range, it must be in range of [{min}, {max}], "
10286                             f"but got {dim}.")
10287
10288        if dim < 0:
10289            ret_dim[i] = dim + input_dim
10290    return ret_dim
10291
10292
10293def _canonicalize_fft_shape_and_dim(input, shape, dim):
10294    """Check the input's shape and dim"""
10295    input_dim = input.ndim
10296    input_sizes = input.shape
10297    ret_dim = None
10298    ret_shape = None
10299
10300    if dim is not None:
10301        ret_dim = list(dim)
10302        ret_dim = _maybe_wrap_dims_n(ret_dim, input_dim)
10303        # check if dim is duplicated
10304        set_ret_dim = set(ret_dim)
10305        if len(set_ret_dim) != len(ret_dim):
10306            raise ValueError(f"FFT dims must be unique.")
10307
10308    if shape is not None:
10309        if dim is not None and len(dim) != len(shape):
10310            raise ValueError(f"shape and dim must have the same length, but now they are "
10311                             f"{len(dim)} and {len(shape)}.")
10312        if len(shape) > input_dim:
10313            raise ValueError(f"Got shape with {len(shape)} values but input tensor only "
10314                             f"has {input_dim} dimensions.")
10315
10316        transform_ndim = len(shape)
10317        if dim is None:
10318            ret_dim = [0] * transform_ndim
10319            value = input_dim - transform_ndim
10320            for i in range(transform_ndim):
10321                ret_dim[i] = value + i
10322
10323        ret_shape = [0] * transform_ndim
10324        for i in range(transform_ndim):
10325            if shape[i] == -1:
10326                ret_shape[i] = input_sizes[ret_dim[i]]
10327            else:
10328                ret_shape[i] = shape[i]
10329    elif dim is None:
10330        ret_dim = list(range(input_dim))
10331        ret_shape = [0] * input_dim
10332        for i in range(input_dim):
10333            ret_shape[i] = input_sizes[i]
10334    else:
10335        ret_shape = [0] * len(ret_dim)
10336        for i in range(len(ret_dim)):
10337            value = ret_dim[i]
10338            ret_shape[i] = input_sizes[value]
10339
10340    for value in ret_shape:
10341        if value <= 0:
10342            raise ValueError(f"The value of ret_shape must be greater than 0, "
10343                             f"but got '{value}'.")
10344
10345    return ret_shape, ret_dim
10346
10347
10348def as_strided(x, shape=None, strides=None):
10349    n = np.dtype(mstype.dtype_to_nptype(x.dtype)).itemsize
10350    strides = tuple(np.array(strides) * n)
10351    if x.dtype == mstype.bfloat16:
10352        return Tensor(np.lib.stride_tricks.as_strided(x.float().asnumpy(), shape, strides, False, True), dtype=x.dtype)
10353    return Tensor(np.lib.stride_tricks.as_strided(x.asnumpy(), shape, strides, False, True), dtype=x.dtype)
10354
10355
10356def _resize_input(input, input_dim, ret_dim, ret_shape, input_sizes):
10357    """Resize the input"""
10358    paddings = [0] * input_dim * 2
10359    must_copy = False
10360    for i in range(len(ret_dim)):
10361        value = ret_dim[i]
10362        # resize input based on n & dim
10363        if ret_shape[i] == -1:
10364            continue
10365
10366        if input_sizes[value] < ret_shape[i]:
10367            pad_idx = len(paddings) - 2 * value - 1
10368            paddings[pad_idx] = ret_shape[i] - input_sizes[value]
10369            must_copy = True
10370
10371        if input_sizes[value] > ret_shape[i]:
10372            start_index = [0] * input_dim
10373            input_sizes[value] = ret_shape[i]
10374            input = slice_(input, start_index, input_sizes)
10375
10376    if must_copy:
10377        paddings = np.reshape(paddings, (input_dim, 2)).tolist()
10378        paddings.reverse()
10379        paddings = (*paddings,)
10380        input = _get_cache_prim(P.Pad)(paddings)(input)
10381
10382    return input
10383
10384
10385def _permute_input(input, input_dim, ret_dim):
10386    """Permute input based on dim"""
10387    dim_permute = list(range(input_dim))
10388    # is_transformed_dim
10389    is_transformed_dim = [0] * input_dim
10390    for value in ret_dim:
10391        is_transformed_dim[value] = True
10392
10393    # partition dim_permute
10394    dim_permute_a, dim_permute_b = [], []
10395    for i in range(len(dim_permute)):
10396        value = dim_permute[i]
10397        (dim_permute_a if not is_transformed_dim[i] else dim_permute_b).append(value)
10398
10399    # strides
10400    type_size = np.dtype(mstype.dtype_to_nptype(input.dtype)).itemsize
10401    input_strides = [int(x / type_size) for x in input.strides]
10402
10403    def cmp(x, y):
10404        if input_strides[x] > input_strides[y]:
10405            return -1
10406        if input_strides[x] < input_strides[y]:
10407            return 1
10408        return 0
10409
10410    # sort
10411    if dim_permute_a:
10412        dim_permute_a = sorted(dim_permute_a, key=cmp_to_key(cmp))
10413
10414    # copy
10415    if dim_permute_b:
10416        ret_dim = sorted(ret_dim, key=cmp_to_key(cmp))
10417        for i in range(len(ret_dim)):
10418            value = ret_dim[i]
10419            dim_permute_b[i] = value
10420
10421    # merge
10422    dim_permute = dim_permute_a + dim_permute_b
10423
10424    # permute
10425    input = transpose_(input, tuple(dim_permute))
10426
10427    return input, dim_permute
10428
10429
10430def _reshape_input(input, signal_ndim, batch_dims):
10431    """Reshape input"""
10432    # Collapse batch dimensions into a single dimension
10433    batched_sizes = [0] * (signal_ndim + 1)
10434    batched_sizes[0] = -1
10435    i = batch_dims
10436    j = 1
10437    while i < len(input.shape):
10438        batched_sizes[j] = input.shape[i]
10439        j += 1
10440        i += 1
10441        if j >= len(batched_sizes):
10442            break
10443    input = reshape_(input, tuple(batched_sizes))
10444    return input
10445
10446
10447def _check_fftwithsize_input(input, s, dim, norm, fft_func_name):  # pylint: disable=redefined-outer-name
10448    """Check the input of fftwithsize"""
10449    if not isinstance(input, (Tensor, Tensor_)):
10450        raise TypeError("For '{fft_func_name}', 'input' must be Tensor.")
10451
10452    input_dtype = dtype_(input)
10453    if fft_func_name in ('FFTN', 'IFFTN'):
10454        if not input_dtype in (mstype.complex64, mstype.complex128):
10455            raise TypeError("For '{fft_func_name}', the dtype of 'input' must be complex64, complex128, "
10456                            f"but got '{input_dtype}'.")
10457    else:
10458        raise TypeError("For '{fft_func_name}', it is not supported now.")
10459
10460    if s is not None:
10461        if isinstance(s, int):
10462            s = (s,)
10463        elif not isinstance(s, tuple):
10464            raise TypeError("For '{fft_func_name}', 's' must be tuple(int).")
10465        for ele in s:
10466            if not isinstance(ele, int):
10467                raise TypeError(f"For '{fft_func_name}', each elements of 's' must be int, but got {type(ele)}")
10468
10469    if dim is not None:
10470        if isinstance(dim, int):
10471            dim = (dim,)
10472        elif not isinstance(dim, tuple):
10473            raise TypeError("For '{fft_func_name}', 'dim' must be tuple(int).")
10474        for ele in dim:
10475            if not isinstance(ele, int):
10476                raise TypeError(f"For '{fft_func_name}', each elements of 'dim' must be int, but got {type(ele)}")
10477
10478    ret_shape, ret_dim = _canonicalize_fft_shape_and_dim(input, s, dim)
10479    input_dim = input.ndim
10480    signal_ndim = len(ret_dim)
10481    batch_dims = input_dim - signal_ndim
10482    input_sizes = list(input.shape)
10483
10484    if fft_func_name in ('FFTN', 'IFFTN'):
10485        input = _resize_input(input, input_dim, ret_dim, ret_shape, input_sizes)
10486        out_sizes = input.shape
10487        input, dim_permute = _permute_input(input, input_dim, ret_dim)
10488
10489    input = _reshape_input(input, signal_ndim, batch_dims)
10490
10491    if norm is None:
10492        norm = "backward"
10493    else:
10494        _check_attr_dtype("norm", norm, [str], fft_func_name)
10495
10496    FFTInput = collections.namedtuple('FFTInput', ['input', 'signal_ndim', 'norm', 'input_dim',
10497                                                   'batch_dims', 'dim_permute', 'out_sizes'])
10498    return FFTInput(input=input, signal_ndim=signal_ndim, norm=norm, input_dim=input_dim,
10499                    batch_dims=batch_dims, dim_permute=dim_permute, out_sizes=out_sizes)
10500
10501
10502def _handle_fftwithsize_output(out, input_dim, batch_dims, dim_permute, out_sizes):
10503    """Handle the output of fftwithsize"""
10504    out_strides = [0] * input_dim
10505    batch_numel = 1
10506    for i in range(batch_dims - 1, -1, -1):
10507        out_strides[dim_permute[i]] = batch_numel * out.strides[0]
10508        batch_numel *= out_sizes[dim_permute[i]]
10509
10510    for i in range(batch_dims, input_dim):
10511        out_strides[dim_permute[i]] = out.strides[1 + (i - batch_dims)]
10512
10513    type_size = np.dtype(mstype.dtype_to_nptype(out.dtype)).itemsize
10514    if out.shape != out_sizes or out.strides != out_strides:
10515        out = as_strided(out, out_sizes, [int(i / type_size) for i in out_strides])
10516    return out
10517
10518
10519def fft(input, n=None, dim=-1, norm=None):  # pylint: disable=redefined-outer-name
10520    r"""
10521    Calculates the one dimensional discrete Fourier transform of `input`.
10522
10523    Args:
10524        input (Tensor): The input tensor.
10525        n (int, optional): Signal length.
10526            If given, the input will either be zero-padded or trimmed to this length before computing the FFT.
10527            Default: ``None``.
10528        dim (int, optional): The dimension along which to take the one dimensional FFT.
10529            Default: -1.
10530        norm (string, optional): Normalization mode. Three modes are defined as,
10531            ``"forward"`` (normalize by :math `1/n`), ``"backward"``(no normalization),
10532            ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
10533            Default: ``None`` that means ``"backward"``.
10534
10535    Returns:
10536        Tensor, The result of `fft()` function.
10537
10538    Raises:
10539        TypeError: If the `input` type is not Tensor.
10540        TypeError: If the `input` data type is not one of: complex64, complex128.
10541        TypeError: If `n` or `dim` type is not int32.
10542        ValueError: If `input` dimension is less than 1.
10543        ValueError: If `n` is less than 1.
10544        ValueError: If `dim` is not in the range of "[ `-input_dim` , `input_dim-1` ]".
10545        ValueError: If norm is none of "backward", "forward" or "ortho".
10546
10547    Supported Platforms:
10548        ``GPU`` ``CPU``
10549
10550    Examples:
10551        >>> from mindspore import Tensor, ops
10552        >>> input = Tensor([ 1.6243454+0.j, -0.6117564+0.j, -0.5281718+0.j, -1.0729686+0.j])
10553        >>> y = ops.fft(input)
10554        >>> print(y)
10555        [-0.5885514+0.j          2.1525173-0.46121222j  2.7808986+0.j
10556         2.1525173+0.46121222j]
10557    """
10558    if not isinstance(input, (Tensor, Tensor_)):
10559        raise TypeError("For 'FFT', 'input' must be Tensor.")
10560
10561    input_dtype = dtype_(input)
10562    if not input_dtype in (mstype.complex64, mstype.complex128):
10563        raise TypeError("For 'FFT', the dtype of 'input' must be complex64, complex128, "
10564                        f"but got '{input_dtype}'.")
10565    _check_attr_dtype("dim", dim, [int], "FFT")
10566
10567    input_dim = input.ndim
10568    signal_ndim = 1
10569    batch_dims = input_dim - signal_ndim
10570    input_sizes = list(input.shape)
10571    dim = _maybe_wrap_dims_n([dim], input_dim)[0]
10572    n_opt = n
10573    if n is None:
10574        n = input.shape[dim]
10575    else:
10576        _check_attr_dtype("n", n, [int], "FFT")
10577    if n < 1:
10578        raise ValueError("For 'FFT', the value of 'n' must be greater than or equal to 1, "
10579                         f"but got '{n}'.")
10580    if n_opt is not None:
10581        input = _resize_input(input, input_dim, [dim], [n], input_sizes)
10582    out_sizes = input.shape
10583
10584    input, dim_permute = _permute_input(input, input_dim, [dim])
10585    input = _reshape_input(input, signal_ndim, batch_dims)
10586
10587    if norm is None:
10588        norm = "backward"
10589    else:
10590        _check_attr_dtype("norm", norm, [str], "FFT")
10591
10592    fft_ = FFTWithSize(signal_ndim=1, inverse=False, real=False, norm=norm)
10593    out = fft_(input)
10594    return _handle_fftwithsize_output(out, input_dim, batch_dims, dim_permute, out_sizes)
10595
10596
10597def fft2(input, s=None, dim=(-2, -1), norm=None):  # pylint: disable=redefined-outer-name
10598    r"""
10599    Calculates the two dimensional discrete Fourier transform of `input`.
10600
10601    Args:
10602        input (Tensor): The input tensor.
10603        s (Tuple[int], optional): Signal size in the transformed dimensions.
10604            If given, each dimension `dim[i]` will either be zero-padded or trimmed to the length `s[i]` before
10605            computing the FFT. If a length `-1` is specified, no padding is done in that dimension.
10606            Default: `s = [input.size(d) for d in dim]`
10607        dim (Tuple[int], optional): Dimensions to be transformed.
10608            Default: last two dimensions.
10609        norm (string, optional): Normalization mode. Three modes are defined as,
10610            ``"forward"``(normalize by :math `1/n`), ``"backward"``(no normalization),
10611            ``"ortho"``(normalize by :math:`1/\sqrt{n}`). Where :math `n = prod(s)` is the logical FFT size.
10612            Default: ``None`` that means ``"backward"``.
10613
10614    Returns:
10615        Tensor, The result of `fft2()` function.
10616
10617    Raises:
10618        TypeError: If the `input` type is not Tensor.
10619        TypeError: If the `input` data type is not one of: complex64, complex128.
10620        TypeError: If the `s` or `dim` is not tuple(int).
10621        ValueError: If `input` dimension is less than 2.
10622        ValueError: If the length of `s` and `dim` are not the same.
10623        ValueError: If the value in `dim` is not in the range of "[ `-input_dim` , `input_dim-1` ]".
10624        ValueError: If norm is none of "backward", "forward" or "ortho".
10625
10626    Supported Platforms:
10627        ``GPU`` ``CPU``
10628
10629    Examples:
10630        >>> from mindspore import Tensor, ops
10631        >>> input = Tensor([[ 1.6243454+0.j, -0.6117564+0.j], [-0.5281718+0.j, -1.0729686+0.j]])
10632        >>> y = ops.fft2(input)
10633        >>> print(y)
10634        [[-0.5885514+0.j  2.7808986+0.j]
10635        [ 2.6137294+0.j  1.691305 +0.j]]
10636    """
10637    return fftn(input, s, dim, norm)
10638
10639
10640def fftn(input, s=None, dim=None, norm=None):  # pylint: disable=redefined-outer-name
10641    r"""
10642    Calculates the N dimensional discrete Fourier transform of `input`.
10643
10644    Args:
10645        input (Tensor): The input tensor.
10646        s (Tuple[int], optional): Signal size in the transformed dimensions.
10647            If given, each dimension `dim[i]` will either be zero-padded or trimmed to the length `s[i]` before
10648            computing the FFT. If a length `-1` is specified, no padding is done in that dimension.
10649            Default: `s = [input.size(d) for d in dim]`
10650        dim (Tuple[int], optional): Dimensions to be transformed.
10651            Default: all dimensions, or the last `len(s)` dimensions if `s` is given.
10652        norm (string, optional): Normalization mode. Three modes are defined as,
10653            ``"forward"``(normalize by :math `1/n`), ``"backward"``(no normalization),
10654            ``"ortho"``(normalize by :math:`1/\sqrt{n}`). Where :math `n = prod(s)` is the logical FFT size.
10655            Default: ``None`` that means ``"backward"``.
10656
10657    Returns:
10658        Tensor, The result of `fftn()` function.
10659
10660    Raises:
10661        TypeError: If the `input` type is not Tensor.
10662        TypeError: If the `input` data type is not one of: complex64, complex128.
10663        TypeError: If the `s` or `dim` is not tuple(int).
10664        ValueError: If the length of `s` and `dim` are not the same.
10665        ValueError: If `input` dimension is less than 1.
10666        ValueError: If the value in `dim` is not in the range of "[ `-input_dim` , `input_dim-1` )".
10667        ValueError: If norm is none of "backward", "forward" or "ortho".
10668
10669    Supported Platforms:
10670        ``GPU`` ``CPU``
10671
10672    Examples:
10673        >>> from mindspore import Tensor, ops
10674        >>> input = Tensor([[[ 1.6243454 +0.j, -0.6117564 +0.j, -0.5281718 +0.j],
10675        ...                  [-1.0729686 +0.j, 0.86540765+0.j, -2.3015387 +0.j]]])
10676        >>> y = ops.fftn(input)
10677        >>> print(y)
10678        [[[-2.02468245+0.j          1.83940642-2.6702696j
10679           1.83940642+2.6702696j ]
10680         [ 2.99351685+0.j          2.54921257+2.81504238j
10681           2.54921257-2.81504238j]]]
10682    """
10683    fftninput = _check_fftwithsize_input(input, s, dim, norm, "FFTN")
10684    fftn_ = FFTWithSize(signal_ndim=fftninput.signal_ndim, inverse=False, real=False, norm=fftninput.norm)
10685    out = fftn_(fftninput.input)
10686    return _handle_fftwithsize_output(out, fftninput.input_dim, fftninput.batch_dims,
10687                                      fftninput.dim_permute, fftninput.out_sizes)
10688
10689
10690def ifft(input, n=None, dim=-1, norm=None):  # pylint: disable=redefined-outer-name
10691    r"""
10692    Calculates the inverse of `fft()`.
10693
10694    Args:
10695        input (Tensor): The input tensor.
10696        n (int, optional): Signal length.
10697            If given, the input will either be zero-padded or trimmed to this length before computing the IFFT.
10698            Default: ``None``.
10699        dim (int, optional): The dimension along which to take the one dimensional IFFT.
10700            Default: -1.
10701        norm (string, optional): Normalization mode. Three modes are defined as,
10702            ``"forward"``(normalize by :math `1/n`), ``"backward"``(no normalization),
10703            ``"ortho"``(normalize by :math:`1/\sqrt{n}`).
10704            Default: ``None`` that means ``"backward"``.
10705
10706    Returns:
10707        Tensor, The result of `ifft()` function.
10708
10709    Raises:
10710        TypeError: If the `input` type is not Tensor.
10711        TypeError: If the `input` data type is not one of: complex64, complex128.
10712        TypeError: If `n` or `dim` type is not int32.
10713        ValueError: If `input` dimension is less than 1.
10714        ValueError: If `n` is less than 1.
10715        ValueError: If `dim` is not in the range of "[ `-input_dim` , `input_dim-1` ]".
10716        ValueError: If norm is none of "backward", "forward" or "ortho".
10717
10718    Supported Platforms:
10719        ``GPU`` ``CPU``
10720
10721    Examples:
10722        >>> from mindspore import Tensor, ops
10723        >>> input = Tensor([ 1.6243454+0.j, -0.6117564+0.j, -0.5281718+0.j, -1.0729686+0.j])
10724        >>> y = ops.ifft(input)
10725        >>> print(y)
10726        [-0.14713785+0.j          0.5381293 +0.11530305j  0.69522465+0.j
10727         0.5381293 -0.11530305j]
10728    """
10729    if not isinstance(input, (Tensor, Tensor_)):
10730        raise TypeError("For 'IFFT', 'input' must be Tensor.")
10731
10732    input_dtype = dtype_(input)
10733    if not input_dtype in (mstype.complex64, mstype.complex128):
10734        raise TypeError("For 'IFFT', the dtype of 'input' must be complex64, complex128, "
10735                        f"but got '{input_dtype}'.")
10736    _check_attr_dtype("dim", dim, [int], "IFFT")
10737
10738    input_dim = input.ndim
10739    signal_ndim = 1
10740    batch_dims = input_dim - signal_ndim
10741    input_sizes = list(input.shape)
10742    dim = _maybe_wrap_dims_n([dim], input_dim)[0]
10743    n_opt = n
10744    if n is None:
10745        n = input.shape[dim]
10746    else:
10747        _check_attr_dtype("n", n, [int], "IFFT")
10748    if n < 1:
10749        raise ValueError("For 'IFFT', the value of 'n' must be greater than or equal to 1, "
10750                         f"but got '{n}'.")
10751    if n_opt is not None:
10752        input = _resize_input(input, input_dim, [dim], [n], input_sizes)
10753    out_sizes = input.shape
10754
10755    input, dim_permute = _permute_input(input, input_dim, [dim])
10756    input = _reshape_input(input, signal_ndim, batch_dims)
10757
10758    if norm is None:
10759        norm = "backward"
10760    else:
10761        _check_attr_dtype("norm", norm, [str], "IFFT")
10762
10763    fft_ = FFTWithSize(signal_ndim=1, inverse=True, real=False, norm=norm)
10764    out = fft_(input)
10765
10766    return _handle_fftwithsize_output(out, input_dim, batch_dims, dim_permute, out_sizes)
10767
10768
10769def ifft2(input, s=None, dim=(-2, -1), norm=None):  # pylint: disable=redefined-outer-name
10770    r"""
10771    Calculates the inverse of `fft2()`.
10772
10773    Args:
10774        input (Tensor): The input tensor.
10775        s (Tuple[int], optional): Signal size in the transformed dimensions.
10776            If given, each dimension `dim[i]` will either be zero-padded or trimmed to the length `s[i]` before
10777            computing the FFT. If a length `-1` is specified, no padding is done in that dimension.
10778            Default: `s = [input.size(d) for d in dim]`
10779        dim (Tuple[int], optional): Dimensions to be transformed.
10780            Default: (-2, -1).
10781        norm (string, optional): Normalization mode. Three modes are defined as,
10782            ``"forward"``(normalize by :math `1/n`), ``"backward"``(no normalization),
10783            ``"ortho"``(normalize by :math:`1/\sqrt{n}`). Where :math `n = prod(s)` is the logical IFFT size.
10784            Default: ``None`` that means ``"backward"``.
10785
10786    Returns:
10787        Tensor, The result of `ifft2()` function.
10788
10789    Raises:
10790        TypeError: If the `input` type is not Tensor.
10791        TypeError: If the `input` data type is not one of: complex64, complex128.
10792        TypeError: If the `s` or `dim` is not tuple(int).
10793        ValueError: If the length of `s` and `dim` are not the same.
10794        ValueError: If `input` dimension is less than 2.
10795        ValueError: If the value in `dim` is not in the range of "[ `-input_dim` , `input_dim-1` )".
10796        ValueError: If norm is none of "backward", "forward" or "ortho".
10797
10798    Supported Platforms:
10799        ``GPU`` ``CPU``
10800
10801    Examples:
10802        >>> from mindspore import Tensor, ops
10803        >>> input = Tensor([[ 1.6243454+0.j, -0.6117564+0.j], [-0.5281718+0.j, -1.0729686+0.j]])
10804        >>> y = ops.ifft2(input)
10805        >>> print(y)
10806        [[-0.14713785+0.j  0.69522465+0.j]
10807        [ 0.65343235+0.j  0.42282625+0.j]]
10808    """
10809    return ifftn(input, s, dim, norm)
10810
10811
10812def ifftn(input, s=None, dim=None, norm=None):  # pylint: disable=redefined-outer-name
10813    r"""
10814    Calculates the inverse of `fftn()`.
10815
10816    Args:
10817        input (Tensor): The input tensor.
10818        s (Tuple[int], optional): Signal size in the transformed dimensions.
10819            If given, each dimension `dim[i]` will either be zero-padded or trimmed to the length `s[i]` before
10820            computing the FFT. If a length `-1` is specified, no padding is done in that dimension.
10821            Default: `s = [input.size(d) for d in dim]`
10822        dim (Tuple[int], optional): Dimensions to be transformed.
10823            Default: all dimensions, or the last `len(s)` dimensions if `s` is given.
10824        norm (string, optional): Normalization mode. Three modes are defined as,
10825            ``"forward"``(normalize by :math `1/n`), ``"backward"``(no normalization),
10826            ``"ortho"``(normalize by :math:`1/\sqrt{n}`). Where :math `n = prod(s)` is the logical IFFT size.
10827            Default: ``None`` that means ``"backward"``.
10828
10829    Returns:
10830        Tensor, The result of `ifftn()` function.
10831
10832    Raises:
10833        TypeError: If the `input` type is not Tensor.
10834        TypeError: If the `input` data type is not one of: complex64, complex128.
10835        TypeError: If the `s` or `dim` is not tuple(int).
10836        ValueError: If the length of `s` and `dim` are not the same.
10837        ValueError: If `input` dimension is less than 1.
10838        ValueError: If the value in `dim` is not in the range of "[ `-input_dim` , `input_dim-1` )".
10839        ValueError: If norm is none of "backward", "forward" or "ortho".
10840
10841    Supported Platforms:
10842        ``GPU`` ``CPU``
10843
10844    Examples:
10845        >>> from mindspore import Tensor, ops
10846        >>> input = Tensor([[[ 1.6243454 +0.j, -0.6117564 +0.j, -0.5281718 +0.j],
10847        ...                  [-1.0729686 +0.j, 0.86540765+0.j, -2.3015387 +0.j]]])
10848        >>> y = ops.ifftn(input)
10849        >>> print(y)
10850        [[[-0.33744708+0.j          0.30656774+0.44504493j
10851           0.30656774-0.44504493j]
10852         [ 0.49891948+0.j          0.42486876-0.46917373j
10853           0.42486876+0.46917373j]]]
10854    """
10855    ifftninput = _check_fftwithsize_input(input, s, dim, norm, "IFFTN")
10856    ifftn_ = FFTWithSize(signal_ndim=ifftninput.signal_ndim, inverse=True, real=False, norm=ifftninput.norm)
10857    out = ifftn_(ifftninput.input)
10858    return _handle_fftwithsize_output(out, ifftninput.input_dim, ifftninput.batch_dims,
10859                                      ifftninput.dim_permute, ifftninput.out_sizes)
10860
10861
10862@_primexpr
10863def _check_validate_axis(axis, name):
10864    def _check(axis):
10865        if isinstance(axis, (tuple, list)):
10866            for idx, item in enumerate(axis):
10867                validator.check_value_type("axis[%d]" % idx, item, [int], name)
10868
10869    _check(axis)
10870    axis = validator.check_value_type('axis', axis, [int, tuple, list], name)
10871    return axis
10872
10873
10874@constexpr
10875def _check_validate_keepdims(keep_dims, name):
10876    keep_dims = validator.check_value_type('keep_dims', keep_dims, [bool], name)
10877    return keep_dims
10878
10879
10880def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
10881    r"""
10882    Count number of nonzero elements across axis of input tensor.
10883
10884    Args:
10885        x (Tensor): Input data is used to count non-zero numbers. With shape
10886            :math:`(*)` where :math:`*` means, any number of additional dimensions.
10887        axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
10888            Default: ``()`` , reduce all dimensions.
10889        keep_dims (bool, optional): Whether to maintain dimensions specified by `axis`.
10890            If true, keep these reduced dimensions and the length is 1.
10891            If false, don't keep these dimensions. Default: ``False`` .
10892        dtype (Union[Number, mindspore.bool\_], optional): The data type of the output tensor.
10893            Default: ``mstype.int32`` .
10894
10895    Returns:
10896          Tensor, number of nonzero element across axis specified by `axis`.
10897          The data type is specified by `dtype`.
10898
10899    Raises:
10900        TypeError: If `axis` is not int, tuple or list.
10901        ValueError: If any value in `axis` is not in range [-x.ndim, x.ndim).
10902
10903    Supported Platforms:
10904        ``Ascend`` ``GPU`` ``CPU``
10905
10906    Examples:
10907        >>> from mindspore import Tensor, ops
10908        >>> import numpy as np
10909        >>> import mindspore
10910        >>> # case 1: each value specified.
10911        >>> x = Tensor(np.array([[0, 1, 0], [1, 1, 0]]).astype(np.float32))
10912        >>> nonzero_num = ops.count_nonzero(x=x, axis=[0, 1], keep_dims=True, dtype=mindspore.int32)
10913        >>> print(nonzero_num)
10914        [[3]]
10915        >>> # case 2: all value is default.
10916        >>> nonzero_num = ops.count_nonzero(x=x)
10917        >>> print(nonzero_num)
10918        3
10919        >>> # case 3: axis value was specified 0.
10920        >>> nonzero_num = ops.count_nonzero(x=x, axis=[0,])
10921        >>> print(nonzero_num)
10922        [1 2 0]
10923        >>> # case 4: axis value was specified 1.
10924        >>> nonzero_num = ops.count_nonzero(x=x, axis=[1,])
10925        >>> print(nonzero_num)
10926        [1 2]
10927        >>> # case 5: keep_dims value was specified.
10928        >>> nonzero_num = ops.count_nonzero(x=x,  keep_dims=True)
10929        >>> print(nonzero_num)
10930        [[3]]
10931        >>> # case 6: keep_dims and axis value was specified.
10932        >>> nonzero_num = ops.count_nonzero(x=x, axis=[0,], keep_dims=True)
10933        >>> print(nonzero_num)
10934        [[1 2 0]]
10935    """
10936
10937    const_utils.check_type_valid(dtype_(x), mstype.number_type, 'input x')
10938    axis = _check_validate_axis(axis, "count_nonzero")
10939    keep_dims = _check_validate_keepdims(keep_dims, "count_nonzero")
10940    const_utils.check_type_valid(dtype, mstype.number_type + (mstype.bool_,), 'dtype')
10941
10942    reduce_sum = _get_cache_prim(P.ReduceSum)(keep_dims)
10943
10944    tensor_0 = ops.zeros(x.shape, x.dtype)
10945    nonzero_bool = not_equal(x, tensor_0)
10946    # ReduceSum only support float16 or float32 tensor.
10947    nonzero_val = cast_(nonzero_bool, mstype.float32)
10948    nonzero_num = cast_(reduce_sum(nonzero_val, axis), dtype)
10949
10950    return nonzero_num
10951
10952
10953@_primexpr
10954def _int_to_tuple_conv(axes):
10955    """
10956    Converts ints to tuples in input axes, expected by most validation checks.
10957    """
10958    for x in [0, 1]:
10959        if isinstance(axes[x], int):
10960            axes[x] = (axes[x],)
10961    return axes
10962
10963
10964@_primexpr
10965def _check_axes(axes, prim_name=None):
10966    """
10967    Check for validity and type of axes passed to function.
10968    """
10969    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
10970    validator.check_value_type('axes', axes, [int, tuple, list], "tensor dot")
10971    if not isinstance(axes, int):
10972        axes = list(axes)  # to avoid immutability issues
10973        if len(axes) != 2:
10974            raise ValueError(f"{msg_prefix} dimension of 'axes' must be 2, but got 'axes': {axes}.")
10975        axes = _int_to_tuple_conv(axes)  # convert before length checks
10976        if len(axes[0]) != len(axes[1]):
10977            raise ValueError(f"{msg_prefix} first and second dim of 'axes' have to be the same size/length, "
10978                             f"but got 'axes': {axes}.")
10979        if len(axes[0]) != len(set(axes[0])) or len(axes[1]) != len(set(axes[1])):
10980            raise ValueError(f"{msg_prefix} 'axes' cannot have duplicating values, but got {axes}.")
10981    return axes
10982
10983
10984@constexpr
10985def _typecheck_input(x1_type, x2_type, prim_name=None):
10986    """
10987    Check input tensor types to be valid and confirm they are the same type.
10988    """
10989    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
10990    const_utils.check_type_valid(x1_type, [mstype.float32, mstype.float16], 'x1')
10991    const_utils.check_type_valid(x2_type, [mstype.float32, mstype.float16], 'x2')
10992    if x1_type != x2_type:
10993        raise TypeError(f"{msg_prefix} inputs must be the same type, but got x1_type: {x1_type} "
10994                        f"and x2_type: {x2_type}.")
10995
10996
10997@_primexpr
10998def _axes_int_check(x1_shape, x2_shape, axes, prim_name=None):
10999    """
11000    Convert from single int axes to 2d tuple if required
11001    """
11002    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
11003
11004    def _check_lt_zero(axes):
11005        if axes < 0:
11006            raise ValueError(f"{msg_prefix} 'axes' must be at least 0, but got {axes}.")
11007
11008    def _check_len(axes, x1_shape, x2_shape):
11009        if axes > len(x1_shape) or axes > len(x2_shape):
11010            raise ValueError(f"{msg_prefix} 'axes' cannot be greater than the length of 'x1_shape' and 'x2_shape', "
11011                             f"but got 'axes': {axes}, 'x1_shape': {x1_shape}, 'x2_shape': {x2_shape}.")
11012
11013    if isinstance(axes, int):
11014        _check_lt_zero(axes)
11015        if axes == 0:
11016            # outer product, no input validation required
11017            return [], []
11018        _check_len(axes, x1_shape, x2_shape)
11019        x1_ind = tuple(range(len(x1_shape))[-1 * axes:])
11020        x2_ind = tuple(range(len(x2_shape))[:axes])
11021        axes = tuple((x1_ind, x2_ind))
11022        axes = _int_to_tuple_conv(axes)
11023    return axes
11024
11025
11026@_primexpr
11027def _validate_axes(x1_shape, x2_shape, axes, prim_name=None):
11028    """
11029    Checks for axes having the correct length according to input, for any value in axis
11030    being out of range with given shape and also checking for compatible axes values
11031    with given inputs.
11032    """
11033    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
11034
11035    def _check_len(axes_len, shape_dim_len, x_axes):
11036        if axes_len > shape_dim_len:
11037            raise ValueError(f"{msg_prefix} length of element {x_axes} in 'axes' must be less than or equal to "
11038                             f"{shape_dim_len}, but got {axes_len}.")
11039
11040    def _check_axes_value(x_axes, min_val, max_val):
11041        for _, x_value in enumerate(x_axes):
11042            if x_value > max_val or x_value < min_val:
11043                raise ValueError(f"{msg_prefix} value in 'axes' must be in range: [{min_val}, {max_val}], "
11044                                 f"but got {x_value}.")
11045
11046    shapes = [x1_shape, x2_shape]
11047
11048    # axis length check
11049    for ix_input, x_axes in enumerate(axes):
11050        axes_len = len(x_axes)
11051        shape_dim_len = len(shapes[ix_input])
11052        _check_len(axes_len, shape_dim_len, x_axes)
11053
11054    # axis values range check
11055    for ix_input, x_axes in enumerate(axes):
11056        comp_shape = shapes[ix_input]
11057        max_val = len(comp_shape) - 1
11058        min_val = -1 * len(comp_shape)
11059        _check_axes_value(x_axes, min_val, max_val)
11060
11061    # check axis value with input shape - both ways for axis valid
11062    invalid_a = False
11063    invalid_b = False
11064    for i in range(len(axes[0])):  # sizes already validated
11065        if x1_shape[axes[0][i]] != x2_shape[axes[1][i]]:
11066            invalid_a = True
11067        if x1_shape[axes[0][i]] != x2_shape[axes[1][len(axes[0]) - 1 - i]]:
11068            invalid_b = True
11069
11070    def _check(invalid_a, invalid_b, x1_shape, x2_shape, axes):
11071        if invalid_a and invalid_b:
11072            raise ValueError(f"{msg_prefix} 'i' should exist such that 'x1_shape[axes[0][i]]' is equal to "
11073                             f"'x2_shape[axes[1][i]]' or 'x2_shape[axes[1][len(axes[0])-1-i]]', but got "
11074                             f"'x1_shape': {x1_shape}, 'x2_shape': {x2_shape}, 'axes': {axes}.")
11075
11076    _check(invalid_a, invalid_b, x1_shape, x2_shape, axes)
11077
11078
11079@_primexpr
11080def _calc_new_shape(shape, axes, position=0):
11081    """
11082    Calculate transpose and reshape parameters for input transformations,
11083    'position' refers to whether tensor is first or second in the op.
11084    """
11085    contraction_axes = tuple(i if i >= 0 else i + len(shape) for i in axes[position])
11086    prod_contraction = 1
11087    for i in contraction_axes:
11088        prod_contraction *= shape[i]
11089    free_axes = tuple(i for i in range(len(shape)) if i not in contraction_axes)
11090    free_dims = tuple(shape[i] if shape[i] is not None else -1 for i in free_axes)
11091    prod_free = 1
11092    for free_dim in free_dims:
11093        prod_free *= free_dim
11094
11095    transpose_perm = contraction_axes + free_axes if position else free_axes + contraction_axes
11096    new_shape = (prod_contraction, prod_free) if position else (prod_free, prod_contraction)
11097    return new_shape, transpose_perm, free_dims
11098
11099
11100def tensor_dot(x1, x2, axes):
11101    """
11102    Computation of Tensor contraction on arbitrary axes between tensors `a` and `b`.
11103
11104    Contraction allows for the summation of products of elements of `a` and `b` on specified axes.
11105    The same number of axes must be specified for both x1 and x2, and values must be within range
11106    of number of dims of both `a` and `b`.
11107
11108    Selected dims in both inputs must also match.
11109
11110    axes = 0 leads to outer product.
11111    axes = 1 leads to normal matrix multiplication when inputs both 2D.
11112    axes = 1 is the same as axes = ((1,),(0,)) where both `a` and `b` are 2D.
11113    axes = 2 is the same as axes = ((1,2),(0,1)) where both `a` and `b` are 3D.
11114
11115    Args:
11116        x1 (Tensor): First tensor in tensor_dot with datatype float16 or float32
11117        x2 (Tensor): Second tensor in tensor_dot with datatype float16 or float32
11118        axes (Union[int, tuple(int), tuple(tuple(int)), list(list(int))]): Single value or
11119            tuple/list of length 2 with dimensions specified for `a` and `b` each. If single value `N` passed,
11120            automatically picks up last N dims from `a` input shape and first N dims from `b` input shape in order
11121            as axes for each respectively.
11122
11123    Returns:
11124        Tensor, the shape of the output tensor is :math:`(N + M)`, where :math:`N` and :math:`M` are the free axes not
11125        contracted in both inputs.
11126
11127    Raises:
11128        TypeError: If `x1` or `x2` is not a Tensor.
11129        TypeError: If `axes` is not one of the following: int, tuple, list.
11130
11131    Supported Platforms:
11132        ``Ascend`` ``GPU`` ``CPU``
11133
11134    Examples:
11135        >>> from mindspore import Tensor, ops
11136        >>> import mindspore
11137        >>> import numpy as np
11138        >>> input_x1 = Tensor(np.ones(shape=[1, 2, 3]), mindspore.float32)
11139        >>> input_x2 = Tensor(np.ones(shape=[3, 1, 2]), mindspore.float32)
11140        >>> output = ops.tensor_dot(input_x1, input_x2, ((0,1),(1,2)))
11141        >>> print(output)
11142        [[2. 2. 2]
11143         [2. 2. 2]
11144         [2. 2. 2]]
11145    """
11146    matmul_op = _get_cache_prim(P.MatMul)(False, False)
11147    # input validity checks
11148    x1_shape = shape_(x1)
11149    x2_shape = shape_(x2)
11150    axes = _check_axes(axes, 'tensor_dot')
11151    # input compatibility check & axes format update
11152    axes = _axes_int_check(x1_shape, x2_shape, axes, 'tensor_dot')
11153    _validate_axes(x1_shape, x2_shape, axes, 'tensor_dot')
11154    x1_reshape_fwd, x1_transpose_fwd, x1_ret = _calc_new_shape(x1_shape, axes, 0)
11155    x2_reshape_fwd, x2_transpose_fwd, x2_ret = _calc_new_shape(x2_shape, axes, 1)
11156    output_shape = x1_ret + x2_ret  # combine free axes from both inputs
11157    # run tensor_dot op
11158    x1_transposed = transpose_(x1, x1_transpose_fwd)
11159    x2_transposed = transpose_(x2, x2_transpose_fwd)
11160    x1_reshaped = reshape_(x1_transposed, x1_reshape_fwd)
11161    x2_reshaped = reshape_(x2_transposed, x2_reshape_fwd)
11162    mul_result = matmul_op(x1_reshaped, x2_reshaped)
11163    final_result = reshape_(mul_result, output_shape)
11164    return final_result
11165
11166
11167def vecdot(x, y, *, axis=-1):
11168    r"""
11169    Calculates the dot product of two batches of vectors across the specified dimension.
11170
11171    The formula of calculation is as follows.
11172    :math:`\bar{x_{i}}` represents the conjugate for complex vectors,
11173    and :math:`\bar{x_{i}}` is the raw value for real vectors.
11174
11175    .. math::
11176
11177        \sum_{i=1}^{n} \bar{x_{i}}{y_{i}}
11178
11179    .. warning::
11180        This is an experimental API that is subject to change or deletion.
11181
11182    Args:
11183        x (Tensor): First batch of vectors. The shape of Tensor is :math:`(*,N)`
11184            where :math:`*` means, any number of additional dimensions. Supporting broadcasting.
11185            The dtype of Tensor should be one of the following types: float, double, int, complex64 and complex128.
11186        y (Tensor): Second batch of vectors. The shape of Tensor is :math:`(*,N)`
11187            where :math:`*` means, any number of additional dimensions. Supporting broadcasting.
11188            The dtype of Tensor should be one of the following types: float, double, int, complex64 and complex128.
11189        axis (int): Dimension across which to calculate the dot product. Default: ``-1`` .
11190
11191    Returns:
11192        Tensor, the shape is almost same as the shape of Tensor after broadcasting,
11193        while the specified dimension `axis` in shape has been removed.
11194
11195    Raises:
11196        TypeError: If `x` or `y` is not a Tensor.
11197        TypeError: If type of `axis` is not int.
11198        ValueError: If `axis` is out of range.
11199
11200    Supported Platforms:
11201        ``Ascend`` ``GPU`` ``CPU``
11202
11203    .. note::
11204        Currently, complex numbers are not supported on GPU.
11205
11206    Examples:
11207        >>> import mindspore as ms
11208        >>> from mindspore import ops
11209        >>> x = ms.Tensor([[1, 3], [5, 7], [9, 8]], dtype=ms.float32)
11210        >>> y = ms.Tensor([[4, 5], [6, 7], [3, 2]], dtype=ms.float32)
11211        >>> output = ops.vecdot(x, y, axis=-1)
11212        >>> print(output)
11213        [19. 79. 43.]
11214    """
11215    if (not isinstance(x, Tensor)) or (not isinstance(y, Tensor)):
11216        raise TypeError("For vecdot, x or y must be Tensor.")
11217    if not isinstance(axis, int):
11218        raise TypeError(f"For vecdot, the dim should be int, but got {type(axis)}.")
11219    ndim = x.ndim if x.ndim > y.ndim else y.ndim
11220    if (axis < -ndim) or (axis >= ndim):
11221        raise ValueError(f"For vecdot, the dim is out of range.")
11222    if x.dtype in mstype.complex_type:
11223        x = x.conj()
11224    result = x * y
11225    result = result.sum(axis=axis)
11226    return result
11227
11228
11229@_primexpr
11230def _check_invalid_input(x1_shape, x2_shape, prim_name=None):
11231    msg_prefix = f"For \\\'{prim_name}\\\', the" if prim_name else "The"
11232    if len(x1_shape) < 2 or len(x2_shape) < 2:
11233        raise ValueError(f"{msg_prefix} inputs x1, x2 should have \\\'dimension >= 2\\\',"
11234                         f"but got \\\'len(x1_shape)\\\': ({len(x1_shape)})"
11235                         f" and \\\'len(x2_shape)\\\': ({len(x2_shape)}).")
11236
11237
11238@constexpr
11239def _typecheck_input_dot(x1_type, x2_type, prim_name=None):
11240    """
11241    Check input tensor types to be valid and confirm they are the same type for dot and batch dot ops.
11242    """
11243    msg_prefix = f"For \\\'{prim_name}\\\', the" if prim_name else "The"
11244    const_utils.check_type_valid(x1_type, [mstype.float16, mstype.float32], 'x1')
11245    const_utils.check_type_valid(x2_type, [mstype.float16, mstype.float32], 'x2')
11246    if x1_type != x2_type:
11247        raise TypeError(f"{msg_prefix} inputs must be the same type, but got "
11248                        f"x1_type: {x1_type} and x2_type: {x2_type}.")
11249
11250
11251@_primexpr
11252def _get_transpose_shape(x2_shape):
11253    x2_shape_range = tuple(range(len(x2_shape)))
11254    x2_shape_transpose = x2_shape_range[-2:-1] + x2_shape_range[:-2] + x2_shape_range[-1:]
11255    return x2_shape_transpose
11256
11257
11258def dot(input, other):
11259    """
11260    Computation a dot product between samples in two tensors.
11261
11262    Args:
11263        input (Tensor): First tensor in Dot op with datatype float16 or float32.
11264            The rank must be greater than or equal to 2.
11265        other (Tensor): Second tensor in Dot op with datatype float16 or float32.
11266            The rank must be greater than or equal to 2.
11267
11268    Returns:
11269        Tensor, dot product of input and other.
11270
11271    Raises:
11272        TypeError: If type of input and other are not the same.
11273        TypeError: If dtype of input or other is not float16 or float32.
11274        ValueError: If rank of input or other less than 2.
11275
11276    Supported Platforms:
11277        ``Ascend`` ``GPU`` ``CPU``
11278
11279    Examples:
11280        >>> import numpy as np
11281        >>> import mindspore
11282        >>> from mindspore import Tensor, ops
11283        >>> input = Tensor(np.ones(shape=[2, 3]), mindspore.float32)
11284        >>> other = Tensor(np.ones(shape=[1, 3, 2]), mindspore.float32)
11285        >>> output = ops.dot(input, other)
11286        >>> print(output)
11287        [[[3. 3.]]
11288         [[3. 3.]]]
11289        >>> print(output.shape)
11290        (2, 1, 2)
11291        >>> input = Tensor(np.ones(shape=[1, 2, 3]), mindspore.float32)
11292        >>> other = Tensor(np.ones(shape=[1, 3, 2]), mindspore.float32)
11293        >>> output = ops.dot(input, other)
11294        >>> print(output)
11295        [[[[3. 3.]]
11296          [[3. 3.]]]]
11297        >>> print(output.shape)
11298        (1, 2, 1, 2)
11299        >>> input = Tensor(np.ones(shape=[1, 2, 3]), mindspore.float32)
11300        >>> other = Tensor(np.ones(shape=[2, 3, 2]), mindspore.float32)
11301        >>> output = ops.dot(input, other)
11302        >>> print(output)
11303        [[[[3. 3.]
11304           [3. 3.]]
11305          [[3. 3.]
11306           [3. 3.]]]]
11307        >>> print(output.shape)
11308        (1, 2, 2, 2)
11309        >>> input = Tensor(np.ones(shape=[3, 2, 3]), mindspore.float32)
11310        >>> other = Tensor(np.ones(shape=[2, 1, 3, 2]), mindspore.float32)
11311        >>> output = ops.dot(input, other)
11312        >>> print(output)
11313        [[[[[3. 3.]]
11314           [[3. 3.]]]
11315          [[[3. 3.]]
11316           [[3. 3.]]]]
11317         [[[[3. 3.]]
11318           [[3. 3.]]]
11319          [[[3. 3.]]
11320           [[3. 3.]]]]
11321         [[[[3. 3.]]
11322           [[3. 3.]]]
11323          [[[3. 3.]]
11324           [[3. 3.]]]]]
11325        >>> print(output.shape)
11326        (3, 2, 2, 1, 2)
11327    """
11328    matmul_op = _get_cache_prim(P.MatMul)(False, False)
11329    input_shape = shape_(input)
11330    other_shape = shape_(other)
11331    input_type = dtype_(input)
11332    other_type = dtype_(other)
11333    _typecheck_input_dot(input_type, other_type, 'dot')
11334    _check_invalid_input(input_shape, other_shape, 'dot')
11335
11336    if len(input_shape) > 2 or len(other_shape) > 2:
11337        other_shape_transpose = _get_transpose_shape(other_shape)
11338        other_transpose = transpose_(other, other_shape_transpose)
11339        input_reshape = reshape_(input, (-1, input_shape[-1]))
11340        other_reshape = reshape_(other_transpose, (other_shape[-2], -1))
11341        mul_result = matmul_op(input_reshape, other_reshape)
11342        reshape_shape = input_shape[:-1] + other_shape[:-2] + other_shape[-1:]
11343        reshape_shape = (-1,) + reshape_shape[1:]
11344        return reshape_(mul_result, reshape_shape)
11345    return matmul_op(input, other)
11346
11347
11348@_primexpr
11349def _get_batch_size(x1_shape, x2_shape, prim_name=None):
11350    """
11351    Get batch sizes from two inputs
11352    """
11353
11354    def _check():
11355        msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
11356        if len(x1_shape) < 2 or len(x2_shape) < 2:
11357            raise ValueError(f"{msg_prefix} inputs x1, x2 should have 'dimension >= 2', "
11358                             f"but got 'len(x1_shape)': ({len(x1_shape)}) and 'len(x2_shape)': ({len(x2_shape)}).")
11359
11360    _check()
11361    return x1_shape[0], x2_shape[0]
11362
11363
11364@constexpr
11365def _typecheck_input_batch_dot(x1_type, x2_type, prim_name=None):
11366    """
11367    Check input tensor types to be valid and confirm they are the same type for batch dot ops.
11368    """
11369    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
11370    const_utils.check_type_valid(x1_type, [mstype.float32], 'x1')
11371    const_utils.check_type_valid(x2_type, [mstype.float32], 'x2')
11372    if x1_type != x2_type:
11373        raise TypeError(f"{msg_prefix} inputs must be the same type, but got x1_type: {x1_type} and "
11374                        f"x2_type: {x2_type}.")
11375
11376
11377@_primexpr
11378def _check_axes_for_batch_dot(x1_shape, x2_shape, axes, prim_name=None):
11379    """
11380    Check whether axes are valid and cast axes from tuple to list
11381    """
11382    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
11383
11384    def _check_1(axes):
11385        if 0 in axes:
11386            raise ValueError(f"{msg_prefix} 'axes' cannot contain 0, but got axes: {axes}.")
11387        if len(axes) != 2:
11388            raise ValueError(f"{msg_prefix} length of 'axes' must be equal to 2, but got {len(axes)}.")
11389
11390    def _check_2(axes, x1_shape, x2_shape):
11391        if axes[0] > len(x1_shape) or axes[1] > len(x2_shape):
11392            raise ValueError(f"{msg_prefix} axes[0] must be less than or equal to len(x1_shape), "
11393                             f"and axes[1] must be less than or equal to len(x2_shape)."
11394                             f"But got 'axes': {axes}, 'x1_shape': {x1_shape}, 'x2_shape': {x2_shape}.")
11395
11396    def _check_3(axes, x1_shape, x2_shape):
11397        if axes == 0:
11398            raise ValueError(f"{msg_prefix} 'axes' should not be equal to 0, but got {axes}.")
11399
11400        if axes > len(x1_shape) or axes > len(x2_shape):
11401            raise ValueError(f"{msg_prefix} 'axes' cannot be greater than the length of 'x1_shape' and 'x2_shape', "
11402                             f"but got 'axes': {axes}, 'x1_shape': {x1_shape}, 'x2_shape': {x2_shape}.")
11403
11404    if axes is None:
11405        if len(x2_shape) == 2:
11406            axes = [len(x1_shape) - 1, len(x2_shape) - 1]
11407        else:
11408            axes = [len(x1_shape) - 1, len(x2_shape) - 2]
11409
11410    if isinstance(axes, (list, tuple)):
11411        _check_1(axes)
11412        if isinstance(axes, tuple):
11413            axes = list(axes)
11414        validator.check_value_type('axes[0]', axes[0], [int], 'batch_dot')
11415        validator.check_value_type('axes[1]', axes[1], [int], 'batch_dot')
11416        # Reverse if axis < 0
11417        if axes[0] < 0:
11418            axes[0] += len(x1_shape)
11419        if axes[1] < 0:
11420            axes[1] += len(x2_shape)
11421        validator.check_non_negative_int(axes[0], 'reversed axes[0]', 'batch_dot')
11422        validator.check_non_negative_int(axes[1], 'reversed axes[1]', 'batch_dot')
11423        _check_2(axes, x1_shape, x2_shape)
11424    elif isinstance(axes, int):
11425        _check_3(axes, x1_shape, x2_shape)
11426        if axes < 0:
11427            axes = [axes + len(x1_shape), axes + len(x2_shape)]
11428            validator.check_non_negative_int(axes[0], 'reversed axes', 'batch_dot')
11429        else:
11430            axes = [axes, axes]
11431    else:
11432        raise ValueError(f"{msg_prefix} type of 'axes' must be one of those: int, tuple(int), list(int), "
11433                         f"but got {type(axes).__name__}.")
11434    return axes
11435
11436
11437@_primexpr
11438def _calc_new_shape_batchdot(shape, axes, position=0):
11439    """
11440    Calculate transpose and reshape parameters for input transformations,
11441    'position' refers to whether tensor is first or second in the op.
11442    """
11443    axis = axes[position]
11444    contraction_axes = tuple([axis])
11445    prod_contraction = 1
11446    for i in contraction_axes:
11447        prod_contraction *= shape[i]
11448    free_axes = tuple(i for i in range(1, len(shape)) if i not in contraction_axes)
11449    free_dims = tuple(shape[i] for i in free_axes)
11450    prod_free = 1
11451    for free_dim in free_dims:
11452        prod_free *= free_dim
11453
11454    transpose_perm = contraction_axes + free_axes if position else free_axes + contraction_axes
11455    transpose_perm = tuple([0]) + transpose_perm
11456    new_shape = (prod_contraction, prod_free) if position else (prod_free, prod_contraction)
11457    new_shape = tuple([shape[0]]) + new_shape
11458    return new_shape, transpose_perm, free_dims
11459
11460
11461@_primexpr
11462def _check_batch_size(x1_batch_size, x2_batch_size, prim_name=None):
11463    """
11464    Check whether batch size of two inputs are the same
11465    """
11466    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
11467    if x1_batch_size != x2_batch_size:
11468        raise ValueError(f"{msg_prefix} inputs 'x1', 'x2' should have the same batch sizes, but got "
11469                         f"'x1_batch_size': {x1_batch_size} and 'x2_batch_size': {x2_batch_size}.")
11470
11471
11472@_primexpr
11473def _get_output_shape(batch_size, x1_ret, x2_ret):
11474    """
11475    Compute output shape for batch dot
11476    """
11477    output_shape = tuple([batch_size]) + x1_ret + x2_ret
11478    return output_shape
11479
11480
11481def batch_dot(x1, x2, axes=None):
11482    """
11483    Computation of batch dot product between samples in two tensors containing batch dims, i.e. `x1` or `x2` 's
11484    first dimension is batch size.
11485
11486    .. math::
11487        output = x1[batch, :] * x2[batch, :]
11488
11489    Args:
11490        x1 (Tensor): First tensor in Batch Dot op with datatype float32 and the rank of `x1` must be greater
11491          than or equal to 2.
11492        x2 (Tensor): Second tensor in Batch Dot op with datatype float32. The datatype of `x2` should
11493          be same as `x1` and the rank of `x2` must be greater than or equal to 2.
11494        axes (Union[int, tuple(int), list(int)]): Single value or tuple/list of length 2 with dimensions
11495          specified for `a` and `b` each. If single value `N` passed, automatically picks up last N dims from
11496          `a` input shape and last N dimensions from `b` input shape in order as axes for each respectively.
11497          Default: ``None`` .
11498
11499    Returns:
11500        Tensor, batch dot product of `x1` and `x2`. For example, the Shape of output
11501        for input `x1` shapes :math:`(batch, d1, axes, d2)` and
11502        `x2` shapes :math:`(batch, d3, axes, d4)` is :math:`(batch, d1, d2, d3, d4)`,
11503        where d1 and d2 means any number.
11504
11505    Raises:
11506        TypeError: If type of x1 and x2 are not the same.
11507        TypeError: If dtype of x1 or x2 is not float32.
11508        ValueError: If rank of x1 or x2 less than 2.
11509        ValueError: If batch dim used in axes.
11510        ValueError: If len(axes) less than 2.
11511        ValueError: If axes is not one of those: None, int, (int, int).
11512        ValueError: If axes reversed from negative int is too low for dimensions of input arrays.
11513        ValueError: If axes value is too high for dimensions of input arrays.
11514        ValueError: If batch size of x1 and x2 are not the same.
11515
11516    Supported Platforms:
11517        ``Ascend`` ``GPU`` ``CPU``
11518
11519    Examples:
11520        >>> import mindspore
11521        >>> from mindspore import Tensor, ops
11522        >>> import numpy as np
11523        >>> x1 = Tensor(np.ones(shape=[2, 2, 3]), mindspore.float32)
11524        >>> x2 = Tensor(np.ones(shape=[2, 3, 2]), mindspore.float32)
11525        >>> axes = (-1, -2)
11526        >>> output = ops.batch_dot(x1, x2, axes)
11527        >>> print(output)
11528        [[[3. 3.]
11529          [3. 3.]]
11530         [[3. 3.]
11531          [3. 3.]]]
11532        >>> x1 = Tensor(np.ones(shape=[2, 2]), mindspore.float32)
11533        >>> x2 = Tensor(np.ones(shape=[2, 3, 2]), mindspore.float32)
11534        >>> axes = (1, 2)
11535        >>> output = ops.batch_dot(x1, x2, axes)
11536        >>> print(output)
11537        [[2. 2. 2.]
11538         [2. 2. 2.]]
11539        >>> print(output.shape)
11540        (2, 3)
11541        >>> x1 = Tensor(np.ones(shape=[6, 2, 3, 4]), mindspore.float32)
11542        >>> x2 = Tensor(np.ones(shape=[6, 5, 4, 8]), mindspore.float32)
11543        >>> output = ops.batch_dot(x1, x2)
11544        >>> print(output.shape)
11545        (6, 2, 3, 5, 8)
11546        >>> x1 = Tensor(np.ones(shape=[2, 2, 4]), mindspore.float32)
11547        >>> x2 = Tensor(np.ones(shape=[2, 5, 4, 5]), mindspore.float32)
11548        >>> output = ops.batch_dot(x1, x2)
11549        >>> print(output.shape)
11550        (2, 2, 5, 5)
11551
11552    """
11553    squeeze_one_op = _get_cache_prim(P.Squeeze)(1)
11554    squeeze_minus_one_op = _get_cache_prim(P.Squeeze)(-1)
11555    # input validity checks
11556    x1_shape = shape_(x1)
11557    x2_shape = shape_(x2)
11558    x1_dim_num = len(x1_shape)
11559    x2_dim_num = len(x2_shape)
11560    x1_type = dtype_(x1)
11561    x2_type = dtype_(x2)
11562
11563    x1_batch_size, x2_batch_size = _get_batch_size(x1_shape, x2_shape, 'batch_dot')
11564
11565    _typecheck_input_batch_dot(x1_type, x2_type, 'batch_dot')
11566    _check_batch_size(x1_batch_size, x2_batch_size, 'batch_dot')
11567    axes = _check_axes_for_batch_dot(x1_shape, x2_shape, axes, 'batch_dot')
11568
11569    if x1_dim_num == 2:
11570        x1 = F.expand_dims(x1, 1)
11571        axes[0] += 1
11572    if x2_dim_num == 2:
11573        x2 = F.expand_dims(x2, 2)
11574
11575    x1_shape = shape_(x1)
11576    x2_shape = shape_(x2)
11577
11578    x1_reshape_fwd, x1_transpose_fwd, x1_ret = _calc_new_shape_batchdot(x1_shape, axes, 0)
11579    x2_reshape_fwd, x2_transpose_fwd, x2_ret = _calc_new_shape_batchdot(x2_shape, axes, 1)
11580    output_shape = _get_output_shape(x1_batch_size, x1_ret, x2_ret)
11581
11582    x1_transposed = transpose_(x1, x1_transpose_fwd)
11583    x2_transposed = transpose_(x2, x2_transpose_fwd)
11584    x1_reshaped = reshape_(x1_transposed, x1_reshape_fwd)
11585    x2_reshaped = reshape_(x2_transposed, x2_reshape_fwd)
11586
11587    # Batch matmal op part
11588    mul_result = batch_matmul_(x1_reshaped, x2_reshaped)
11589
11590    final_result = reshape_(mul_result, output_shape)
11591
11592    # if the original dims are expanded, restore them from 3 to 2
11593    if x1_dim_num == 2:
11594        final_result = squeeze_one_op(final_result)
11595    elif x2_dim_num == 2:
11596        final_result = squeeze_minus_one_op(final_result)
11597
11598    return final_result
11599
11600
11601__all__ = [
11602    'addn',
11603    'absolute',
11604    'abs',
11605    'bucketize',
11606    'tensor_add',
11607    'add',
11608    'addbmm',
11609    'addcdiv',
11610    'addcmul',
11611    'angle',
11612    'argmin',
11613    'arccosh',
11614    'arcsin',
11615    'arctan',
11616    'arctan2',
11617    'bincount',
11618    'neg',
11619    'negative',
11620    'tensor_lt',
11621    'less',
11622    'lt',
11623    'logaddexp2',
11624    'tensor_le',
11625    'lcm',
11626    'le',
11627    'lerp',
11628    'norm',
11629    'vector_norm',
11630    'matrix_norm',
11631    'tensor_gt',
11632    'logaddexp',
11633    'mv',
11634    'addmm',
11635    'addmv',
11636    'adjoint',
11637    'outer',
11638    'gt',
11639    'tensor_ge',
11640    'ge',
11641    'addr',
11642    'tensor_sub',
11643    'sub',
11644    'subtract',
11645    'tensor_mul',
11646    'mul',
11647    'multiply',
11648    'nan_to_num',
11649    'nansum',
11650    'nanmean',
11651    'nanmedian',
11652    'digamma',
11653    'lgamma',
11654    'tensor_div',
11655    'div',
11656    'divide',
11657    'true_divide',
11658    'tensor_floordiv',
11659    'floor_div',
11660    'floor_divide',
11661    'floordiv',
11662    'float_power',
11663    'fmod',
11664    'xdivy',
11665    'tensor_pow',
11666    'pow',
11667    'pows',
11668    'renorm',
11669    'tensor_mod',
11670    'floor_mod',
11671    'floormod',
11672    'tensor_exp',
11673    'exp',
11674    'tensor_expm1',
11675    'expm1',
11676    'eq',
11677    'equal',
11678    'not_equal',
11679    'ne',
11680    'numel',
11681    'permute',
11682    'inplace_update',
11683    'inplace_add',
11684    'inplace_sub',
11685    'isfinite',
11686    'isnan',
11687    'isclose',
11688    'isreal',
11689    'isneginf',
11690    'isposinf',
11691    'is_complex',
11692    'log',
11693    'logdet',
11694    'log_matrix_determinant',
11695    'matrix_determinant',
11696    'det',
11697    'linspace',
11698    'logspace',
11699    'lu_solve',
11700    'matrix_solve',
11701    'std',
11702    'maximum',
11703    'minimum',
11704    'median',
11705    'positive',
11706    'floor',
11707    'logical_not',
11708    'logical_or',
11709    'logical_and',
11710    'logit',
11711    'gcd',
11712    'logcumsumexp',
11713    'logsumexp',
11714    'ldexp',
11715    'rsqrt',
11716    'reciprocal',
11717    'real',
11718    'sqrt',
11719    'square',
11720    't',
11721    'sin',
11722    'cos',
11723    'tan',
11724    'asin',
11725    'acos',
11726    'arccos',
11727    'atan',
11728    'sinc',
11729    'sinh',
11730    'cosh',
11731    'tanh',
11732    'tanhshrink',
11733    'asinh',
11734    'arcsinh',
11735    'acosh',
11736    'atanh',
11737    'arctanh',
11738    'atan2',
11739    'round',
11740    'bitwise_and',
11741    'bitwise_or',
11742    'bitwise_xor',
11743    'bitwise_left_shift',
11744    'bitwise_right_shift',
11745    'inv',
11746    'inverse',
11747    'invert',
11748    'erf',
11749    'erfc',
11750    'cdist',
11751    'ceil',
11752    'bernoulli',
11753    'heaviside',
11754    'hypot',
11755    'i0',
11756    'bessel_j0',
11757    'bessel_j1',
11758    'bessel_i0',
11759    'bessel_i0e',
11760    'bessel_k0',
11761    'bessel_k0e',
11762    'bessel_y0',
11763    'bessel_y1',
11764    'bessel_i1',
11765    'bessel_i1e',
11766    'bessel_k1',
11767    'bessel_k1e',
11768    'exp2',
11769    'deg2rad',
11770    'stft',
11771    'rad2deg',
11772    'truncate_div',
11773    'truncate_mod',
11774    'trunc',
11775    'gumbel_softmax',
11776    'kaiser_window',
11777    'matmul',
11778    'inner',
11779    'cummin',
11780    'cummax',
11781    'cumsum',
11782    'amin',
11783    'amax',
11784    'mean',
11785    'prod',
11786    'all',
11787    'any',
11788    'sparse_segment_mean',
11789    'block_diag',
11790    'atleast_1d',
11791    'dstack',
11792    'diff',
11793    'atleast_2d',
11794    'cartesian_prod',
11795    'atleast_3d',
11796    'view_as_real',
11797    'vstack',
11798    'vander',
11799    'row_stack',
11800    'var',
11801    'var_mean',
11802    'std_mean',
11803    'combinations',
11804    'dist',
11805    'copysign',
11806    'hann_window',
11807    'log2',
11808    'slogdet',
11809    'trace',
11810    'xlogy',
11811    'log10',
11812    'log1p',
11813    'approximate_equal',
11814    'frac',
11815    'kron',
11816    'rot90',
11817    'remainder',
11818    'sgn',
11819    'sign',
11820    'signbit',
11821    'accumulate_n',
11822    'iou',
11823    'baddbmm',
11824    'bmm',
11825    'trapz',
11826    'cholesky',
11827    'cholesky_inverse',
11828    'cholesky_solve',
11829    'conj',
11830    'cosine_similarity',
11831    'cov',
11832    'cross',
11833    'einsum',
11834    'erfinv',
11835    'less_equal',
11836    'cumprod',
11837    'greater',
11838    'greater_equal',
11839    'igamma',
11840    'igammac',
11841    'isinf',
11842    'logical_xor',
11843    'imag',
11844    'roll',
11845    'sum',
11846    'matrix_exp',
11847    'matrix_power',
11848    'orgqr',
11849    'ormqr',
11850    'diag_embed',
11851    'fmax',
11852    'fmin',
11853    'inplace_index_add',
11854    'lu_unpack',
11855    'nanquantile',
11856    'polar',
11857    'polygamma',
11858    'quantile',
11859    'tril_indices',
11860    'histc',
11861    'nextafter',
11862    'triu_indices',
11863    'zeta',
11864    'fft',
11865    'fft2',
11866    'fftn',
11867    'ifft',
11868    'ifft2',
11869    'ifftn',
11870    'count_nonzero',
11871    'tensor_dot',
11872    'vecdot',
11873    'dot',
11874    'batch_dot',
11875    'eps',
11876]
11877__all__.sort()
11878