# Copyright 2020-2024 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

16"""Operators for math."""
17# pylint: disable=unused-import
18from __future__ import absolute_import
19from __future__ import division
20
21import numpy as np
22
23from mindspore import context
24from mindspore import log as logger
25from mindspore.ops import signature as sig
26from mindspore import _checkparam as validator
27from mindspore.common import dtype as mstype
28from mindspore.common.tensor import Tensor
29from mindspore.ops._utils import get_broadcast_shape
30from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
31from mindspore._c_expression import Tensor as Tensor_
32from ..auto_generate import (Add, Addcdiv, Addcmul, ReduceMean, ReduceSum, ReduceAll, ReduceAny,
33                             ReduceMax, ReduceMin, ReduceProd, Betainc, Neg, MatMul, BatchMatMul,
34                             Mul, Square, Rsqrt, Sqrt, Reciprocal, Pow, Exp,
35                             Logit, ReduceStd, Expm1, Log, Log1p, Erf, Erfc,
36                             Minimum, RealDiv, FloorDiv, Floor, FloorMod, Ceil,
37                             Acosh, Cosh, Asinh, Sinc, Sinh, Equal, NotEqual,
38                             Greater, GreaterEqual, Gcd, LogicalNot, LogicalAnd, LogicalOr,
39                             LogicalXor, Cos, ACos, Sin, Asin, Abs, Round, Atan, Atanh, Atan2,
40                             LinSpace, MatrixDeterminant, LogMatrixDeterminant, Erfinv, Conj,
41                             Real, Complex, Angle, MatrixExp, CholeskyInverse, Trace, Cholesky,
42                             FFTWithSize, NextAfter, NanToNum, Eig, Qr, Roll, Maximum, Div, DivMod, CumProd,
43                             CumSum, Less, LessEqual, AssignAdd, IsFinite, IsClose, TanhGrad)
44
45
def _infer_shape_reduce(x, axis, keep_dims, prim_name):
    """Common infer for reduce operator"""

    def reduce_one_axis(one_axis):
        validator.check_int_range(one_axis, -dim, dim, validator.INC_LEFT, 'axis', prim_name)
        if one_axis < 0:
            one_axis += dim
        axis_reduce.add(one_axis)

    validator.check_value_type('axis', axis, [int, tuple, list], prim_name)
    dim = len(x)
    axis_reduce = set()

    if isinstance(axis, int):
        reduce_one_axis(axis)
    else:
        if not axis:
            if keep_dims:
                return [1] * dim
            return []
        for index, one_axis in enumerate(axis):
            validator.check_value_type('axis[%d]' % index, one_axis, [int], prim_name)
            reduce_one_axis(one_axis)

    out_shape = []
    for i in range(dim):
        if i in axis_reduce:
            if keep_dims:
                out_shape.append(1)
        else:
            out_shape.append(x[i])
    return out_shape

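
# A minimal sketch (hypothetical helper, not part of the module) of what
# _infer_shape_reduce computes: reduced axes are dropped, or kept with length 1
# when `keep_dims` is set, and negative axes are normalized first.
def _example_infer_shape_reduce():
    """Illustration only."""
    # Reducing a (2, 3, 4) shape over axis 1 (given here as -2) drops that dimension.
    assert _infer_shape_reduce([2, 3, 4], -2, False, 'ReduceSum') == [2, 4]
    # With keep_dims=True the reduced dimension stays, with length 1.
    assert _infer_shape_reduce([2, 3, 4], (1,), True, 'ReduceSum') == [2, 1, 4]
    # An empty axis tuple means "reduce everything".
    assert _infer_shape_reduce([2, 3, 4], (), False, 'ReduceSum') == []
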

class _BinaryOp(PrimitiveWithInfer):
    """
    Define binary operators.
    """

    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_attr_register
    def __init__(self):
        """Initialize _BinaryOp"""
        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])

    def infer_shape(self, x_shape, y_shape):
        return get_broadcast_shape(x_shape, y_shape, self.name)


class _MathBinaryOp(_BinaryOp):
    """
    Define math binary operators.
    """

    @staticmethod
    def do_infer_dtype(x_dtype, y_dtype, valid_dtype=mstype.number_type, prim_name=None):
        """Staticmethod of infer dtype for _MathBinaryOp."""
        args_type = {"x": x_dtype, "y": y_dtype}
        complex_types = [mstype.TensorType(mstype.complex64), mstype.TensorType(mstype.complex128)]
        if x_dtype in complex_types or y_dtype in complex_types:
            if (not isinstance(x_dtype, type(mstype.tensor_type))) or \
               (not isinstance(y_dtype, type(mstype.tensor_type))):
                raise TypeError('Only Tensor types support complex dtypes.')
            type_infer_dict = {
                (mstype.complex64, mstype.complex64): mstype.TensorType(mstype.complex64),
                (mstype.complex64, mstype.float32): mstype.TensorType(mstype.complex64),
                (mstype.float32, mstype.complex64): mstype.TensorType(mstype.complex64),
                (mstype.complex128, mstype.complex128): mstype.TensorType(mstype.complex128),
                (mstype.complex128, mstype.float64): mstype.TensorType(mstype.complex128),
                (mstype.float64, mstype.complex128): mstype.TensorType(mstype.complex128),
            }
            if (x_dtype.element_type(), y_dtype.element_type()) not in type_infer_dict:
                raise TypeError('Complex math binary op expecting Tensor [complex64, complex64], '
                                '[complex64, float32], [float32, complex64], [complex128, complex128], '
                                '[complex128, float64] or [float64, complex128], '
                                f'but got: [{x_dtype}, {y_dtype}].')
            return type_infer_dict.get((x_dtype.element_type(), y_dtype.element_type()))

        validator.check_tensors_dtypes_same_and_valid(args_type, valid_dtype, prim_name)
        return x_dtype

    def infer_dtype(self, x_dtype, y_dtype):
        return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type, self.name)

    def _convert_back_shape(self, shape_value, cmp_shape):
        if isinstance(cmp_shape, (Tensor, Tensor_)):
            cmp_shape = cmp_shape.asnumpy()
        if not isinstance(cmp_shape, tuple):
            return shape_value
        real_shape = [dim if cmp_dim > 0 else cmp_dim for dim, cmp_dim in zip(shape_value, cmp_shape)]
        return tuple(real_shape)

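# A hedged, pure-Python mirror (illustrative, not used by the op) of the
# complex promotion table in do_infer_dtype above: a complex element type
# always wins, and complex64/complex128 pair with float32/float64 respectively.
def _example_complex_promotion(x_type, y_type):
    """Illustration only; arguments are mstype element types."""
    table = {
        frozenset((mstype.complex64,)): mstype.complex64,
        frozenset((mstype.complex64, mstype.float32)): mstype.complex64,
        frozenset((mstype.complex128,)): mstype.complex128,
        frozenset((mstype.complex128, mstype.float64)): mstype.complex128,
    }
    # frozenset makes the lookup order-insensitive, matching the symmetric
    # (x, y)/(y, x) entries of the original dict.
    return table.get(frozenset((x_type, y_type)))

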
class SilentCheck(Primitive):
    """
    Implement SilentCheck on `pre_val`, `min_val`, `max_val`, `result` and
    update them in place with the given parameters.

    Args:
        c_min_steps (int): an int determines...

        c_thresh_l1 (float): a float determines...

        c_coeff_l1 (float): a float determines...

        c_thresh_l2 (float): a float determines...

        c_coeff_l2 (float): a float determines...

    Inputs:
        - **val** (Tensor) - Tensor with dtype float32.
        - **input_grad** (Parameter) - Tensor with dtype float32.
        - **pre_val** (Parameter) - Input Parameter with dtype float32.
        - **min_val** (Parameter) - Input Parameter with dtype float32.
        - **max_val** (Parameter) - Input Parameter with dtype float32.
        - **val_counter** (Parameter) - Input Parameter with dtype int32.

    Outputs:
        Tuple of 5 Tensors, the updated parameters.

        - **input_grad** (Tensor) - Tensor with dtype float32.
        - **pre_val** (Tensor) - Tensor with dtype float32.
        - **min_val** (Tensor) - Tensor with dtype float32.
        - **max_val** (Tensor) - Tensor with dtype float32.
        - **result** (Tensor) - Tensor with dtype int32.

    Raises:
        TypeError: If `val` is not a Tensor with dtype float32.
        TypeError: If `result` is not a Tensor with dtype int32.
        TypeError: If `pre_val`, `min_val`, `max_val`, `input_grad` are not all Parameter type with dtype float32.
        TypeError: If `c_thresh_l1` or `c_coeff_l1` is not a float number.
        TypeError: If `c_min_steps` is not an int number.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> from mindspore.ops.operations.math_ops import SilentCheck
        >>> # illustrative parameter values
        >>> silent_check = SilentCheck(c_min_steps=1, c_thresh_l1=1.0, c_coeff_l1=1.0,
        ...                            c_thresh_l2=1.0, c_coeff_l2=1.0)
    """

    @prim_attr_register
    def __init__(self, c_min_steps, c_thresh_l1, c_coeff_l1, c_thresh_l2, c_coeff_l2):
        """Initialize SilentCheck."""
        validator.check_value_type("c_min_steps", c_min_steps, [int], self.name)
        validator.check_value_type("c_thresh_l1", c_thresh_l1, [float], self.name)
        validator.check_value_type("c_coeff_l1", c_coeff_l1, [float], self.name)
        validator.check_value_type("c_thresh_l2", c_thresh_l2, [float], self.name)
        validator.check_value_type("c_coeff_l2", c_coeff_l2, [float], self.name)
        self.add_prim_attr('side_effect_mem', True)


class _BitwiseBinaryOp(_MathBinaryOp):
    """
    Define bitwise binary operators.
    """

    @prim_attr_register
    def __init__(self):
        """Initialize _BitwiseBinaryOp"""
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])

    @staticmethod
    def _check_bitwise_op_input_type(x1_type, x2_type, prim):
        args = {'x1': x1_type, 'x2': x2_type}
        valid_dtypes = mstype.int_type + mstype.uint_type
        validator.check_tensors_dtypes_same_and_valid(args, valid_dtypes, prim)
        return x1_type

    def infer_dtype(self, x1_type, x2_type):
        return _BitwiseBinaryOp._check_bitwise_op_input_type(x1_type, x2_type, self.name)


class Ger(Primitive):
    r"""
    Ger product of `x1` and `x2`. Calculates the outer product of two arrays: if `x1` is a 1D Tensor of
    shape :math:`(m,)` and `x2` is a 1D Tensor of shape :math:`(n,)`, then `output` is a 2D Tensor of
    shape :math:`(m, n)`.

    Refer to :func:`mindspore.ops.ger` for more details.

    Inputs:
        - **x1** (Tensor) - 1-D input Tensor.
        - **x2** (Tensor) - 1-D input Tensor, has the same dtype as `x1`.

    Outputs:
        Tensor, output matrix with the same dtype as the inputs. With `x1` of shape :math:`(m,)` and
        `x2` of shape :math:`(n,)`, the `output` has shape :math:`(m, n)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, ops
        >>> x1 = Tensor([1., 2., 3., 4.], mindspore.float32)
        >>> x2 = Tensor([1., 2., 3.], mindspore.float32)
        >>> ger = ops.Ger()
        >>> output = ger(x1, x2)
        >>> print(output)
        [[ 1.  2.  3.]
         [ 2.  4.  6.]
         [ 3.  6.  9.]
         [ 4.  8. 12.]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Ger"""
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])


class AddV2(Primitive):
    r"""
    Adds two input tensors element-wise.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors, their shapes must be able to broadcast.
    When the inputs are one tensor and one scalar, the scalar can only be a constant.
    CPU/Ascend does not support broadcast for now.

    .. math::

        out_{i} = x_{i} + y_{i}

    Inputs:
        - **x** (Union[Tensor]) - The first input, a tensor whose data type is currently one of
          uint8, int8, int16, int32, int64, float16, float32, float64,
          complex64, complex128, or a scalar.
        - **y** (Union[Tensor]) - The second input, a tensor whose data type is currently one of
          uint8, int8, int16, int32, int64, float16, float32, float64,
          complex64, complex128, or a scalar.

    Outputs:
        Tensor, the shape is the same as the input tensor,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Raises:
        TypeError: If neither `x` nor `y` is a Tensor.
        TypeError: If dtype of `x` or `y` is not in [float16, float32, float64,
            uint8, int8, int16, int32, int64, complex64, complex128].
        ValueError: If the shapes of `x` and `y` are not the same for CPU and Ascend.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops.operations.math_ops import AddV2
        >>> addv2 = AddV2()
        >>> x = Tensor(np.array([1, 2, 3]).astype(np.int32))
        >>> y = Tensor(np.array([4, 5, 6]).astype(np.int32))
        >>> output = addv2(x, y)
        >>> print(output)
        [5 7 9]
    """
    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_attr_register
    def __init__(self):
        """Initialize AddV2"""
        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])


class AssignSub(Primitive):
    """
    Updates a `Parameter` by subtracting a value from it.

    Refer to :func:`mindspore.ops.assign_sub` for more details.

    Inputs:
        - **variable** (Parameter) - The `Parameter`.
          :math:`(N,*)` where :math:`*` means any number of additional dimensions; its rank should be less than 8.
        - **value** (Union[numbers.Number, Tensor]) - The value to be subtracted from the `variable`.
          It must have the same shape as `variable` if it is a Tensor.

    Outputs:
        Tensor, has the same data type and shape as the original `variable`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops, nn
        >>> from mindspore.common.initializer import initializer
        >>> class Net(nn.Cell):
        ...     def __init__(self):
        ...         super(Net, self).__init__()
        ...         self.AssignSub = ops.AssignSub()
        ...         self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
        ...
        ...     def construct(self, x):
        ...         self.AssignSub(self.variable, x)
        ...         return self.variable
        ...
        >>> net = Net()
        >>> value = Tensor(np.ones([1]).astype(np.int32)*100)
        >>> output = net(value)
        >>> print(net.variable.asnumpy())
        [-99]
    """

    __mindspore_signature__ = (
        sig.make_sig('val', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('value', dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self):
        """Initialize AssignSub"""
        self.init_prim_io_names(inputs=['val', 'value'], outputs=['val'])
        self.add_prim_attr('side_effect_mem', True)


class _Reduce(PrimitiveWithCheck):
    """
    Definition of base class of reduction class operators.

    Args:
        keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
                          If ``False`` , don't keep these dimensions. Default: ``False`` .
    """

    __mindspore_signature__ = (
        sig.make_sig('input_x'),
        sig.make_sig('axis', default=())
    )

    @prim_attr_register
    def __init__(self, keep_dims=False):
        """Initialize Reduce"""
        validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
        self.init_prim_io_names(inputs=['input_x', 'axis'], outputs=['y'])

    def __call__(self, x, axis=()):
        args = [x, axis]
        output = _run_op(self, self.name, args)
        return output

    def infer_value(self, input_x, axis):
        """Return the eagerly computed value of the reduce op."""
        value = None
        if input_x is not None and axis is not None:
            prim_map = {
                'ReduceMax': np.max,
                'ReduceMin': np.min,
                'ReduceProd': np.prod,
                'ReduceMean': np.mean,
                'ReduceAll': np.all,
                'ReduceAny': np.any,
            }
            np_reduce_func = prim_map.get(self.name, None)

            if np_reduce_func is not None:
                value = input_x.asnumpy()
                if isinstance(axis, int):
                    pass
                elif axis:
                    axis = tuple(set(axis))
                else:
                    axis = tuple(range(len(value.shape)))
                value = np_reduce_func(value, axis, keepdims=self.keep_dims)
                value = np.array(value)
                value = Tensor(value)
        return value

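
# A hedged sketch (illustrative helper, not part of the module) of the constant
# folding in _Reduce.infer_value above: a known input and a known axis tuple are
# reduced eagerly with the matching NumPy function and wrapped back into a Tensor.
def _example_reduce_fold(input_x, axis, keep_dims=False):
    """Illustration only: mirrors the ReduceMax branch for a tuple `axis`."""
    value = input_x.asnumpy()
    # An empty axis tuple means "reduce over every dimension".
    axis = tuple(set(axis)) if axis else tuple(range(value.ndim))
    return Tensor(np.array(np.max(value, axis, keepdims=keep_dims)))
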

class EuclideanNorm(Primitive):
    """
    Calculates the Euclidean norm (aka L2 norm) of a Tensor along the specified axes.
    The specified `axes` are removed by default.

    Args:
        keep_dims (bool, optional): whether to retain the reduced dimensions. If ``True`` , retains them with length 1.
            If ``False`` , these dimensions are removed. Default: ``False`` .

    Inputs:
        - **x** (Tensor) - The input Tensor to reduce.
        - **axes** (Tensor) - The axes to perform reduction on. Must be one of the following types: int32, int64.
          It must be in range :math:`[-rank(x), rank(x))`.

    Outputs:
        Tensor, has the same type as `x`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        ValueError: If `axes` is out of range.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[3, 5], [4, 12]]).astype(np.int32))
        >>> axes = Tensor([0])
        >>> op = ops.EuclideanNorm(keep_dims=True)
        >>> output = op(x, axes)
        >>> print(output)
        [[5 13]]
    """

    @prim_attr_register
    def __init__(self, keep_dims=False):
        """Initialize EuclideanNorm."""
        self.init_prim_io_names(inputs=['x', 'axes'], outputs=['y'])
        validator.check_value_type("keep_dims", keep_dims, [bool], self.name)


class CumulativeLogsumexp(Primitive):
    """
    Compute the cumulative log-sum-exp of the input tensor `x` along `axis` . For example, with all parameters at
    default values, if the input `x` is a tensor [a, b, c], the output will be [a, log(exp(a) + exp(b)),
    log(exp(a) + exp(b) + exp(c))].

    Args:
        exclusive (bool, optional): If ``True`` , the last element will be skipped during the calculation and thus an
                                    exclusive cumulative log-sum-exp will be performed. For example, this operation
                                    will output [-inf, a, log(exp(a) + exp(b))] with tensor [a, b, c] as the input.
                                    Note that, for performance reasons, the minimum representable value of the
                                    floating point type is used in place of -inf. Default: ``False`` .
        reverse (bool, optional): If ``True`` , the function accumulation values will be calculated after the elements
                                  of `x` on `axis` are flipped, and the calculation result will be flipped afterwards.
                                  For example, this operation will output [log(exp(c) + exp(b) + exp(a)), log(exp(c) +
                                  exp(b)), c] with tensor [a, b, c] as the input. Default: ``False`` .

    Inputs:
        - **x** (Tensor) - The input tensor. Must be one of the following types: float16, float32, float64. The
          dimension of `x` must be greater than 0.
        - **axis** (Tensor) - A 0-D tensor describing the dimension to compute the cumulative log-sum-exp along.
          Must be one of the following types: int64, int32, int16. Must be in the range [-rank(x), rank(x)).
          Default: ``0`` .

    Outputs:
        Tensor, has the same dtype and shape as `x`.

    Raises:
        TypeError: If `x` or `axis` is not a Tensor.
        TypeError: If dtype of `x` is not in [float16, float32, float64].
        TypeError: If dtype of `axis` is not in [int16, int32, int64].
        TypeError: If `exclusive` or `reverse` is not a bool.
        ValueError: If the dimension of `x` is not greater than 0.
        RuntimeError: If `axis` is out of range [-rank(x), rank(x)).

    Supported Platforms:
        ``Ascend`` ``CPU`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([1.0, 2.0, 3.0]).astype(np.float32))
        >>> op = ops.CumulativeLogsumexp(exclusive=False, reverse=False)
        >>> output = op(x, Tensor(0))
        >>> print(output)
        [1.        2.3132617 3.407606 ]
        >>> x = Tensor(np.array([1.0, 2.0, 3.0]).astype(np.float32))
        >>> op = ops.CumulativeLogsumexp(exclusive=True, reverse=False)
        >>> output = op(x, Tensor(0))
        >>> print(output)
        [-3.4028235e+38  1.0000000e+00  2.3132617e+00]
        >>> x = Tensor(np.array([1.0, 2.0, 3.0]).astype(np.float32))
        >>> op = ops.CumulativeLogsumexp(exclusive=False, reverse=True)
        >>> output = op(x, Tensor(0))
        >>> print(output)
        [3.407606  3.3132617 3.       ]
        >>> x = Tensor(np.array([1.0, 2.0, 3.0]).astype(np.float32))
        >>> op = ops.CumulativeLogsumexp(exclusive=True, reverse=True)
        >>> output = op(x, Tensor(0))
        >>> print(output)
        [ 3.3132617e+00  3.0000000e+00 -3.4028235e+38]
    """

    @prim_attr_register
    def __init__(self, exclusive=False, reverse=False):
        """Initialize CumulativeLogsumexp"""
        self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y'])
        validator.check_bool(exclusive, "exclusive", self.name)
        validator.check_bool(reverse, "reverse", self.name)

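
# A hedged NumPy sketch of the default behaviour (exclusive=False,
# reverse=False): accumulating with logaddexp reproduces the first docstring
# example, [1., 2.3132617, 3.407606] for input [1., 2., 3.].
def _example_cumulative_logsumexp(values):
    """Illustration only; `values` is a 1-D NumPy float array."""
    return np.logaddexp.accumulate(values)
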

class Bucketize(Primitive):
    """
    Bucketizes `input` based on `boundaries`.

    Args:
        boundaries (list[float]): A sorted list of floats giving the boundaries of the buckets; no default value.

    Inputs:
        - **input** (Tensor) - A tensor containing the search value(s).

    Outputs:
        Tensor, with the same shape as the input, and data type is int32.

    Raises:
        TypeError: If `boundaries` is not a list of float.
        TypeError: If `input` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, nn, ops
        >>> class Bucketize(nn.Cell):
        ...     def __init__(self, boundaries):
        ...         super().__init__()
        ...         self.bucketize = ops.Bucketize(boundaries=boundaries)
        ...     def construct(self, input):
        ...         return self.bucketize(input)
        >>> input = Tensor(np.array([[3, 6, 9], [3, 6, 9]]).astype(np.int32))
        >>> boundaries = list(np.array([1., 3., 5., 7., 9.]))
        >>> net = Bucketize(boundaries)
        >>> output = net(input)
        >>> print(output)
        [[2 3 5]
         [2 3 5]]
    """

    @prim_attr_register
    def __init__(self, boundaries):
        """Initialize Bucketize"""
        validator.check_value_type("boundaries", boundaries, [list], self.name)
        for index, one_boundaries in enumerate(boundaries):
            validator.check_value_type('boundaries[%d]' % index, one_boundaries, [float], self.name)
        self.init_prim_io_names(inputs=['input'], outputs=['output'])

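
# A hedged NumPy sketch of the bucket assignment above: with increasing
# boundaries, each output index counts how many boundaries are less than or
# equal to the value, which is what np.digitize computes by default.
def _example_bucketize(values, boundaries):
    """Illustration only: np.digitize([3, 6, 9], [1., 3., 5., 7., 9.]) -> [2, 3, 5]."""
    return np.digitize(values, boundaries).astype(np.int32)
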

class Lcm(Primitive):
    """
    Computes the least common multiple of input tensors element-wise.
    The shapes of the two inputs should be broadcastable, and their data types should be
    one of: int32, int64.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Inputs:
        - **x1** (Tensor) - The first input tensor.
        - **x2** (Tensor) - The second input tensor.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting, and the data type is the one
        with higher digits in the two inputs.

    Raises:
        TypeError: If the data type of `x1` or `x2` is not int32 or int64.
        ValueError: If the shapes of the two inputs are not broadcastable.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x1 = Tensor(np.array([7, 8, 9]))
        >>> x2 = Tensor(np.array([14, 6, 12]))
        >>> lcm_ = ops.Lcm()
        >>> y = lcm_(x1, x2)
        >>> print(y)
        [14 24 36]
    """

    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])

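
# A hedged NumPy sketch of Lcm: np.lcm is the same element-wise least common
# multiple with broadcasting, restricted to integer inputs.
def _example_lcm(x1, x2):
    """Illustration only: np.lcm([7, 8, 9], [14, 6, 12]) -> [14, 24, 36]."""
    return np.lcm(x1, x2)
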

class Cdist(Primitive):
    """
    Computes the batched p-norm distance between each pair of row vectors in the two collections.

    Refer to :func:`mindspore.ops.cdist` for more details.

    Args:
        p (float, optional): P value for the p-norm distance to calculate between each vector pair, P ∈ [0,∞].
            Default: ``2.0`` .

    Inputs:
        - **input_x** (Tensor) - Input tensor of shape :math:`(B, P, M)`.
          When :math:`B` is equal to 0, it means this dimension can be ignored,
          i.e. the shape of the tensor is :math:`(P, M)`.
        - **input_y** (Tensor) - Input tensor of shape :math:`(B, R, M)` with the same dtype as `input_x`.

    Outputs:
        Tensor, has the same dtype as `input_x`, whose shape is :math:`(B, P, R)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> import mindspore
        >>> from mindspore import Tensor, ops
        >>> input_x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
        >>> input_y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
        >>> op = ops.Cdist(p=2.0)
        >>> output = op(input_x, input_y)
        >>> print(output)
        [[[2.8284273 2.8284273]
          [1.4142137 1.4142137]]]
    """

    @prim_attr_register
    def __init__(self, p=2.0):
        """Initialize Cdist"""
        validator.check_value_type("p", p, [float], self.name)
        if p < 0 or np.isnan(p):
            raise ValueError(f"For 'Cdist', 'p' must be a non-negative value, but got {p}.")
        self.init_prim_io_names(inputs=['input_x', 'input_y'], outputs=['output'])

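
# A hedged NumPy sketch of the batched pairwise distance above: broadcast the
# two collections of row vectors against each other, then take the vector
# p-norm of the differences over the feature axis.
def _example_cdist(x, y, p=2.0):
    """Illustration only; x has shape (B, P, M) and y has shape (B, R, M)."""
    diff = x[:, :, None, :] - y[:, None, :, :]   # (B, P, R, M)
    return np.linalg.norm(diff, ord=p, axis=-1)  # (B, P, R)
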

class LpNorm(Primitive):
    r"""
    Returns the p-norm of a matrix or vector.

    .. math::
        output = \|input\|_{p}=\left(\sum_{i=1}^{n}\left|input_{i}\right|^{p}\right)^{1 / p}

    Args:
        axis (Union[int, tuple, list]): Specifies which dimension or dimensions of `input` to calculate the norm
            across.
        p (int, optional): The order of norm. Default: ``2`` .
        keep_dims (bool, optional): Whether the output tensors have dim retained or not. Default: ``False`` .
        epsilon (float, optional): The lower bound value; when the calculated norm is less than this value,
            replace this result with `epsilon`. Default: ``1e-12`` .

    Inputs:
        - **input** (Tensor) - Input tensor of type float16, float32.

    Outputs:
        Tensor, has the same dtype as `input`, and its shape depends on `axis`. For example, if the shape of `input`
        is :math:`(2, 3, 4)` and `axis` is :math:`[0, 1]`, the output shape will be :math:`(4,)`.

    Raises:
        TypeError: If `input` is not a Tensor.
        TypeError: If dtype of `input` is not one of: float16, float32.
        TypeError: If `p` is not an int.
        TypeError: If `axis` is not an int, a tuple or a list.
        TypeError: If `axis` is a tuple or a list, but an element of `axis` is not an int.
        TypeError: If `keep_dims` is not a bool.
        ValueError: If an element of `axis` is out of the range :math:`[-r, r)`,
            where :math:`r` is the rank of `input`.
        ValueError: If the length of shape of `axis` is bigger than the length of shape of `input`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_x = Tensor(np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]).astype(np.float32))
        >>> op = ops.LpNorm(axis=[0, 1], p=2, keep_dims=False)
        >>> output = op(input_x)
        >>> print(output)
        [ 9.165152 10.954452]
    """

    @prim_attr_register
    def __init__(self, axis, p=2, keep_dims=False, epsilon=1e-12):
        """Initialize LpNorm"""
        super().__init__("LpNorm")
        validator.check_value_type("p", p, [int], self.name)
        validator.check_value_type("axis", axis, [int, tuple, list], self.name)
        validator.check_value_type("keep_dims", keep_dims, [bool], self.name)
        validator.check_value_type("epsilon", epsilon, [float], self.name)
        validator.check_non_negative_int(p, "p", self.name)
        validator.check_non_negative_float(epsilon, "epsilon", self.name)
        if isinstance(axis, int):
            self.add_prim_attr('axis', [self.axis])
        else:
            for element_of_axis in axis:
                validator.check_value_type("element_of_axis", element_of_axis, [int], self.name)
        self.init_prim_io_names(inputs=['input'], outputs=['output'])

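
# A hedged NumPy rendering of the LpNorm formula above: raise |input| to the
# p-th power, sum over `axis`, then take the 1/p root. With the docstring's
# input and axis=(0, 1) this reproduces [9.165152, 10.954452].
def _example_lp_norm(values, axis, p=2):
    """Illustration only; `values` is a NumPy array and `axis` a tuple."""
    return np.power(np.sum(np.abs(values) ** p, axis=axis), 1.0 / p)
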

class AddN(Primitive):
    """
    Computes addition of all input tensors element-wise.

    Refer to :func:`mindspore.ops.addn` for more details.

    Inputs:
        - **x** (Union(tuple[Tensor], list[Tensor])) - A tuple or list composed of Tensor, the data type is
          boolean or numeric.

    Outputs:
        Tensor, has the same shape and dtype as each Tensor of `x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, nn, ops
        >>> class NetAddN(nn.Cell):
        ...     def __init__(self):
        ...         super(NetAddN, self).__init__()
        ...         self.addN = ops.AddN()
        ...
        ...     def construct(self, *z):
        ...         return self.addN(z)
        ...
        >>> net = NetAddN()
        >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
        >>> y = Tensor(np.array([4, 5, 6]), mindspore.float32)
        >>> output = net(x, y, x, y)
        >>> print(output)
        [10. 14. 18.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize AddN."""
        self.init_prim_io_names(inputs=["inputs"], outputs=["sum"])

    def check_elim(self, inputs):
        if len(inputs) != 1:
            return False, None
        if isinstance(inputs[0], Tensor):
            return True, inputs[0]
        raise TypeError(f"For '{self.name}', the type of 'inputs[0]' must be a tensor, but "
                        f"got {type(inputs[0]).__name__}, "
                        f"or the length of 'inputs' should not be equal to 1, but got ({len(inputs)}).")


class AccumulateNV2(Primitive):
    """
    Computes accumulation of all input tensors element-wise.

    Refer to :func:`mindspore.ops.accumulate_n` for more details.

    Inputs:
        - **x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
          is made up of multiple tensors whose dtype is number to be added together.
          Each element of the tuple or list should have the same shape.

    Outputs:
        Tensor, has the same shape and dtype as each entry of `x`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops, nn
        >>> class NetAccumulateNV2(nn.Cell):
        ...     def __init__(self):
        ...         super(NetAccumulateNV2, self).__init__()
        ...         self.accumulateNV2 = ops.AccumulateNV2()
        ...
        ...     def construct(self, *z):
        ...         return self.accumulateNV2(z)
        ...
        >>> net = NetAccumulateNV2()
        >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
        >>> y = Tensor(np.array([4, 5, 6]), mindspore.float32)
        >>> output = net(x, y, x, y)
        >>> print(output)
        [10. 14. 18.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize AccumulateNV2."""
        self.__setattr_flag__ = True
        self.init_prim_io_names(inputs=["inputs"], outputs=["sum"])

    def check_elim(self, inputs):
        if len(inputs) != 1:
            return False, None
        if isinstance(inputs[0], Tensor):
            return True, inputs[0]
        raise TypeError(f"For '{self.name}', the type of 'inputs[0]' must be a tensor, "
                        f"but got {type(inputs[0]).__name__}, "
                        f"or the length of 'inputs' should not be equal to 1, but got ({len(inputs)}).")


class InplaceUpdateV2(Primitive):
    r"""
    Updates specified values in `x` to `v` according to `indices`.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Refer to :func:`mindspore.ops.inplace_update` for more details.

    Inputs:
        - **x** (Tensor) - The tensor to be updated in place. It can be one of the following data types:
          float32, float16 and int32.
        - **indices** (Union[int, tuple]) - Indices into the left-most dimension of `x`, determining which rows of `x`
          to update with `v`. It is an int or tuple, whose values are in [0, the first dimension size of `x`).
        - **v** (Tensor) - A tensor with the same type as `x` and the same dimension sizes as `x` except
          the first dimension, which must be the same as the size of `indices`.

    Outputs:
        Tensor, with the same type and shape as the input `x`.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> import mindspore
        >>> from mindspore import Tensor, ops
        >>> indices = (0, 1)
        >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
        >>> v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
        >>> inplace_update_v2 = ops.InplaceUpdateV2()
        >>> output = inplace_update_v2(x, indices, v)
        >>> print(output)
        [[0.5 1. ]
         [1.  1.5]
         [5.  6. ]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize InplaceUpdateV2"""
        self.init_prim_io_names(inputs=['x', 'indices', 'v'], outputs=['y'])

    def __call__(self, x, indices, v):
        args = [x, indices, v]
        output = _run_op(self, self.name, args)
        return output


class InplaceAdd(Primitive):
    """
    Adds `v` into specified rows of `x`. Computes `y` = `x`; y[i,] += `v`.

    Refer to :func:`mindspore.ops.inplace_add` for more details.

    Args:
        indices (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of `x`
            to add with `v`. It is an integer or a tuple, whose values are in [0, the first dimension size of `x`).

    Inputs:
        - **x** (Tensor) - The tensor to be added to. It has shape :math:`(N,*)` where :math:`*` means
          any number of additional dimensions.
        - **input_v** (Tensor) - The value tensor to be added to `x`. It has the same dimension sizes as `x` except
          the first dimension, whose size must be the same as that of `indices`. It has the same data type as `x`.

    Outputs:
        Tensor, has the same shape and dtype as `x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> import mindspore
        >>> from mindspore import Tensor, ops
        >>> indices = (0, 1)
        >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
        >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
        >>> inplaceAdd = ops.InplaceAdd(indices)
        >>> output = inplaceAdd(x, input_v)
        >>> print(output)
        [[1.5 3. ]
         [4.  5.5]
         [5.  6. ]]
    """

    @prim_attr_register
    def __init__(self, indices):
        """Initialize InplaceAdd"""
        self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])
        self.indices = indices
        validator.check_value_type('indices', indices, [tuple, int], self.name)
        if isinstance(indices, int):
            self.indices = (indices,)
        for item in self.indices:
            validator.check_value_type("item of indices", item, [int], self.name)
        self.add_prim_attr("indices", self.indices)


class InplaceIndexAdd(Primitive):
    """
    Adds Tensor `updates` to specified axis and indices of Tensor `var` element-wise.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Refer to :func:`mindspore.ops.inplace_index_add` for more details.

    Args:
        axis (int): The dimension along which to index. It should be in range :math:`[0, len(var.dim))`.

    Inputs:
        - **var** (Parameter) - The input Parameter to add to, with data type uint8, int8, int16, int32,
          float16, float32, float64.
        - **indices** (Tensor) - The indices along `axis` to perform the addition. A 1D Tensor
          of shape :math:`(updates.shape[axis],)`, every value of it
          should be in range :math:`[0, var.shape[axis])` with data type int32.
        - **updates** (Tensor) - The input Tensor with the value to add. Must have the same data type as `var`.
          The shape must be the same as `var` except for the `axis`-th dimension.

    Outputs:
        Tensor, the updated result, with the same shape and dtype as `var`.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops, Parameter
        >>> var = Parameter(Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32))
        >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
        >>> updates = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
        >>> inplaceIndexAdd = ops.InplaceIndexAdd(axis=0)
        >>> var = inplaceIndexAdd(var, indices, updates)
        >>> print(var)
        [[1.5 3. ]
         [4.  5.5]
         [5.  6. ]]
    """

    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
        sig.make_sig('updates', dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self, axis):
        """Initialize InplaceIndexAdd"""
        self.init_prim_io_names(inputs=['var', 'indices', 'updates'], outputs=['var'])
        self.axis = axis
        validator.check_value_type('axis', axis, [int], self.name)


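# A hedged NumPy sketch of the in-place update family above: np.add.at adds
# `updates` into the rows of `var` selected by `indices` (axis 0), mutating
# `var` rather than allocating a result.
def _example_inplace_index_add(var, indices, updates):
    """Illustration only; mirrors InplaceIndexAdd with axis=0 on NumPy arrays."""
    np.add.at(var, indices, updates)
    return var

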
class InplaceSub(Primitive):
    r"""
    Subtracts `v` from specified rows of `x`. Computes :math:`y = x`; :math:`y[i,] -= input\_v`.

    Refer to :func:`mindspore.ops.inplace_sub` for more details.

    Args:
        indices (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of `x`
            to subtract `v` from. It is an integer or a tuple, whose values are in [0, the first dimension size of
            `x`).

    Inputs:
        - **x** (Tensor) - The tensor to be subtracted from. It has shape :math:`(N,*)` where :math:`*` means
          any number of additional dimensions.
        - **input_v** (Tensor) - The value tensor to be subtracted from `x`. It has the same dimension sizes as `x`
          except the first dimension, whose size must be the same as that of `indices`. It has the same data type
          as `x`.

    Outputs:
        Tensor, has the same shape and dtype as `x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> import mindspore
        >>> from mindspore import Tensor, ops
        >>> indices = (0, 1)
        >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
        >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
        >>> inplaceSub = ops.InplaceSub(indices)
        >>> output = inplaceSub(x, input_v)
        >>> print(output)
        [[0.5 1. ]
         [2.  2.5]
         [5.  6. ]]
    """

    @prim_attr_register
    def __init__(self, indices):
        """Initialize InplaceSub"""
        self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])
        self.indices = indices
        validator.check_value_type('indices', indices, [tuple, int], self.name)
        if isinstance(indices, int):
            self.indices = (indices,)
        for item in self.indices:
            validator.check_value_type("item of indices", item, [int], self.name)
        self.add_prim_attr("indices", self.indices)


class Sub(_MathBinaryOp):
    r"""
    Subtracts the second input tensor from the first input tensor element-wise.

    Refer to :func:`mindspore.ops.sub` for more details.

    Note:
        - When the two inputs have different shapes, they must be able to broadcast to a common shape.
        - The two inputs cannot both be bool type at the same time,
          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
        - The two inputs comply with the implicit type conversion rules to make the data types
          consistent.

    Inputs:
        - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
          a bool or a tensor whose data type is
          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
        - **y** (Union[Tensor, number.Number, bool]) - The second input; when the first input is a Tensor,
          the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.

    Outputs:
        Tensor, the shape is the same as the two inputs after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
        >>> y = Tensor(np.array([4, 5, 6]), mindspore.int32)
        >>> sub = ops.Sub()
        >>> output = sub(x, y)
        >>> print(output)
        [-3 -3 -3]
    """

    def infer_value(self, x, y):
        if x is not None and y is not None:
            x = x.asnumpy()
            y = y.asnumpy()
            out = x - y
            out = np.array(out, x.dtype)
            return Tensor(out)
        return None


class SquaredDifference(Primitive):
    """
    Subtracts the second input tensor from the first input tensor element-wise and returns the square of it.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors,
    their dtypes cannot both be bool, and their shapes can be broadcast.
    When the inputs are one tensor and one scalar,
    the scalar can only be a constant.

    .. math::

        out_{i} = (x_{i} - y_{i}) * (x_{i} - y_{i}) = (x_{i} - y_{i})^2

    Inputs:
        - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool, or a tensor.
        - **y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
          is a tensor, or a tensor.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Raises:
        TypeError: If `x` and `y` are not a Number, a bool, or a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
        >>> y = Tensor(np.array([2.0, 4.0, 6.0]), mindspore.float32)
        >>> squared_difference = ops.SquaredDifference()
        >>> output = squared_difference(x, y)
        >>> print(output)
        [1. 4. 9.]
    """
    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_attr_register
    def __init__(self):
        """Initialize SquaredDifference"""
        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])


class Einsum(Primitive):
    """
    Sums the product of the elements of the input Tensor along
    dimensions specified in the notation, based on the Einstein summation convention (Einsum).
    You can use this operator to perform diagonal/reducesum/transpose/matmul/mul/inner product operations, etc.

    Args:
        equation (str): An attribute representing the operation you want to do.
            The value can contain only letters ([a-z][A-Z]), commas (,), ellipsis (...),
            and the arrow (->). The letters represent the input tensors' dimensions,
            commas (,) separate tensors, the ellipsis (...) indicates
            the tensor dimensions that you do not care about, the left of the
            arrow (->) indicates the input tensors,
            and the right of it indicates the desired output dimensions.

    Inputs:
        - **x** (Union[tuple[Tensor], list[Tensor]]) - Input tensors used for calculation.
          The inputs must be a tuple/list of Tensors.
          When the input is only one tensor, you can input (tensor, ).
          Dtypes of them should be float16/float32/float64 and the dtype of the tensor(s) must be the same.

    Outputs:
        Tensor, the shape of it can be obtained from the equation,
        and the data type is the same as the input tensors.

    Raises:
        TypeError: If the equation itself is invalid, or the equation does not match the input tensors.
        TypeError: If dtypes of the input Tensors are not the same or the dtype is not float16, float32 or float64.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
        >>> equation = "i->"
        >>> einsum = ops.Einsum(equation)
        >>> output = einsum([x])
        >>> print(output)
        [7.]
        >>>
        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
        >>> y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
        >>> equation = "i,i->i"
        >>> einsum = ops.Einsum(equation)
        >>> output = einsum((x, y))
        >>> print(output)
        [ 2. 8. 12.]
        >>>
        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
        >>> y = Tensor(np.array([[2.0, 3.0], [1.0, 2.0], [4.0, 5.0]]), mindspore.float32)
        >>> equation = "ij,jk->ik"
        >>> einsum = ops.Einsum(equation)
        >>> output = einsum((x, y))
        >>> print(output)
        [[16. 22.]
        [37. 52.]]
        >>>
        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
        >>> equation = "ij->ji"
        >>> einsum = ops.Einsum(equation)
        >>> output = einsum((x,))
        >>> print(output)
        [[1. 4.]
        [2. 5.]
        [3. 6.]]
        >>>
        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
        >>> equation = "ij->j"
        >>> einsum = ops.Einsum(equation)
        >>> output = einsum((x,))
        >>> print(output)
        [5. 7. 9.]
        >>>
        >>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
        >>> equation = "...->"
        >>> einsum = ops.Einsum(equation)
        >>> output = einsum((x,))
        >>> print(output)
        [21.]
        >>>
        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
        >>> y = Tensor(np.array([2.0, 4.0, 1.0]), mindspore.float32)
        >>> equation = "j,i->ji"
        >>> einsum = ops.Einsum(equation)
        >>> output = einsum((x, y))
        >>> print(output)
        [[ 2. 4. 1.]
        [ 4. 8. 2.]
        [ 6. 12. 3.]]
    """

    @prim_attr_register
    def __init__(self, equation):
        if not isinstance(equation, str):
            raise TypeError("the equation must be a str!")
        seg_equation = equation.split("->")
        if len(seg_equation) > 2:
            raise TypeError("the equation can contain only one arrow!")
        self.init_prim_io_names(inputs=['inputs'], outputs=['output'])


class Histogram(Primitive):
    """
    Computes the histogram of Tensor element distribution.

    The elements are sorted into equal-width bins between `min` and `max`.
    If `min` and `max` are both zero, the minimum and maximum values of the data are used.

    Elements lower than `min` and higher than `max` are ignored.

    Args:
        bins (int, optional): Number of histogram bins. Default: ``100`` . If specified, must be positive.
        min (float, optional): An optional float of the lower end of the range (inclusive). Default value is ``0.0`` .
        max (float, optional): An optional float of the upper end of the range (inclusive). Default value is ``0.0`` .

    Inputs:
        - **x** (Tensor) - the input tensor, type support list: [float16, float32, int32].

    Outputs:
        Tensor, 1-D Tensor with type int32.

    Raises:
        TypeError: If `x` is not a Tensor.
        TypeError: If the dtype of `x` is not in the support list.
        TypeError: If attr `min` or `max` is not float.
        TypeError: If attr `bins` is not int.
        ValueError: If attr value `min` > `max`.
        ValueError: If attr `bins` <= 0.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> from mindspore import Tensor, ops
        >>> x = Tensor([1., 2, 1])
        >>> op = ops.Histogram(bins=4, min=0.0, max=3.0)
        >>> y = op(x)
        >>> print(y)
        [0 2 1 0]
    """

    @prim_attr_register
    def __init__(self, bins=100, min=0.0, max=0.0):  # pylint: disable=W0622
        """Initialize Histogram."""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])
        validator.check_value_type("bins", bins, [int], self.name)
        validator.check_value_type("min", min, [float], self.name)
        validator.check_value_type("max", max, [float], self.name)
        validator.check_positive_int(bins, 'bins', self.name)
        validator.check('min', min, 'max', max, validator.LE, self.name)

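
# A hedged NumPy sketch of Histogram with explicit bounds: np.histogram over
# [min, max] with `bins` equal-width buckets also drops out-of-range values,
# and np.histogram([1., 2., 1.], 4, (0., 3.)) reproduces the docstring output
# [0, 2, 1, 0].
def _example_histogram(values, bins=4, value_min=0.0, value_max=3.0):
    """Illustration only; `values` is a NumPy array."""
    counts, _ = np.histogram(values, bins=bins, range=(value_min, value_max))
    return counts.astype(np.int32)
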

class HistogramFixedWidth(PrimitiveWithInfer):
    """
    Returns a rank-1 histogram counting the number of entries in `x` that fall into every bin. The bins are equal
    width and determined by the input `range` and the argument `nbins`.

    Args:
        nbins (int): The number of histogram bins, the type is a positive integer.
        dtype (str, optional): An optional attribute. The dtype must be str. Default: ``'int32'`` .

    Inputs:
        - **x** (Tensor) - Numeric Tensor. Must be one of the following types: int32, float32, float16.
        - **range** (Tensor) - Must have the same data type as `x`, and the shape is :math:`(2,)`.
          x <= range[0] will be mapped to histogram[0], x >= range[1] will be mapped to histogram[-1].

    Outputs:
        1-D Tensor of length `nbins`, with dtype int32.

    Raises:
        TypeError: If `dtype` is not a str or `nbins` is not an int.
        ValueError: If `nbins` is less than 1.
        ValueError: If `dtype` is not 'int32'.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, ops
        >>> x = Tensor([-1.0, 0.0, 1.5, 2.0, 5.0, 15], mindspore.float16)
        >>> range_op = Tensor([0.0, 5.0], mindspore.float16)
        >>> hist = ops.HistogramFixedWidth(5)
        >>> output = hist(x, range_op)
        >>> print(output)
        [2 1 1 0 2]
    """

    @prim_attr_register
    def __init__(self, nbins, dtype='int32'):
        """Initialize HistogramFixedWidth."""
        self.nbins = validator.check_value_type("nbins", nbins, [int], self.name)
        validator.check_int(nbins, 1, validator.GE, "nbins", self.name)
        valid_values = ['int32']
        self.dtype = validator.check_string(dtype, valid_values, "dtype", self.name)
        self.init_prim_io_names(inputs=['x', 'range'], outputs=['y'])
        # register the only supported output dtype, int32, as its enum value
        self.add_prim_attr('dtype', 3)


class Hypot(Primitive):
    """
    Computes the hypotenuse of a right triangle element-wise, treating the two input tensors as its legs.
    The shapes of the two inputs should be broadcastable, and their data types should be
    one of: float32, float64.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Inputs:
        - **x1** (Tensor) - The first input tensor.
        - **x2** (Tensor) - The second input tensor.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting, and the data type is the one
        with higher precision of the two inputs.

    Raises:
        TypeError: If the data type of `x1` or `x2` is neither float32 nor float64.
        ValueError: If the shapes of the two inputs are not broadcastable.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x1 = Tensor(np.array([3., 5., 7.]))
        >>> x2 = Tensor(np.array([4., 12., 24.]))
        >>> hypot_ = ops.Hypot()
        >>> y = hypot_(x1, x2)
        >>> print(y)
        [ 5. 13. 25.]
        >>> x1 = Tensor(2.1, mindspore.float32)
        >>> x2 = Tensor(2.1, mindspore.float32)
        >>> hypot_ = ops.Hypot()
        >>> y = hypot_(x1, x2)
        >>> print(y)
        2.9698484
    """

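    # x1 and x2 share one sig_dtype group, so implicit type conversion aligns mismatched input dtypes.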
    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])


class Heaviside(Primitive):
    r"""
    Applies the Heaviside step function for input `x` element-wise.

    .. math::
            \text { heaviside }(\text { x, values })=\left\{\begin{array}{ll}
            0, & \text { if x }<0 \\
            \text { values, } & \text { if x }==0 \\
            1, & \text { if x }>0
            \end{array}\right.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Inputs:
        - **x** (Tensor) - The input tensor, with a real number data type.
        - **values** (Tensor) - The values to use where `x` is zero.
          It should be broadcastable with `x` and have the same dtype as `x`.

    Outputs:
        Tensor, has the same type as `x` and `values`.

    Raises:
        TypeError: If `x` or `values` is not a Tensor.
        TypeError: If the data types of `x` and `values` are different.
        ValueError: If the shapes of the two inputs are not broadcastable.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([-1.5, 0., 2.]))
        >>> values = Tensor(np.array([0.5]))
        >>> heaviside = ops.Heaviside()
        >>> y = heaviside(x, values)
        >>> print(y)
        [0.  0.5 1. ]
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['x', 'values'], outputs=['y'])


class DivNoNan(Primitive):
    r"""
    Performs a safe division of `x1` by `x2` element-wise, returning 0 where an element of `x2` is zero.

    Inputs of `x1` and `x2` comply with the implicit type conversion rules to make the data types consistent.
    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors,
    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
    When the inputs are one tensor and one scalar,
    the scalar could only be a constant.

    .. math::
        output_{i} = \begin{cases}
        0, & \text{ if } x2_{i} = 0\\
        x1_{i} / x2_{i}, & \text{ if } x2_{i} \ne 0
        \end{cases}

    Inputs:
        - **x1** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
          a bool or a tensor whose data type is
          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
        - **x2** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
          a bool when the first input is a tensor, or a tensor whose data type is number or bool\_.
          When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Raises:
        TypeError: If `x1` or `x2` is not a number.Number, a bool or a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x1 = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
        >>> x2 = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
        >>> div_no_nan = ops.DivNoNan()
        >>> output = div_no_nan(x1, x2)
        >>> print(output)
        [0.  0.  0.  2.5 2. ]
    """

    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_attr_register
    def __init__(self):
        """Initialize DivNoNan"""
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])


class MulNoNan(_MathBinaryOp):
    r"""
    Computes `x` * `y` element-wise. If `y` is zero, the result is 0 no matter what `x` is.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors, the shapes of them could be broadcast.
    When the inputs are one tensor and one scalar, the scalar could only be a constant.

    .. math::
        output_{ij} = \begin{cases}
        0, & y_{ij} = 0;\\
        x_{ij} * y_{ij}, & otherwise.
        \end{cases}

    Note:
        The shapes of `x` and `y` should be the same or can be broadcast.
        This is noncommutative: if `y` is NaN or infinite and `x` is 0, the result will be NaN.

    Inputs:
        - **x** (Union[Tensor]) - The first input is a scalar or a tensor whose data type is currently
          one of int32, int64, float16, float32, float64, complex64 or complex128.
        - **y** (Union[Tensor]) - The second input is a scalar or a tensor whose data type is currently
          one of int32, int64, float16, float32, float64, complex64 or complex128.

    Outputs:
        Tensor, the shape is the same as the shape after broadcasting,
        and the data type is the one with higher precision among the two inputs.

    Raises:
        TypeError: If neither `x` nor `y` is a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> # case 1 : same data type and shape of two inputs, there are some 0 in y.
        >>> x = Tensor(np.array([[-1.0, 6.0, np.inf], [np.nan, -7.0, 4.0]]), mindspore.float32)
        >>> y = Tensor(np.array([[-1.0, 4.0, 0], [0, -3.0, 1.0]]), mindspore.float32)
        >>> mul_no_nan = ops.MulNoNan()
        >>> output = mul_no_nan(x, y)
        >>> print(output)
        [[ 1. 24. 0.]
         [ 0. 21. 4.]]
        >>> # case 2 : the shape of two inputs is same, there are some 0 in x, y.
        >>> x = Tensor(np.array([[-1.0, 6.0, 0], [0, np.nan, 4.0]]), mindspore.float32)
        >>> y = Tensor(np.array([[-1.0, 4.0, np.inf], [np.nan, 0, 1.0]]), mindspore.float32)
        >>> output = mul_no_nan(x, y)
        >>> print(output)
        [[ 1. 24. nan]
         [nan  0. 4.]]
        >>> print(output.dtype)
        Float32
        >>> # case 3 : the y is a scalar.
        >>> x = Tensor(np.array([[-1.0, 6.0, 0], [0, np.nan, 4.0]]), mindspore.float32)
        >>> y = Tensor(0, mindspore.float32)
        >>> output = mul_no_nan(x, y)
        >>> print(output)
        [[0. 0. 0.]
         [0. 0. 0.]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize MulNoNan."""
        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])


class TruncateDiv(Primitive):
    """
    Divides the first input tensor by the second input tensor element-wise and rounds the results
    of division towards zero. Equivalent to C-style integer division.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    When the inputs are two tensors,
    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
    When the inputs are one tensor and one scalar,
    the scalar could only be a constant.

    Note:
        Broadcasting is supported.

    Inputs:
        - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
          or a tensor whose data type is number or bool.
        - **y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
          is a tensor, or a tensor whose data type is number or bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Raises:
        TypeError: If `x` or `y` is not one of the following: Tensor, Number, bool.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
        >>> truncate_div = ops.TruncateDiv()
        >>> output = truncate_div(x, y)
        >>> print(output)
        [0 1 0]
    """

    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_attr_register
    def __init__(self):
        """Initialize TruncateDiv."""
        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])


class TruncateMod(Primitive):
    r"""
    Returns the remainder of division element-wise.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    When the inputs are two tensors,
    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
    When the inputs are one tensor and one scalar,
    the scalar could only be a constant.

    .. warning::
        - The input data does not support 0.
        - When the elements of the input exceed 2048, the accuracy of the operator cannot guarantee the
          requirement of double thousandths in the mini form.
        - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
        - If shape is expressed as :math:`(D1, D2, ..., Dn)`, then :math:`D1*D2... *DN<=1000000,n<=8`.

    Inputs:
        - **x** (Union[Tensor, numbers.Number, bool]) - The first input is a number, or a bool,
          or a tensor whose data type is number or bool.
        - **y** (Union[Tensor, numbers.Number, bool]) - The second input is a number, or a bool when the first input
          is a tensor, or a tensor whose data type is number or bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision among the two inputs.

    Raises:
        TypeError: If neither `x` nor `y` is one of the following: Tensor, number, bool.
        TypeError: If neither `x` nor `y` is a Tensor.
        ValueError: If the shapes of `x` and `y` cannot be broadcast to each other.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
        >>> truncate_mod = ops.TruncateMod()
        >>> output = truncate_mod(x, y)
        >>> print(output)
        [ 2  1 -1]
    """
    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_attr_register
    def __init__(self):
        """Initialize TruncateMod."""
        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])


class Mod(_MathBinaryOp):
    r"""
    Computes the remainder of dividing the first input tensor by the second input tensor element-wise.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors,
    both dtypes cannot be bool, and the shapes of them could be broadcast. When the inputs are one tensor
    and one scalar, the scalar could only be a constant.

    .. math::

        out_{i} = x_{i} \text{ % } y_{i}

    .. warning::
        - The input data does not support 0.
        - When the elements of the input exceed 2048, the accuracy of the operator cannot guarantee the
          requirement of double thousandths in the mini form.
        - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
        - If shape is expressed as :math:`(D1, D2, ..., Dn)`, then :math:`D1*D2... *DN<=1000000,n<=8`.

    Inputs:
        - **x** (Union[Tensor, numbers.Number, bool]) - The first input is a number, a bool
          or a tensor whose data type is number.
        - **y** (Union[Tensor, numbers.Number, bool]) - When the first input is a tensor, the second input
          could be a number, a bool or a tensor whose data type is number. When the first input is a number or a bool,
          the second input must be a tensor whose data type is number.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Raises:
        TypeError: If neither `x` nor `y` is one of the following: Tensor, number, bool.
        TypeError: If neither `x` nor `y` is a Tensor.
        ValueError: If the shapes of `x` and `y` cannot be broadcast to each other.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
        >>> y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
        >>> mod = ops.Mod()
        >>> output = mod(x, y)
        >>> print(output)
        [-1.  1.  0.]
    """

    def infer_value(self, x, y):
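        # Constant folding when both inputs are known at compile time; np.fmod keeps the sign of
        # the dividend, which matches the truncation semantics shown in the example above.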
        if x is not None and y is not None:
            x = x.asnumpy()
            y = y.asnumpy()
            return Tensor(np.fmod(x, y))
        return None


class Xdivy(Primitive):
    """
    Divides the first input tensor by the second input tensor element-wise. Returns zero when `x` is zero.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    When the inputs are two tensors,
    dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
    If one of the inputs is a scalar, the scalar could only be a constant.

    Inputs:
        - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
          or a tensor whose data type is float16, float32, float64, complex64, complex128 or bool.
        - **y** (Union[Tensor, Number, bool]) - The second input is a number,
          or a bool when the first input is a tensor, or a tensor whose data type is float16,
          float32, float64, complex64, complex128 or bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Raises:
        TypeError: If `x` or `y` is not one of the following: Tensor, Number, bool.
        TypeError: If dtype of `x` and `y` is not in [float16, float32, float64, complex64, complex128, bool].
        ValueError: If `x` could not be broadcast to a tensor with shape of `y`.
        RuntimeError: If a data type conversion between the Parameters `x` and `y` is required
                      but data type conversion of Parameter is not supported.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([2, 4, -1]), mindspore.float32)
        >>> y = Tensor(np.array([2, 2, 2]), mindspore.float32)
        >>> xdivy = ops.Xdivy()
        >>> output = xdivy(x, y)
        >>> print(output)
        [ 1.   2.  -0.5]
    """

    # Let x/y use the same sig_dtype to enable implicit conversion for compatibility
    __mindspore_signature__ = (
        sig.make_sig('x', rw=sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T),
        sig.make_sig('y', rw=sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self):
        """Initialize Xdivy."""
        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])

    def infer_shape(self, x_shape, y_shape):
        """
        Infer shape for the output of Xdivy.
        :param x_shape: input shape of x
        :param y_shape: input shape of y
        :return: the broadcast shape of x and y
        """
        output_shape = get_broadcast_shape(x_shape, y_shape, self.name)
        return output_shape

    def infer_dtype(self, x_dtype, y_dtype):
        """
        Infer type for the output of Xdivy.
        :param x_dtype: input type of x
        :param y_dtype: input type of y
        :return: the output dtype, same as x
        """
        args = {'x': x_dtype, 'y': y_dtype}
        validator.check_scalar_or_tensor_types_same(args,
                                                    [mstype.float16, mstype.float32, mstype.float64, mstype.complex64,
                                                     mstype.complex128], self.name, True)
        return x_dtype

    def infer_value(self, x, y):
        """
        Infer value for constant folding.
        :param x: constant value of x, or None
        :param y: constant value of y, or None
        :return: the folded result, or None if either input is unknown
        """
        if x is not None and y is not None:
            x = x.asnumpy()
            y = y.asnumpy()
            out = x / y
            out = np.array(out, x.dtype)
            return Tensor(out)
        return None


class Xlogy(Primitive):
    r"""
    Computes the first input tensor multiplied by the logarithm of the second input tensor element-wise.
    Returns zero when `x` is zero.

    Refer to :func:`mindspore.ops.xlogy` for more details.

    Inputs:
        - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
          a bool or a tensor whose data type is
          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
          `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
        - **y** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
          a bool when the first input is a tensor, or a tensor whose data type is number or bool\_.
          When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
        >>> y = Tensor(np.array([2, 2, 2]), mindspore.float32)
        >>> xlogy = ops.Xlogy()
        >>> output = xlogy(x, y)
        >>> print(output)
        [-3.465736   0.        2.7725887]
    """
    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_attr_register
    def __init__(self):
        """Initialize Xlogy."""
        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])


class _LogicBinaryOp(_BinaryOp):
    """
    Define logic binary operators.
    """

    @staticmethod
    def do_infer_dtype(x_dtype, y_dtype, valid_type=mstype.number_type, prim_name=None):
        """Staticmethod of infer dtype for _LogicBinaryOp."""
        args_dtype = {"x": x_dtype, "y": y_dtype}
        validator.check_tensors_dtypes_same_and_valid(args_dtype, valid_type, prim_name)
        return mstype.TensorType(mstype.bool_)

    def infer_dtype(self, x_dtype, y_dtype):
        return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, prim_name=self.name)


class Quantile(Primitive):
    r"""
    Computes the q-th quantiles of all elements in the input tensor, doing a linear interpolation when the
    q-th quantile lies between two data points.

    Refer to :func:`mindspore.ops.quantile` and :func:`mindspore.ops.nanquantile` for more details.

    Args:
        dim (int, optional): The dimension to reduce. By default, `dim` is ``None`` resulting in the
            input tensor being flattened before computation. Default: ``None`` .
        keep_dims (bool, optional): Whether the output tensor has dim retained or not. Default: ``False`` .
        ignore_nan (bool, optional): Whether to ignore NaN values in the input. Default: ``False`` .

    Inputs:
        - **input** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
          Supported dtypes: float32, float64.
        - **q** (Union[float, Tensor]) - A scalar or 1D tensor of quantile values in the range [0, 1].
          Supported dtypes: float32, float64.

    Outputs:
        Tensor, has the same dtype as the `input`.

    Supported Platforms:

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> quantile = ops.Quantile()
        >>> input = Tensor(np.array([0.0700, -0.5446,  0.9214]), mindspore.float32)
        >>> q = Tensor(np.array([0, 0.5, 1]), mindspore.float32)
        >>> output = quantile(input, q)
        >>> print(output)
        [-0.5446  0.07  0.9214]
    """

    @prim_attr_register
    def __init__(self, dim=None, keep_dims=False, ignore_nan=False):
        """Initialize Quantile"""
        if dim is not None:
            validator.check_value_type("dim", dim, [int], self.name)
        else:
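            # `dim` is None: store an out-of-range sentinel (10000), which stands for
            # "flatten the input before computing", per the Args description above.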
            self.add_prim_attr("dim", 10000)
        if keep_dims is not None:
            validator.check_value_type("keep_dims", keep_dims, [bool], self.name)
        else:
            self.add_prim_attr("keep_dims", False)
        if ignore_nan is not None:
            validator.check_value_type("ignore_nan", ignore_nan, [bool], self.name)
        else:
            self.add_prim_attr("ignore_nan", False)


class ApproximateEqual(_LogicBinaryOp):
    r"""
    Returns ``True`` if abs(x-y) is smaller than tolerance element-wise, otherwise ``False``.

    .. math::

        out_i = \begin{cases}
        & \text{ if } \left | x_{i} - y_{i} \right | < \text{tolerance},\ \ True  \\
        & \text{ if } \left | x_{i} - y_{i} \right | \ge \text{tolerance},\ \  False
        \end{cases}

    where `tolerance` indicates the maximum acceptable deviation between two elements.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, the lower precision data type will be converted to
    the relatively highest precision data type.

    Args:
        tolerance (float): The maximum deviation that two elements can be considered equal. Default: ``1e-05`` .

    Inputs:
        - **x** (Tensor) - A tensor. Must be one of the following types: float32, float16.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
        - **y** (Tensor) - A tensor of the same type and shape as `x`.

    Outputs:
        Tensor, the shape is the same as the shape of `x`, and the data type is bool.

    Raises:
        TypeError: If `tolerance` is not a float.
        TypeError: If a data type conversion between the Parameters `x` and `y` is required
                   but data type conversion of Parameter is not supported.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
        >>> y = Tensor(np.array([2, 3, 6]), mindspore.float32)
        >>> approximate_equal = ops.ApproximateEqual(2.)
        >>> output = approximate_equal(x, y)
        >>> print(output)
        [ True  True  False]
    """

    @prim_attr_register
    def __init__(self, tolerance=1e-05):
        """Initialize ApproximateEqual"""
        validator.check_value_type("tolerance", tolerance, [float], self.name)


class EqualCount(PrimitiveWithInfer):
    """
    Computes the number of the same elements of two tensors.

    The two input tensors must have the same data type and shape.

    Inputs:
        - **x** (Tensor) - The first input tensor. If the data type and shape of `y` are determined, then `x`
          must be the same as `y`, and vice versa.
          :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
        - **y** (Tensor) - The second input tensor. If the data type and shape of `x` are determined, then `y`
          must be the same as `x`, and vice versa.

    Outputs:
        Tensor, with the same type as the input tensor and shape :math:`(1,)`.

    Raises:
        TypeError: If `x` or `y` is not a Tensor.
        ValueError: If shape of `x` is not equal to shape of `y`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
        >>> y = Tensor(np.array([1, 2, 4]), mindspore.int32)
        >>> equal_count = ops.EqualCount()
        >>> output = equal_count(x, y)
        >>> print(output)
        [2]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize EqualCount"""
        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])


class Lerp(Primitive):
    """
    Performs a linear interpolation of two tensors `start` and `end` based on a float or tensor weight.

    Refer to :func:`mindspore.ops.lerp` for more details.

    Inputs:
        - **start** (Tensor) - The tensor with the starting points. Data type must be float16, float32 or float64.
        - **end** (Tensor) - The tensor with the ending points. Data type must be the same as `start`.
        - **weight** (Union[float, Tensor]) - The weight for the interpolation formula. Must be a float
          or a scalar tensor with float16 or float32 data type.

    Outputs:
        Tensor, has the same type and shape as input `start`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
        >>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
        >>> lerp = ops.Lerp()
        >>> output = lerp(start, end, 0.5)
        >>> print(output)
        [5.5 6. 6.5 7. ]
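        >>> # `weight` may also be passed as a scalar tensor (see Inputs above); this equivalent
        >>> # call is a sketch and computes the same interpolation:
        >>> output = lerp(start, end, Tensor(0.5, mindspore.float32))
        >>> print(output)
        [5.5 6. 6.5 7. ]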
2038    """
2039
2040    @prim_attr_register
2041    def __init__(self):
2042        self.init_prim_io_names(inputs=['start', 'end', 'weight'], outputs=['output'])
2043
2044
class IsNan(Primitive):
    r"""
    Determines which elements are NaN for each position.

    Refer to :func:`mindspore.ops.isnan` for more details.

    Inputs:
        - **x** (Tensor) - The input tensor.

    Outputs:
        Tensor, has the same shape as the input, and the dtype is bool.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> is_nan = ops.IsNan()
        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
        >>> output = is_nan(x)
        >>> print(output)
        [ True False False]
        >>> x = Tensor(2.1, mindspore.float64)
        >>> output = is_nan(x)
        >>> print(output)
        False
    """

    @prim_attr_register
    def __init__(self):
        """Initialize IsNan"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])


class IsInf(Primitive):
    r"""
    Determines which elements are inf or -inf for each position.

    Refer to :func:`mindspore.ops.isinf` for more details.

    Inputs:
        - **x** (Tensor) - The input tensor.

    Outputs:
        Tensor, has the same shape as the input, and the dtype is bool.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> is_inf = ops.IsInf()
        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
        >>> output = is_inf(x)
        >>> print(output)
        [False False True]
        >>> x = Tensor(2.1, mindspore.float64)
        >>> output = is_inf(x)
        >>> print(output)
        False
    """

    @prim_attr_register
    def __init__(self):
        """Initialize IsInf"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])


class FloatStatus(Primitive):
    """
    Determines whether the elements contain Not a Number (NaN), positive infinity or negative infinity.
    0 for normal, 1 for overflow.

    Inputs:
        - **x** (Tensor) - The input tensor. The data type must be float16, float32 or float64.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, has the shape of :math:`(1,)`, and the dtype is `mindspore.dtype.float32`.

    Raises:
        TypeError: If dtype of `x` is not in [float16, float32, float64].

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> float_status = ops.FloatStatus()
        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
        >>> result = float_status(x)
        >>> print(result)
        [1.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize FloatStatus"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])


class NPUAllocFloatStatus(Primitive):
    """
    Allocates a flag to store the overflow status.

    The flag is a tensor whose shape is :math:`(8,)` and data type is `mindspore.dtype.float32`.

    Note:
        Please refer to the Examples of :class:`mindspore.ops.NPUGetFloatStatus`.

    Outputs:
        Tensor, has the shape of :math:`(8,)`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> from mindspore import ops
        >>> alloc_status = ops.NPUAllocFloatStatus()
        >>> output = alloc_status()
        >>> print(output)
        [0. 0. 0. 0. 0. 0. 0. 0.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize NPUAllocFloatStatus"""
        logger.warning("The 'NPUAllocFloatStatus' operator will be deprecated in the future, "
                       "please use 'nn.TrainOneStepWithLossScaleCell' or 'amp.all_finite'.")


class NPUGetFloatStatus(Primitive):
    """
    `mindspore.ops.NPUGetFloatStatus` updates the flag which is
    the output tensor of :class:`mindspore.ops.NPUAllocFloatStatus` with the latest overflow status.

    Note:
        The flag is a tensor whose shape is :math:`(8,)` and data type is `mindspore.dtype.float32`.
        If the sum of the flag equals 0, no overflow has happened. If the sum of the
        flag is greater than 0, an overflow has happened.
        In addition, there are strict sequencing requirements for use, i.e., before
        using the NPUGetFloatStatus operator, you need to ensure that NPUClearFloatStatus
        and your computation have been executed. We use :class:`mindspore.ops.Depend` to ensure the execution order.

    Inputs:
        - **x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.
          The data type must be float16 or float32.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.

    Outputs:
        Tensor, has the same shape as `x`. All the elements in the tensor will be zero.

    Raises:
        TypeError: If `x` is not a Tensor.
        TypeError: If dtype of `x` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import ops
        >>> from mindspore import dtype as mstype
        >>> from mindspore import Tensor
        >>> class Net(nn.Cell):
        ...     def __init__(self):
        ...         super().__init__()
        ...         self.alloc_status = ops.NPUAllocFloatStatus()
        ...         self.get_status = ops.NPUGetFloatStatus()
        ...         self.clear_status = ops.NPUClearFloatStatus()
        ...         self.sub = ops.Sub()
        ...         self.neg = ops.Neg()
        ...
        ...     def construct(self, x):
        ...         init = self.alloc_status()
        ...         clear_status = self.clear_status(init)
        ...         x = ops.depend(x, clear_status)
        ...         res = self.sub(x, self.neg(x))
        ...         init = ops.depend(init, res)
        ...         get_status = self.get_status(init)
        ...         res = ops.depend(res, get_status)
        ...         return res
        >>>
        >>> value = 5
        >>> data = np.full((2, 3), value, dtype=np.float16)
        >>> x = Tensor(data, dtype=mstype.float16)
        >>> net = Net()
        >>> res = net(x)
        >>> print(res)
        [[10. 10. 10.]
         [10. 10. 10.]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize NPUGetFloatStatus"""
        logger.warning("The 'NPUGetFloatStatus' operator will be deprecated in the future, "
                       "please use 'nn.TrainOneStepWithLossScaleCell' or 'amp.all_finite'.")


class NPUClearFloatStatus(Primitive):
    """
    Clears the flag which stores the overflow status.

    Note:
        The flag is in a register on the `Ascend` device. It will be reset and cannot be reused again after the
        `NPUClearFloatStatus` is called.
        In addition, there are strict sequencing requirements for use, i.e., before using the NPUGetFloatStatus
        operator, you need to ensure that NPUClearFloatStatus and your computation have been executed.
        We use :class:`mindspore.ops.Depend` to ensure the execution order.

        Please refer to the Examples of :class:`mindspore.ops.NPUGetFloatStatus`.

    Inputs:
        - **x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.
          The data type must be float16 or float32.

    Outputs:
        Tensor, has the same shape as `x`. All the elements in the tensor will be zero.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import ops
        >>> from mindspore import dtype as mstype
        >>> from mindspore import Tensor
        >>> class Net(nn.Cell):
        ...     def __init__(self):
        ...         super().__init__()
        ...         self.alloc_status = ops.NPUAllocFloatStatus()
        ...         self.get_status = ops.NPUGetFloatStatus()
        ...         self.clear_status = ops.NPUClearFloatStatus()
        ...         self.sub = ops.Sub()
        ...         self.neg = ops.Neg()
        ...
        ...     def construct(self, x):
        ...         init = self.alloc_status()
        ...         clear_status = self.clear_status(init)
        ...         x = ops.depend(x, clear_status)
        ...         res = self.sub(x, self.neg(x))
        ...         init = ops.depend(init, res)
        ...         get_status = self.get_status(init)
        ...         res = ops.depend(res, get_status)
        ...         return res
        >>>
        >>> value = 5
        >>> data = np.full((2, 3), value, dtype=np.float16)
        >>> x = Tensor(data, dtype=mstype.float16)
        >>> net = Net()
        >>> res = net(x)
        >>> print(res)
        [[10. 10. 10.]
         [10. 10. 10.]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize NPUClearFloatStatus"""
        logger.warning("The 'NPUClearFloatStatus' operator will be deprecated in the future, "
                       "please use 'nn.TrainOneStepWithLossScaleCell' or 'amp.all_finite'.")


class NPUGetFloatStatusV2(Primitive):
    """
    Gets the flag that stores the overflow status. This flag is located in a register at a
    fixed address on the `Ascend` device, and overflow information is automatically
    written to this register.
    The flag is a one-dimensional Tensor with shape :math:`(8,)` and data type `mindspore.dtype.int32`.
    If the value of the flag is zero, no overflow has occurred; otherwise, an overflow has occurred.
    When performing overflow detection on the network, you should first call `NPUClearFloatStatusV2` to
    reset the register before the detection, and then call `NPUGetFloatStatusV2` to get the register
    status after the network execution is completed.

    Note:
        - In order to avoid mis-optimization by the compiler, an additional input is added to
          this operator. The input is defined as a Tensor with shape :math:`(8,)` and data type
          `mindspore.dtype.int32`, and is meaningless.
        - Since this op lacks contextual dependencies with parameters in the network,
          :class:`mindspore.ops.Depend` needs to be used to ensure order of execution.

    Inputs:
        Tensor, an additional input created to avoid compiler optimization, is specified as shape :math:`(8,)`,
        data type is `mindspore.dtype.int32`, and has no actual meaning.
        Usually use the output of `NPUClearFloatStatusV2`.

    Outputs:
        Tensor, shape and data type are the same as input. If all are zero, it means no overflow, otherwise, overflow.

    Raises:
        TypeError: If `x` is not a Tensor.
        TypeError: If dtype of `x` is not int32.
        ValueError: If shape of `x` is not equal to :math:`(8,)`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore as ms
        >>> import numpy as np
        >>> from mindspore import ops, nn, Tensor
        >>> from mindspore.ops.operations.math_ops import NPUGetFloatStatusV2, NPUClearFloatStatusV2
        >>> class Net(nn.Cell):
        ...     def __init__(self):
        ...         super().__init__()
        ...         self.clear_status = NPUClearFloatStatusV2()
        ...         self.get_status = NPUGetFloatStatusV2()
        ...         self.sub = ops.Sub()
        ...         self.neg = ops.Neg()
        ...         self.equal = ops.Equal()
        ...         self.reduce_all = ops.ReduceAll(keep_dims=False)
        ...         self.base = Tensor([0], dtype=ms.int32)
        ...         self.logic_not = ops.LogicalNot()
        ...
        ...     def construct(self, x):
        ...         init = Tensor([0]*8, dtype=ms.int32)
        ...         clear_status = self.clear_status(init)
        ...         x = ops.depend(x, clear_status)
        ...         res = self.sub(x, self.neg(x))
        ...         init = ops.depend(init, res)
        ...         get_status = self.get_status(init)
        ...         flag = self.equal(self.base, get_status)
        ...         overall_finite = self.reduce_all(flag)
        ...         overflow = self.logic_not(overall_finite)
        ...         return overflow
        ...
        >>> value = 65504
        >>> data = np.full((2, 3), value, dtype=np.float16)
        >>> x = Tensor(data, dtype=ms.float16)
        >>> net = Net()
        >>> res = net(x)
        >>> print(res)
        True
        >>> value = 10
        >>> data = np.full((2, 3), value, dtype=np.float16)
        >>> x = Tensor(data, dtype=ms.float16)
        >>> net = Net()
        >>> res = net(x)
        >>> print(res)
        False
    """

    @prim_attr_register
    def __init__(self):
        """Initialize NPUGetFloatStatusV2"""


class NPUClearFloatStatusV2(Primitive):
    """
    Clears the flag that stores the overflow status. This flag is located in a register at a
    fixed address on the `Ascend` device, and overflow information is automatically
    written to this register.
    The flag is a one-dimensional Tensor with shape :math:`(8,)` and data type `mindspore.dtype.int32`.
    If the value of the flag is zero, no overflow has occurred; otherwise, an overflow has occurred.
    When performing overflow detection on the network, you should first call `NPUClearFloatStatusV2` to
    reset the register before the detection, and then call `NPUGetFloatStatusV2` to get the register
    status after the network execution is completed.

    Note:
        - In order to avoid mis-optimization by the compiler, an additional input and output are added to
          this operator. The input and output are defined as Tensors with shape :math:`(8,)` and data type
          `mindspore.dtype.int32`, and are meaningless.
        - Since this op lacks contextual dependencies with parameters in the network,
          :class:`mindspore.ops.Depend` needs to be used to ensure order of execution.

    Inputs:
        Tensor, an additional input created to avoid compiler optimization, is specified as shape :math:`(8,)`,
        data type is `mindspore.dtype.int32`, and has no actual meaning.

    Outputs:
        Tensor, shape and data type are the same as input, meaningless.

    Raises:
        TypeError: If `x` is not a Tensor.
        TypeError: If dtype of `x` is not int32.
        ValueError: If shape of `x` is not equal to :math:`(8,)`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore as ms
        >>> import numpy as np
        >>> from mindspore import ops, nn, Tensor
        >>> from mindspore.ops.operations.math_ops import NPUGetFloatStatusV2, NPUClearFloatStatusV2
        >>> class Net(nn.Cell):
        ...     def __init__(self):
        ...         super().__init__()
        ...         self.clear_status = NPUClearFloatStatusV2()
        ...         self.get_status = NPUGetFloatStatusV2()
        ...         self.sub = ops.Sub()
        ...         self.neg = ops.Neg()
        ...         self.equal = ops.Equal()
        ...         self.reduce_all = ops.ReduceAll(keep_dims=False)
        ...         self.base = Tensor([0], dtype=ms.int32)
        ...         self.logic_not = ops.LogicalNot()
        ...
        ...     def construct(self, x):
        ...         init = Tensor([0]*8, dtype=ms.int32)
        ...         clear_status = self.clear_status(init)
        ...         x = ops.depend(x, clear_status)
        ...         res = self.sub(x, self.neg(x))
        ...         init = ops.depend(init, res)
        ...         get_status = self.get_status(init)
        ...         flag = self.equal(self.base, get_status)
        ...         overall_finite = self.reduce_all(flag)
        ...         overflow = self.logic_not(overall_finite)
        ...         return overflow
        ...
        >>> value = 65504
        >>> data = np.full((2, 3), value, dtype=np.float16)
        >>> x = Tensor(data, dtype=ms.float16)
        >>> net = Net()
        >>> res = net(x)
        >>> print(res)
        True
        >>> value = 10
        >>> data = np.full((2, 3), value, dtype=np.float16)
        >>> x = Tensor(data, dtype=ms.float16)
        >>> net = Net()
        >>> res = net(x)
        >>> print(res)
        False
    """

    @prim_attr_register
    def __init__(self):
        """Initialize NPUClearFloatStatusV2"""


class NMSWithMask(PrimitiveWithInfer):
    r"""
    Non-maximum suppression. When solving object detection problems in the computer vision field,
    object detection algorithms generate
    a plurality of bounding boxes. Starting from the box with the highest score, the overlap between the other
    boxes and the current box is calculated, and boxes are deleted based on a certain threshold (IOU). On the
    Ascend platform, the input box score is ignored and boxes are selected based only on the IOU between boxes,
    which means that if you want to remove boxes that have lower scores, you need to sort the input boxes by
    score in descending order in advance. The IOU is as follows:

    .. math::
        \text{IOU} = \frac{\text{Area of Overlap}}{\text{Area of Union}}

    .. warning::
        Only supports up to 2864 input boxes at one time.

    Args:
        iou_threshold (float): Specifies the threshold of overlap boxes with respect to
            IOU. Default: ``0.5`` .

    Inputs:
        - **bboxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. Input bounding boxes.
          `N` is the number of input bounding boxes. Every bounding box
          contains 5 values, the first 4 values are the coordinates (x0, y0, x1, y1) of the bounding box, which
          represent the top-left and bottom-right points, and the last value is the score of this bounding box.
          The data type must be float16 or float32.

    Outputs:
        tuple[Tensor], tuple of three tensors, they are output_boxes, output_idx and selected_mask.

        - **output_boxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. On GPU and CPU platform, it is a sorted
          list of bounding boxes by sorting the input `bboxes` in descending order of score. On Ascend platform,
          it is same as input `bboxes`.
        - **output_idx** (Tensor) - The shape of tensor is :math:`(N,)`. The indexes list of `output_boxes`.
        - **selected_mask** (Tensor) - The shape of tensor is :math:`(N,)`. A mask list of
          valid output bounding boxes. Apply this mask on `output_boxes` to get the list of bounding boxes after
          non-max suppression calculation, or apply this mask on `output_idx` to get the indexes list of bounding boxes
          after non-max suppression calculation.

    Raises:
        ValueError: If the `iou_threshold` is not a float number.
        ValueError: If the first dimension of the input Tensor is less than or equal to 0.
        TypeError: If the dtype of the `bboxes` is not float16 or float32.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> bbox = np.array([[100.0, 100.0, 50.0, 68.0, 0.63], [150.0, 75.0, 165.0, 115.0, 0.55],
        ...                  [12.0, 190.0, 288.0, 200.0, 0.9], [28.0, 130.0, 106.0, 172.0, 0.3]])
        >>> bbox[:, 2] += bbox[:, 0]
        >>> bbox[:, 3] += bbox[:, 1]
        >>> inputs = Tensor(bbox, mindspore.float32)
        >>> nms = ops.NMSWithMask(0.1)
        >>> output_boxes, indices, mask = nms(inputs)
        >>> indices_np = indices.asnumpy()
        >>> print(indices_np[mask.asnumpy()])
        [0 1 2]
    """

    @prim_attr_register
    def __init__(self, iou_threshold=0.5):
        """Initialize NMSWithMask"""
        validator.check_value_type("iou_threshold", iou_threshold, [float], self.name)
        self.init_prim_io_names(inputs=['bboxes'], outputs=['selected_boxes', 'selected_idx', 'selected_mask'])

    def infer_shape(self, bboxes_shape):
        cls_name = self.name
        validator.check_equal_int(len(bboxes_shape), 2, "bboxes rank", cls_name)
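        # bboxes_shape[0] may be -1 for a dynamic batch dimension; only validate it when it is static.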
        if bboxes_shape[0] != -1:
            validator.check_positive_int(bboxes_shape[0], "bboxes.shape[0]", cls_name)
        validator.check_equal_int(bboxes_shape[1], 5, "bboxes.shape[1]", cls_name)
        num = bboxes_shape[0]
        return bboxes_shape, (num,), (num,)

    def infer_dtype(self, bboxes_dtype):
        validator.check_tensor_dtype_valid("bboxes", bboxes_dtype, [mstype.float16, mstype.float32], self.name)
        return bboxes_dtype, mstype.int32, mstype.bool_


class Sign(Primitive):
    r"""
    Performs sign on the tensor element-wise.

    .. math::
        sign(x) = \begin{cases} -1, &if\ x < 0 \cr
        0, &if\ x = 0 \cr
        1, &if\ x > 0\end{cases}

    Inputs:
        - **x** (Tensor) - The input tensor of any dimension.

    Outputs:
        Tensor, has the same shape and dtype as the `x`.

    Raises:
        TypeError: If `x` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
        >>> sign = ops.Sign()
        >>> output = sign(x)
        >>> print(output)
        [[ 1.  0. -1.]]
    """

    @prim_attr_register
    def __init__(self):
        pass


class Tan(Primitive):
    r"""
    Computes tangent of `x` element-wise.

    Refer to :func:`mindspore.ops.tan` for more details.

    Inputs:
        - **x** (Tensor) - Input tensor of any dimension.

    Outputs:
        Tensor, has the same shape as `x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> tan = ops.Tan()
        >>> x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
        >>> output = tan(x)
        >>> print(output)
        [-1.5574081 0. 1.5574081]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Tan"""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])


class SquareSumAll(Primitive):
    r"""
    Returns the sum of squares of all elements in each input tensor.

    .. math::
        \left\{\begin{matrix}out_{x} = {\textstyle \sum_{0}^{N}} (x_{i})^2
        \\out_{y} = {\textstyle \sum_{0}^{N}} (y_{i})^2
        \end{matrix}\right.

    Note:
        SquareSumAll only supports float16 and float32 data type.

    Inputs:
        - **x** (Tensor) - The input tensor. The data type must be float16 or float32.
          :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
        - **y** (Tensor) - The input tensor has the same type and shape as the `x`.

    Outputs:
        - **output_x** (Tensor) - The sum of squares of `x`, with the same type as `x`.
        - **output_y** (Tensor) - The sum of squares of `y`, with the same type as `x`.

    Raises:
        TypeError: If neither `x` nor `y` is a Tensor.
        ValueError: If `x` and `y` are not the same shape.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> import mindspore
        >>> from mindspore import ops
        >>> from mindspore import Tensor
        >>> x = Tensor(np.array([0, 0, 2, 0]), mindspore.float32)
        >>> y = Tensor(np.array([0, 0, 2, 4]), mindspore.float32)
        >>> square_sum_all = ops.SquareSumAll()
        >>> output = square_sum_all(x, y)
        >>> print(output)
        (Tensor(shape=[], dtype=Float32, value= 4),
         Tensor(shape=[], dtype=Float32, value= 20))
    """

    @prim_attr_register
    def __init__(self):
        """Initialize SquareSumAll"""
        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output_x', 'output_y'])


2684class BitwiseAnd(_BitwiseBinaryOp):
2685    r"""
2686    Returns bitwise `and` of two tensors element-wise.
2687
2688    Refer to :func:`mindspore.ops.bitwise_and` for more details.
2689
2690    Inputs:
2691        - **x** (Tensor) - The first input tensor with shape
2692          :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
2693        - **y** (Tensor) - The second input tensor with same type as the `x`.
2694
2695    Outputs:
2696        Tensor, has the same type as the `x`.
2697
2698    Supported Platforms:
2699        ``Ascend`` ``GPU`` ``CPU``
2700
2701    Examples:
2702        >>> import mindspore
2703        >>> import numpy as np
2704        >>> from mindspore import Tensor, ops
2705        >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
2706        >>> y = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
2707        >>> bitwise_and = ops.BitwiseAnd()
2708        >>> output = bitwise_and(x, y)
2709        >>> print(output)
2710        [ 0  0  1 -1  1  0  1]
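        >>> # Illustrative cross-check: NumPy's bitwise_and agrees element-wise.
        >>> print(np.array_equal(output.asnumpy(), np.bitwise_and(x.asnumpy(), y.asnumpy())))
        True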
2711    """
2712
2713
2714class BitwiseOr(_BitwiseBinaryOp):
2715    r"""
2716    Returns bitwise `or` of two tensors element-wise.
2717
2718    Refer to :func:`mindspore.ops.bitwise_or` for more details.
2719
2720    Inputs:
2721        - **x** (Tensor) - The first input tensor with shape
          :math:`(N, *)` where :math:`*` means any number of additional dimensions.
        - **y** (Tensor) - The second input tensor, with the same type as `x`.
2724
2725    Outputs:
2726        Tensor, has the same type as the `x`.
2727
2728    Supported Platforms:
2729        ``Ascend`` ``GPU`` ``CPU``
2730
2731    Examples:
2732        >>> import mindspore
2733        >>> import numpy as np
2734        >>> from mindspore import Tensor, ops
2735        >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
2736        >>> y = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
2737        >>> bitwise_or = ops.BitwiseOr()
2738        >>> output = bitwise_or(x, y)
2739        >>> print(output)
2740        [ 0  1  1 -1 -1  3  3]
2741    """
2742
2743
2744class BitwiseXor(_BitwiseBinaryOp):
2745    r"""
2746    Returns bitwise `xor` of two tensors element-wise.
2747
2748    Refer to :func:`mindspore.ops.bitwise_xor` for more details.
2749
2750    Inputs:
2751        - **x** (Tensor) - The first input tensor with shape
          :math:`(N, *)` where :math:`*` means any number of additional dimensions.
        - **y** (Tensor) - The second input tensor, with the same type as `x`.
2754
2755    Outputs:
2756        Tensor, has the same type as the `x`.
2757
2758    Supported Platforms:
2759        ``Ascend`` ``GPU`` ``CPU``
2760
2761    Examples:
2762        >>> import mindspore
2763        >>> import numpy as np
2764        >>> from mindspore import Tensor, ops
2765        >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
2766        >>> y = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
2767        >>> bitwise_xor = ops.BitwiseXor()
2768        >>> output = bitwise_xor(x, y)
2769        >>> print(output)
2770        [ 0  1  0  0 -2  3  2]
2771    """
2772
2773
2774class BesselI0(Primitive):
2775    r"""
2776    Computes modified Bessel function of the first kind, order 0 element-wise.
2777
2778    The formula is defined as:
2779
2780    .. math::
2781        \begin{array}{ll} \\
2782            I_{0}(x)=J_{0}(\mathrm{i} x)=\sum_{m=0}^{\infty}
2783            \frac{x^{2 m}}{2^{2 m} (m !)^{2}}
2784        \end{array}
2785
2786    where :math:`J_{0}` is Bessel function of the first kind, order 0.
2787
2788    .. warning::
2789        This is an experimental API that is subject to change or deletion.
2790
2791    Refer to :func:`mindspore.ops.bessel_i0` for more details.
2792
2793    Inputs:
2794        - **x** (Tensor) - The input tensor.
2795          Data type must be float16, float32 or float64.
2796
2797    Outputs:
2798        Tensor, has the same shape as `x`.
2799
2800    Supported Platforms:
2801        ``GPU`` ``CPU``
2802
2803    Examples:
2804        >>> import mindspore
2805        >>> import numpy as np
2806        >>> from mindspore import Tensor, ops
2807        >>> bessel_i0 = ops.BesselI0()
2808        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
2809        >>> output = bessel_i0(x)
2810        >>> print(output)
2811        [1.0144521 1.1797839 1.0241698 1.0020262]
2812    """
2813
2814    @prim_attr_register
2815    def __init__(self):
2816        self.init_prim_io_names(inputs=['x'], outputs='y')
2817
2818
2819class BesselI1(Primitive):
2820    r"""
2821    Computes modified Bessel function of the first kind, order 1 element-wise.
2822
2823    The formula is defined as:
2824
2825    .. math::
2826        \begin{array}{ll} \\
2827            I_{1}(x)=\mathrm{i}^{-1} J_{1}(\mathrm{i} x)=\sum_{m=0}^
2828            {\infty} \frac{x^{2m+1}}{2^{2m+1} m ! (m+1) !}
2829        \end{array}
2830
2831    where :math:`J_{1}` is Bessel function of the first kind, order 1.
2832
2833    .. warning::
2834        This is an experimental API that is subject to change or deletion.
2835
2836    Refer to :func:`mindspore.ops.bessel_i1` for more details.
2837
2838    Inputs:
2839        - **x** (Tensor) - The input tensor.
2840          Data type must be float16, float32 or float64.
2841
2842    Outputs:
2843        Tensor, has the same shape as `x`.
2844
2845    Supported Platforms:
2846        ``GPU`` ``CPU``
2847
2848    Examples:
2849        >>> import mindspore
2850        >>> import numpy as np
2851        >>> from mindspore import Tensor, ops
2852        >>> bessel_i1 = ops.BesselI1()
2853        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
2854        >>> output = bessel_i1(x)
2855        >>> print(output)
2856        [0.1208661  0.45177728 0.1568694  0.04504559]
2857    """
2858
2859    @prim_attr_register
2860    def __init__(self):
2861        """Initialize BesselI1"""
2862
2863
2864class BesselI0e(Primitive):
2865    r"""
2866    Computes exponential scaled modified Bessel function of the first kind, order 0 element-wise.
2867
2868    The formula is defined as:
2869
2870    .. math::
2871        \begin{array}{ll} \\
2872            \text I_{0}e(x)=e^{(-|x|)} * I_{0}(x)=e^{(-|x|)} * \sum_{m=0}^
2873            {\infty} \frac{x^{2 m}}{2^{2 m} (m !)^{2}}
2874        \end{array}
2875
2876    where :math:`I_{0}` is modified Bessel function of the first kind, order 0.
2877
2878    Inputs:
2879        - **x** (Tensor) - The input tensor.
2880          Data type must be float16, float32 or float64.
2881
2882    Outputs:
2883        Tensor, has the same shape as `x`.
2884
2885    Raises:
2886        TypeError: If `x` is not a Tensor.
2887        TypeError: If dtype of `x` is not float16, float32 or float64.
2888
2889    Supported Platforms:
2890        ``Ascend`` ``GPU`` ``CPU``
2891
2892    Examples:
2893        >>> import mindspore
2894        >>> import numpy as np
2895        >>> from mindspore import Tensor, ops
2896        >>> bessel_i0e = ops.BesselI0e()
2897        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
2898        >>> output = bessel_i0e(x)
2899        >>> print(output)
2900        [0.7979961  0.5144438  0.75117415  0.9157829 ]
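        >>> # Illustrative check of the scaling identity I0e(x) = exp(-|x|) * I0(x),
        >>> # reusing the unscaled BesselI0 operator defined in this module.
        >>> i0 = ops.BesselI0()(x)
        >>> print(np.allclose(output.asnumpy(), np.exp(-np.abs(x.asnumpy())) * i0.asnumpy(), rtol=1e-5))
        True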
2901    """
2902
2903    @prim_attr_register
2904    def __init__(self):
2905        """Initialize BesselI0e"""
2906        self.init_prim_io_names(inputs=['x'], outputs='output')
2907
2908
2909class BesselI1e(Primitive):
2910    r"""
2911    Computes exponential scaled modified Bessel function of the first kind, order 1 element-wise.
2912
2913    The formula is defined as:
2914
2915    .. math::
2916        \begin{array}{ll} \\
2917            \text I_{1}e(x)=e^{(-|x|)} * I_{1}(x)=e^{(-|x|)} * \sum_{m=0}^
2918            {\infty} \frac{x^{2m+1}}{2^{2m+1} m ! (m+1) !}
2919        \end{array}
2920
    where :math:`I_{1}` is the modified Bessel function of the first kind, order 1.
2922
2923    Inputs:
2924        - **x** (Tensor) - The input tensor.
          Data type must be float16, float32 or float64.
2926
2927    Outputs:
2928        Tensor, has the same shape as `x`.
2929
2930    Raises:
2931        TypeError: If `x` is not a Tensor.
2932        TypeError: If dtype of `x` is not float16, float32 or float64.
2933
2934    Supported Platforms:
2935        ``Ascend`` ``GPU`` ``CPU``
2936
2937    Examples:
2938        >>> import mindspore
2939        >>> import numpy as np
2940        >>> from mindspore import Tensor, ops
2941        >>> bessel_i1e = ops.BesselI1e()
2942        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
2943        >>> output = bessel_i1e(x)
2944        >>> print(output)
2945        [0.09507662 0.19699717 0.11505538 0.04116856]
2946    """
2947
2948    @prim_attr_register
2949    def __init__(self):
2950        """Initialize BesselI1e"""
2951        self.init_prim_io_names(inputs=['x'], outputs='output')
2952
2953
2954class BesselK0(Primitive):
2955    r"""
2956    Computes modified Bessel function of the second kind, order 0 element-wise.
2957
2958    The formula is defined as:
2959
2960    .. math::
2961        \begin{array}{ll} \\
2962            K_{0}(x)= \lim_{\nu \to 0} \left(\frac{\pi}{2}\right) \frac
2963            {I_{-\nu}(x)-I_{\nu}(x)}{\sin (\nu \pi)} = \int_{0}^{\infty} e^{-x \cosh t} d t
2964        \end{array}
2965
    where :math:`I_{\nu}` is the modified Bessel function of the first kind, order :math:`\nu`.
2967
2968    .. warning::
2969        This is an experimental API that is subject to change or deletion.
2970
2971    Inputs:
2972        - **x** (Tensor) - The input tensor.
2973          Data type must be float16, float32, float64.
2974
2975    Outputs:
2976        Tensor, has the same shape as `x`.
2977
2978    Raises:
2979        TypeError: If `x` is not a Tensor of float16, float32, float64.
2980
2981    Supported Platforms:
2982        ``GPU`` ``CPU``
2983
2984    Examples:
2985        >>> import mindspore
2986        >>> import numpy as np
2987        >>> from mindspore import Tensor, ops
2988        >>> bessel_k0 = ops.BesselK0()
2989        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
2990        >>> output = bessel_k0(x)
2991        >>> print(output)
2992        [1.579826  0.5402144 1.3424659 2.5310173]
2993    """
2994
2995    @prim_attr_register
2996    def __init__(self):
2997        """Initialize BesselK0"""
2998
2999
3000class BesselK1(Primitive):
3001    r"""
3002    Computes modified Bessel function of the second kind, order 1 element-wise.
3003
3004    The formula is defined as:
3005
3006    .. math::
3007        \begin{array}{ll} \\
3008            K_{1}(x)=\lim_{\nu \to 1} \left(\frac{\pi}{2}\right) \frac{I_{-\nu}(x)-
3009            I_{\nu}(x)}{\sin (\nu \pi)} = \int_{0}^{\infty} e^{-x \cosh t} \cosh (t) d t
3010        \end{array}
3011
    where :math:`I_{\nu}` is the modified Bessel function of the first kind, order :math:`\nu`.
3013
3014    .. warning::
3015        This is an experimental API that is subject to change or deletion.
3016
3017    Inputs:
3018        - **x** (Tensor) - The input tensor.
3019          Data type must be float16, float32, float64.
3020
3021    Outputs:
3022        Tensor, has the same shape as `x`.
3023
3024    Raises:
3025        TypeError: If `x` is not a Tensor of float16, float32, float64.
3026
3027    Supported Platforms:
3028        ``GPU`` ``CPU``
3029
3030    Examples:
3031        >>> import mindspore
3032        >>> import numpy as np
3033        >>> from mindspore import Tensor, ops
3034        >>> bessel_k1 = ops.BesselK1()
3035        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
3036        >>> output = bessel_k1(x)
3037        >>> print(output)
3038        [3.9190812  0.8143549  2.9440577 10.974864]
3039    """
3040
3041    @prim_attr_register
3042    def __init__(self):
3043        """Initialize BesselK1"""
3044
3045
3046class BesselK0e(Primitive):
3047    r"""
3048    Computes exponential scaled modified Bessel function of the second kind, order 0 element-wise.
3049
3050    The formula is defined as:
3051
3052    .. math::
3053        \begin{array}{ll} \\
            K_{0}e(x)= e^{|x|} * K_{0}(x) = e^{|x|} * \int_{0}^
            {\infty} e^{-x \cosh t} d t
3056        \end{array}
3057
3058    where :math:`K_{0}` is modified Bessel function of the second kind, order 0.
3059
3060    .. warning::
3061        This is an experimental API that is subject to change or deletion.
3062
3063    Inputs:
3064        - **x** (Tensor) - The input tensor.
3065          Data type must be float16, float32, float64.
3066
3067    Outputs:
3068        Tensor, has the same shape as `x`.
3069
3070    Raises:
3071        TypeError: If `x` is not a Tensor of float16, float32, float64.
3072
3073    Supported Platforms:
3074        ``GPU`` ``CPU``
3075
3076    Examples:
3077        >>> import mindspore
3078        >>> import numpy as np
3079        >>> from mindspore import Tensor, ops
3080        >>> bessel_k0e = ops.BesselK0e()
3081        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
3082        >>> output = bessel_k0e(x)
3083        >>> print(output)
3084        [2.0083523 1.2388839 1.8303517 2.769374 ]
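        >>> # Illustrative check of the scaling identity K0e(x) = exp(|x|) * K0(x)
        >>> # against the unscaled BesselK0 operator.
        >>> k0 = ops.BesselK0()(x)
        >>> print(np.allclose(output.asnumpy(), np.exp(np.abs(x.asnumpy())) * k0.asnumpy(), rtol=1e-5))
        True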
3085    """
3086
3087    @prim_attr_register
3088    def __init__(self):
3089        """Initialize BesselK0e"""
3090
3091
3092class BesselK1e(Primitive):
3093    r"""
3094    Computes exponential scaled modified Bessel function of the second kind, order 1 element-wise.
3095
3096    The formula is defined as:
3097
3098    .. math::
3099        \begin{array}{ll} \\
            K_{1}e(x)= e^{|x|} * K_{1}(x) = e^{|x|} * \int_{0}
            ^{\infty} e^{-x \cosh t} \cosh (t) d t
3102        \end{array}
3103
3104    where :math:`K_{1}` is modified Bessel function of the second kind, order 1.
3105
3106    .. warning::
3107        This is an experimental API that is subject to change or deletion.
3108
3109    Inputs:
3110        - **x** (Tensor) - The input tensor.
3111          Data type must be float16, float32, float64.
3112
3113    Outputs:
3114        Tensor, has the same shape as `x`.
3115
3116    Raises:
3117        TypeError: If `x` is not a Tensor of float16, float32, float64.
3118
3119    Supported Platforms:
3120        ``GPU`` ``CPU``
3121
3122    Examples:
3123        >>> import mindspore
3124        >>> import numpy as np
3125        >>> from mindspore import Tensor, ops
3126        >>> bessel_k1e = ops.BesselK1e()
3127        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
3128        >>> output = bessel_k1e(x)
3129        >>> print(output)
3130        [ 4.9821286  1.8675754  4.0140023 12.008413 ]
3131    """
3132
3133    @prim_attr_register
3134    def __init__(self):
3135        """Initialize BesselK1e"""
3136
3137
3138class BesselJ0(Primitive):
3139    r"""
3140    Computes Bessel function of the first kind, order 0 element-wise.
3141
3142    The formula is defined as:
3143
3144    .. math::
3145        \begin{array}{ll} \\
3146            J_{0}(x) = \frac{1}{\pi} \int_{0}^{\pi} \cos (x \sin \theta) d \theta
3147            =\sum_{m=0}^{\infty} \frac{(-1)^{m} x^{2 m}}{2^{2 m} (m !)^2}
3148        \end{array}
3149
3150    .. warning::
3151        This is an experimental API that is subject to change or deletion.
3152
3153    Inputs:
3154        - **x** (Tensor) - The input tensor.
3155          Data type must be float16, float32 or float64.
3156
3157    Outputs:
3158        Tensor, has the same shape as `x`.
3159
3160    Raises:
3161        TypeError: If `x` is not a Tensor of float16, float32 or float64.
3162
3163    Supported Platforms:
3164        ``GPU`` ``CPU``
3165
3166    Examples:
3167        >>> import mindspore
3168        >>> import numpy as np
3169        >>> from mindspore import Tensor, ops
3170        >>> bessel_j0 = ops.BesselJ0()
3171        >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
3172        >>> output = bessel_j0(x)
3173        >>> print(output)
3174        [0.93846981  0.76519769  0.22389078  -0.39714981]
3175    """
3176
3177    @prim_attr_register
3178    def __init__(self):
3179        """Initialize BesselJ0"""
3180        self.init_prim_io_names(inputs=['x'], outputs=['output'])
3181
3182
3183class BesselJ1(Primitive):
3184    r"""
3185    Computes Bessel function of the first kind, order 1 element-wise.
3186
3187    The formula is defined as:
3188
3189    .. math::
3190        \begin{array}{ll} \\
3191            J_{1}(x) = \frac{1}{\pi} \int_{0}^{\pi} \cos (x \sin \theta- \theta) d \theta
3192            =\sum_{m=0}^{\infty} \frac{(-1)^{m} x^{2 m+1}}{2^{2 m+1} m !(m+1) !}
3193        \end{array}
3194
3195    .. warning::
3196        This is an experimental API that is subject to change or deletion.
3197
3198    Inputs:
3199        - **x** (Tensor) - The input tensor.
3200          Data type must be float16, float32 or float64.
3201
3202    Outputs:
3203        Tensor, has the same shape as `x`.
3204
3205    Raises:
3206        TypeError: If `x` is not a Tensor of float16, float32 or float64.
3207
3208    Supported Platforms:
3209        ``GPU`` ``CPU``
3210
3211    Examples:
3212        >>> import mindspore
3213        >>> import numpy as np
3214        >>> from mindspore import Tensor, ops
3215        >>> bessel_j1 = ops.BesselJ1()
3216        >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
3217        >>> output = bessel_j1(x)
3218        >>> print(output)
3219        [0.24226846  0.44005059  0.57672481  -0.06604333]
3220    """
3221
3222    @prim_attr_register
3223    def __init__(self):
3224        """Initialize BesselJ1"""
3225        self.init_prim_io_names(inputs=['x'], outputs=['output'])
3226
3227
3228class BesselY0(Primitive):
3229    r"""
3230    Computes Bessel function of the second kind, order 0 element-wise.
3231
3232    The formula is defined as:
3233
3234    .. math::
3235        \begin{array}{ll} \\
3236            Y_{0}(x)=\lim_{n \to 0} \frac{J_{n}(x) \cos n \pi-J_{-n}(x)}{\sin n \pi}
3237        \end{array}
3238
    where :math:`J_{n}` is the Bessel function of the first kind, order :math:`n`.
3240
3241    .. warning::
3242        This is an experimental API that is subject to change or deletion.
3243
3244    Inputs:
3245        - **x** (Tensor) - The input tensor.
3246          Data type must be float16, float32 or float64.
3247
3248    Outputs:
3249        Tensor, has the same shape as `x`.
3250
3251    Raises:
3252        TypeError: If `x` is not a Tensor of float16, float32, float64.
3253
3254    Supported Platforms:
3255        ``GPU`` ``CPU``
3256
3257    Examples:
3258        >>> import mindspore
3259        >>> import numpy as np
3260        >>> from mindspore import Tensor, ops
3261        >>> bessel_y0 = ops.BesselY0()
3262        >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
3263        >>> output = bessel_y0(x)
3264        >>> print(output)
3265        [-0.44451873  0.08825696  0.51037567  -0.01694074]
3266    """
3267
3268    @prim_attr_register
3269    def __init__(self):
3270        """Initialize BesselY0"""
3271        self.init_prim_io_names(inputs=['x'], outputs=['output'])
3272
3273
3274class BesselY1(Primitive):
3275    r"""
3276    Computes Bessel function of the second kind, order 1 element-wise.
3277
3278    The formula is defined as:
3279
3280    .. math::
3281        \begin{array}{ll} \\
3282            Y_{1}(x)=\lim_{n \to 1} \frac{J_{n}(x) \cos n \pi-J_{-n}(x)}{\sin n \pi}
3283        \end{array}
3284
    where :math:`J_{n}` is the Bessel function of the first kind, order :math:`n`.
3286
3287    .. warning::
3288        This is an experimental API that is subject to change or deletion.
3289
3290    Inputs:
3291        - **x** (Tensor) - The input tensor.
3292          Data type must be float16, float32 or float64.
3293
3294    Outputs:
3295        Tensor, has the same shape as `x`.
3296
3297    Raises:
3298        TypeError: If `x` is not a Tensor of float16, float32, float64.
3299
3300    Supported Platforms:
3301        ``GPU`` ``CPU``
3302
3303    Examples:
3304        >>> import mindspore
3305        >>> import numpy as np
3306        >>> from mindspore import Tensor, ops
3307        >>> bessel_y1 = ops.BesselY1()
3308        >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
3309        >>> output = bessel_y1(x)
3310        >>> print(output)
3311        [-1.47147239  -0.78121282  -0.10703243  0.39792571]
3312    """
3313
3314    @prim_attr_register
3315    def __init__(self):
3316        """Initialize BesselY1"""
3317        self.init_prim_io_names(inputs=['x'], outputs=['output'])
3318
3319
3320class Inv(Primitive):
3321    r"""
    Computes the reciprocal of the input tensor element-wise.
3323
3324    Refer to :func:`mindspore.ops.inv` for more details.
3325
3326    Inputs:
3327        - **x** (Tensor) - Input tensor, it must be one of the following types: float16, float32 or int32.
3328
3329    Outputs:
3330        Tensor, has the same shape and data type as `x`.
3331
3332    Supported Platforms:
3333        ``Ascend`` ``GPU`` ``CPU``
3334
3335    Examples:
3336        >>> import mindspore
3337        >>> import numpy as np
3338        >>> from mindspore import Tensor, ops
3339        >>> inv = ops.Inv()
3340        >>> x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)
3341        >>> output = inv(x)
3342        >>> print(output)
3343        [4.        2.5       3.2258065 1.923077 ]
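        >>> # Illustrative cross-check: Inv is element-wise 1 / x.
        >>> print(np.allclose(output.asnumpy(), 1.0 / x.asnumpy()))
        True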
3344    """
3345
3346    @prim_attr_register
3347    def __init__(self):
3348        pass
3349
3350
3351class Invert(Primitive):
3352    r"""
3353    Flips all bits of input tensor element-wise.
3354
3355    Refer to :func:`mindspore.ops.invert` for more details.
3356
3357    Supported Platforms:
3358        ``Ascend`` ``GPU`` ``CPU``
3359
3360    Examples:
3361        >>> import mindspore
3362        >>> import numpy as np
3363        >>> from mindspore import Tensor, ops
3364        >>> invert = ops.Invert()
3365        >>> x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16)
3366        >>> output = invert(x)
3367        >>> print(output)
3368        [-26 -5 -14 -10]
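        >>> # Illustrative cross-check: NumPy's bitwise inversion gives the same result.
        >>> print(np.array_equal(output.asnumpy(), np.invert(x.asnumpy())))
        True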
3369    """
3370
3371    @prim_attr_register
3372    def __init__(self):
3373        """Initialize Invert"""
3374        self.init_prim_io_names(inputs=['x'], outputs=['y'])
3375
3376
3377class Eps(Primitive):
3378    """
    Create a Tensor with the same data type and shape as the input, where every element is the machine
    epsilon (the smallest positive increment representable by that data type).

    Refer to :func:`mindspore.ops.eps` for more details.

    Inputs:
        - **x** (Tensor) - Tensor of any dimension used to obtain the machine epsilon of its data type.
          The data type must be float16, float32 or float64.

    Outputs:
        Tensor, has the same type and shape as `x`, filled with the machine epsilon of `x`'s data type.
3390
3391    Raises:
3392        TypeError: If `x` is not a Tensor.
3393        TypeError: If data type of `x` is neither float16, float32, nor float64.
3394
3395    Supported Platforms:
3396        ``Ascend`` ``GPU`` ``CPU``
3397
3398    Examples:
3399        >>> import mindspore
3400        >>> from mindspore import Tensor, ops
3401        >>> x = Tensor([4, 1, 2, 3], mindspore.float32)
3402        >>> output = ops.Eps()(x)
3403        >>> print(output)
3404        [1.1920929e-07 1.1920929e-07 1.1920929e-07 1.1920929e-07]
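        >>> # Illustrative cross-check: for float32 this is NumPy's machine epsilon.
        >>> import numpy as np
        >>> print(np.finfo(np.float32).eps)
        1.1920929e-07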
3405    """
3406
3407    @prim_attr_register
3408    def __init__(self):
3409        """Initialize Eps"""
3410        self.init_prim_io_names(inputs=['x'], outputs=['y'])
3411
3412
3413class MatrixInverse(Primitive):
3414    """
    Returns the inverse of the input matrix. If the matrix is not invertible, an error may be reported or an
    unknown result may be returned.
3417
3418    Note:
        The parameter `adjoint` currently only supports ``False``, because complex numbers are not supported
        yet.
3421
3422    .. warning::
3423        This is an experimental API that is subject to change or deletion.
3424
3425    Args:
3426        adjoint (bool) : An optional bool. Default: ``False`` .
3427
3428    Inputs:
3429        - **x** (Tensor) - A matrix to be calculated. The matrix must be at least two dimensions, and the last two
3430          dimensions must be the same size.
3431
3432    Outputs:
3433        Tensor, has the same type and shape as input `x`.
3434
3435    Raises:
3436        TypeError: If `adjoint` is not a bool.
3437        TypeError: If `x` is not a Tensor.
        ValueError: If the last two dimensions of `x` are not the same size.
3439        ValueError: If the dimension of `x` is less than 2.
3440
3441    Supported Platforms:
3442        ``Ascend`` ``GPU`` ``CPU``
3443
3444    Examples:
3445        >>> import mindspore
3446        >>> import numpy as np
3447        >>> from mindspore import Tensor, ops
3448        >>> x = Tensor(np.array([[[-0.710504  , -1.1207525],
3449        ...                       [-1.7651395 , -1.7576632]],
3450        ...                      [[ 0.52412605,  1.9070215],
3451        ...                       [ 1.3384849 ,  1.4274558]]]), mindspore.float32)
3452        >>> matrix_inverse = ops.MatrixInverse(adjoint=False)
3453        >>> output = matrix_inverse(x)
3454        >>> print(output)
3455        [[[ 2.4095478  -1.5364188 ]
3456          [-2.419797    0.9740167 ]]
3457         [[-0.79111797  1.0569006 ]
3458          [ 0.74180895 -0.2904787 ]]]
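        >>> # Illustrative sanity check: x @ inv(x) is the identity up to float32 round-off.
        >>> print(np.allclose(np.matmul(x.asnumpy(), output.asnumpy()), np.eye(2), atol=1e-4))
        True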
3459    """
3460
3461    @prim_attr_register
3462    def __init__(self, adjoint=False):
3463        """Initialize MatrixInverse"""
3464        self.init_prim_io_names(inputs=['x'], outputs=['y'])
3465        validator.check_value_type('adjoint', adjoint, [bool], self.name)
3466
3467
3468class MatrixPower(Primitive):
3469    """
3470    Calculates the n-th power of a batch of square matrices.
3471    When n equals 0, it returns a group of identity matrices. If n is negative,
3472    it computes the inverse of each matrix (if possible) raised to the power of abs(n).
3473
3474    .. warning::
3475        This is an experimental API that is subject to change or deletion.
3476
3477    Args:
3478        n (int) : The exponent, a required int.
3479
3480    Inputs:
        - **x** (Tensor) - A 3-D Tensor. The shape is :math:`(b, m, m)`, representing :math:`b` square
          matrices of size :math:`(m, m)`.
3482
3483    Outputs:
3484        - **y** (Tensor) - A 3-D Tensor. Data type and shape are the same as `x`'s.
3485
3486    Raises:
3487        TypeError: If the data type of `n` is not int.
3488        TypeError: If x is not a Tensor.
3489        ValueError: If `x` is not a 3-D tensor.
3490        ValueError: If shape[1] and shape[2] of `x` are not the same.
        ValueError: If `n` is negative and the input `x` contains singular matrices.
        ValueError: If `n` < 0 and the dtype of the input is an integer type.
3493
3494    Supported Platforms:
3495        ``Ascend`` ``CPU``
3496
3497    Examples:
3498        >>> import mindspore
3499        >>> from mindspore import Tensor, ops
        >>> x = Tensor([[[0, 1], [-1, 0]], [[1, 0], [0, -1]]], dtype=mindspore.float32)
3501        >>> matrix_power = ops.MatrixPower(n=2)
3502        >>> y = matrix_power(x)
3503        >>> print(y)
3504        [[[-1.  0.]
3505          [-0. -1.]]
3506         [[ 1.  0.]
3507          [ 0.  1.]]]
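        >>> # Illustrative check: with n=0 every matrix in the batch becomes the identity.
        >>> import numpy as np
        >>> y0 = ops.MatrixPower(n=0)(x)
        >>> print(np.allclose(y0.asnumpy(), np.eye(2)))
        True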
3508    """
3509
3510    @prim_attr_register
3511    def __init__(self, n):
3512        super().__init__(name="MatrixPower")
3513        self.n = validator.check_value_type("n", n, [int], self.name)
3514
3515
3516class MatrixLogarithm(Primitive):
3517    """
3518    Return the matrix logarithm of one or more square matrices.
3519
3520    Inputs:
        - **x** (Tensor) - The input tensor, with shape :math:`(..., M, M)`.
          Must be one of the following types: complex64, complex128. Its rank must be in the range of 2 to 7.
3523
3524    Outputs:
3525        - **y** (Tensor) - has the same shape and type as input.
3526
3527    Raises:
3528        TypeError: If `x` is not a Tensor.
3529        TypeError: If dtype of `x` is not one of: complex64, complex128.
        ValueError: If the dimension of `x` is less than 2.
        ValueError: If the sizes of the last two dimensions are not equal.
3532
3533    Supported Platforms:
3534        ``Ascend`` ``CPU``
3535
3536    Examples:
        >>> from mindspore import Tensor, ops
        >>> x = Tensor([[1 + 2j, 2 + 1j], [4 + 1j, 5 + 2j]])
3538        >>> matrix_logarithm = ops.MatrixLogarithm()
3539        >>> y = matrix_logarithm(x)
3540        >>> print(y)
3541        [[0.69155775+1.71618359j 0.64665196-0.34928196j]
3542         [1.02426074-0.88736831j 1.44677531+0.6400109j ]]
3543    """
3544
3545    @prim_attr_register
3546    def __init__(self):
3547        """Initialize MatrixLogarithm"""
3548        self.init_prim_io_names(inputs=['x'], outputs=['y'])
3549
3550
3551class IndexAdd(Primitive):
3552    """
    Adds tensor `y` to specified axis and indices of tensor `x`. The axis should be in [-len(x.shape),
    len(x.shape) - 1], and indices should be in [0, x.shape[axis] - 1] at the axis dimension.
3555
3556    Args:
3557        axis (int): The dimension along which to index.
3558        use_lock (bool, optional): Whether to enable a lock to protect the updating process of variable tensors.
3559            If ``True`` , when updating the value of `x`, this process will be protected by a lock by using atomic
3560            operation.
3561            If ``False`` , the result may be unpredictable. Default: ``True`` .
3562        check_index_bound (bool, optional): If ``True`` , check index boundary. If ``False`` ,
3563            don't check index boundary. Default: ``True`` .
3564
3565    Inputs:
3566        - **x** (Parameter) - The input Parameter to add to.
        - **indices** (Tensor) - The indices along `axis` at which `y` is added to `x`, with data type int32.
          The `indices` must be 1D with the same size as the size of `y` in the `axis` dimension. The values
          of `indices` should be in [0, b), where b is the size of `x` in the `axis` dimension.
3571        - **y** (Tensor) - The input tensor with the value to add. Must have same data type as `x`.
3572          The shape must be the same as `x` except the `axis` th dimension.
3573
3574    Outputs:
3575        Tensor, has the same shape and dtype as `x`.
3576
3577    Raises:
3578        TypeError: If `x` is not a Parameter.
        TypeError: If `indices` or `y` is not a Tensor.
3580        ValueError: If axis is out of `x` rank's range.
3581        ValueError: If `x` rank is not the same as `y` rank.
3582        ValueError: If shape of `indices` is not 1D or size of `indices` is not equal to dimension of y[axis].
3583        ValueError: If `y`'s shape is not the same as `x` except the `axis` th dimension.
3584
3585    Supported Platforms:
3586        ``Ascend`` ``GPU`` ``CPU``
3587
3588    Examples:
3589        >>> import mindspore
3590        >>> import numpy as np
3591        >>> from mindspore import Tensor, nn, ops, Parameter
3592        >>> class Net(nn.Cell):
3593        ...     def __init__(self):
3594        ...         super(Net, self).__init__()
3595        ...         self.index_add = ops.IndexAdd(axis=1)
3596        ...         self.x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32),
3597        ...                 name="name_x")
3598        ...         self.indices = Tensor(np.array([0, 2]), mindspore.int32)
3599        ...
3600        ...     def construct(self, y):
3601        ...         return self.index_add(self.x, self.indices, y)
3602        ...
3603        >>> y = Tensor(np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]]), mindspore.float32)
3604        >>> net = Net()
3605        >>> output = net(y)
3606        >>> print(output)
3607        [[ 1.5  2.   4. ]
3608         [ 5.   5.   7.5]
3609         [ 9.   8.  11.5]]
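        >>> # Illustrative NumPy sketch of the same update along axis=1.
        >>> x_np = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], np.float32)
        >>> x_np[:, [0, 2]] += y.asnumpy()
        >>> print(np.array_equal(output.asnumpy(), x_np))
        True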
3610    """
3611    __mindspore_signature__ = (
3612        sig.make_sig('input_x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
3613        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
3614        sig.make_sig('input_y', dtype=sig.sig_dtype.T)
3615    )
3616
3617    @prim_attr_register
3618    def __init__(self, axis, use_lock=True, check_index_bound=True):
3619        """Initialize InplaceAdd"""
3620        self.init_prim_io_names(inputs=['input_x', 'indices', 'input_y'], outputs=['output'])
3621        self.axis = axis
3622        validator.check_value_type('axis', axis, [int], self.name)
3623        self.add_prim_attr('side_effect_mem', True)
3624
3625
3626class ComplexAbs(Primitive):
3627    r"""
3628    Returns a Tensor that contains the magnitudes of the input.
3629
3630    The complex numbers in input must be of the form :math:`a + bj`,
3631    where :math:`a` is the real part and :math:`b` is the imaginary part.
3632
3633    .. math::
3634
3635        y = \sqrt{a^2+b^2}
3636
3637    .. warning::
3638        This is an experimental API that is subject to change or deletion.
3639
3640    Inputs:
3641        - **x** (Tensor) - A Tensor, types: complex64, complex128.
3642
3643    Outputs:
3644        Tensor, has the same shape as x. If the type of x is complex64, the type of output is float32.
3645        If the type of x is complex128, the type of output is float64.
3646
3647    Raises:
3648       TypeError: If the input is not a Tensor.
3649       TypeError: If the input type is not complex64 or complex128.
3650
3651    Supported Platforms:
3652        ``Ascend`` ``GPU`` ``CPU``
3653
3654    Examples:
3655        >>> import mindspore
3656        >>> import numpy as np
3657        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.asarray(3+4j), mindspore.complex64)
3659        >>> complex_abs = ops.ComplexAbs()
3660        >>> output = complex_abs(x)
3661        >>> print(output)
3662        5.0
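        >>> # Illustrative cross-check: the magnitude of 3 + 4j is sqrt(3**2 + 4**2) = 5.
        >>> print(np.abs(3 + 4j))
        5.0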
3663    """
3664
3665    @prim_attr_register
3666    def __init__(self):
3667        """Initialize ComplexAbs"""
3668        self.init_prim_io_names(inputs=['x'], outputs=['y'])
3669
3670
3671class Imag(Primitive):
3672    """
    Returns a new tensor containing the imaginary part of the input.
    If the input is real, it returns zeros.
3675
3676    Inputs:
3677        - **input** (Tensor) - The input tensor.
3678
3679    Outputs:
3680        Tensor, the shape is the same as the input.
3681
3682    Raises:
3683       TypeError: If the input is not a Tensor.
3684
3685    Supported Platforms:
3686        ``Ascend`` ``GPU`` ``CPU``
3687
3688    Examples:
3689        >>> import mindspore
3690        >>> import numpy as np
3691        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.asarray(1.3+0.4j), mindspore.complex64)
3693        >>> imag = ops.Imag()
3694        >>> output = imag(x)
3695        >>> print(output)
3696        0.4
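        >>> # Illustrative cross-check with NumPy's np.imag.
        >>> print(np.imag(1.3 + 0.4j))
        0.4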
3697    """
3698
3699    @prim_attr_register
3700    def __init__(self):
3701        """Initialize Imag"""
3702        self.init_prim_io_names(inputs=['input'], outputs=['output'])
3703
3704
3705class Trunc(Primitive):
3706    """
3707    Returns a new tensor with the truncated integer values of the elements of input.
3708
3709    Refer to :func:`mindspore.ops.trunc` for more details.
3710
3711    Inputs:
3712        - **input_x** (Tensor) - Input tensor of any dimension.
3713
3714    Outputs:
3715        Tensor, the same shape and data type as `input_x`.
3716
3717    Supported Platforms:
3718        ``Ascend`` ``GPU`` ``CPU``
3719
3720    Examples:
3721        >>> import mindspore
3722        >>> import numpy as np
3723        >>> from mindspore import Tensor, ops
3724        >>> x = Tensor(np.array([3.4742, 0.5466, -0.8008, -3.9079]), mindspore.float32)
3725        >>> output = ops.Trunc()(x)
3726        >>> print(output)
3727        [ 3.  0. -0. -3.]
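        >>> # Illustrative cross-check: NumPy's trunc matches element-wise.
        >>> print(np.array_equal(output.asnumpy(), np.trunc(x.asnumpy())))
        True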
3728    """
3729
3730    @prim_attr_register
3731    def __init__(self):
3732        """Initialize Trunc"""
3733        self.init_prim_io_names(inputs=['input'], outputs=['output'])
3734
3735
3736class TridiagonalMatMul(Primitive):
3737    """
3738    Return the result of a multiplication of two matrices, where the left one is a Tridiagonal Matrix.
3739
3740    Inputs:
3741        - **superdiag** (Tensor) - Superdiagonals of Tridiagonal Matrices to the left of multiplication.
3742          Data types must be: float16, float32, double, complex64, complex128.
3743          The shape is :math:`(..., 1, M)`.
3744          Last element is ignored.
3745        - **maindiag** (Tensor) - Maindiagonals of Tridiagonal Matrices to the left of multiplication.
3746          Data types must be: float16, float32, double, complex64, complex128.
3747          The shape is :math:`(..., 1, M)`.
3748        - **subdiag** (Tensor) - Subdiagonals of Tridiagonal Matrices to the left of multiplication.
3749          Data types must be: float16, float32, double, complex64, complex128.
3750          The shape is :math:`(..., 1, M)`.
3751          First element is ignored.
        - **rhs** (Tensor) - MxN matrices to the right of the multiplication.
          Data types must be: float16, float32, double, complex64, complex128.
          The shape is :math:`(..., M, N)`.
3755
3756    Outputs:
3757        Tensor, with the same shape and data type as the `rhs`.
3758
3759    Raises:
3760        TypeError: If dtypes of `superdiag`, `maindiag`, `subdiag` and `rhs`
3761                   are not float16, float32, double, complex64, complex128.
3762        ValueError: If the col of input `superdiag`, the col of input `maindiag`,
3763                    the col of input `subdiag` and the row of input `rhs` are not equal.
3764        ValueError: If the row of input `superdiag`, the row of input `maindiag` and
3765                    the row of input `subdiag` are not 1.
3766        ValueError: If the rank of input `superdiag`, the rank of input `maindiag`,
                    the rank of input `subdiag` and the rank of input `rhs`
3768                    are not equal to or greater than 2.
3769        ValueError: If the shape of input `superdiag`, the shape of input `maindiag` and
3770                    the shape of input `subdiag` are not same.
3771        ValueError: If the shape of input `superdiag` ignoring the last two elements,
3772                    the shape of input `maindiag` ignoring the last two elements,
3773                    the shape of input `subdiag` ignoring the last two elements and
3774                    the shape of input `rhs` ignoring the last two elements
3775                    are not same.
3776
3777    Supported Platforms:
3778        ``CPU``
3779
3780    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> tridiagonalmatmul = ops.TridiagonalMatMul()
3782        >>> superdiag = Tensor(np.array([[1, 2, 3]]).astype(np.float32))
3783        >>> maindiag = Tensor(np.array([[1, 2, 3]]).astype(np.float32))
3784        >>> subdiag = Tensor(np.array([[1, 2, 3]]).astype(np.float32))
3785        >>> rhs = Tensor(np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]).astype(np.float32))
        >>> output = tridiagonalmatmul(superdiag, maindiag, subdiag, rhs)
3787        >>> print(output)
3788        [[ 2.  2.  2. ]
3789         [ 6.  6.  6.]
3790         [ 6.  6.  6.]]
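        >>> # Illustrative check: building the dense tridiagonal matrix gives the same product.
        >>> dense = (np.diag(maindiag.asnumpy()[0]) + np.diag(superdiag.asnumpy()[0, :-1], 1)
        ...          + np.diag(subdiag.asnumpy()[0, 1:], -1))
        >>> print(np.allclose(output.asnumpy(), dense @ rhs.asnumpy()))
        True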
3791    """
3792
3793    @prim_attr_register
3794    def __init__(self):
3795        """Initialize TridiagonalMatMul"""
3796        self.init_prim_io_names(
3797            inputs=['superdiag', 'maindiag', 'subdiag', 'rhs'],
3798            outputs=['y'])
3799
3800
3801class Igamma(Primitive):
3802    r"""
3803    Calculates lower regularized incomplete Gamma function.
3804
3805    .. warning::
3806        This is an experimental API that is subject to change or deletion.
3807
3808    Refer to :func:`mindspore.ops.igamma` for more details.
3809
3810    Inputs:
3811        - **a** (Tensor) - The input tensor.
3812        - **x** (Tensor) - The input tensor. It should have the same dtype with `a`.
3813
3814    Outputs:
3815        Tensor, has the same dtype as `a` and `x`.
3816
3817    Supported Platforms:
3818        ``Ascend`` ``GPU`` ``CPU``
3819
3820    Examples:
3821        >>> import numpy as np
3822        >>> import mindspore
3823        >>> from mindspore import Tensor, ops
3824        >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
3825        >>> x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32))
3826        >>> igamma = ops.Igamma()
3827        >>> output = igamma(a, x)
        >>> print(output)
3829        [0.593994  0.35276785  0.21486944  0.13337152]
3830        >>> a = Tensor(2.1, mindspore.float32)
3831        >>> x = Tensor(2.1, mindspore.float32)
3832        >>> igamma = ops.Igamma()
3833        >>> output = igamma(a, x)
        >>> print(output)
3835        0.5917439
3836    """
3837
3838    @prim_attr_register
3839    def __init__(self):
3840        """Initialize Igamma"""
3841        self.init_prim_io_names(inputs=['a', 'x'], outputs=['z'])
3842
3843
3844class Igammac(Primitive):
3845    r"""
3846    Compute the upper regularized incomplete Gamma function Q(a, x).
3847
3848    Refer to :func:`mindspore.ops.igammac` for more details.
3849
3850    Inputs:
3851        - **a** (Tensor) - The input tensor.
3852        - **x** (Tensor) - The input tensor. It should have the same dtype with `a`.
3853
3854    Outputs:
3855        Tensor, has the same dtype as `a` and `x`.
3856
3857    Supported Platforms:
3858        ``Ascend`` ``GPU`` ``CPU``
3859
3860    Examples:
3861        >>> import mindspore
3862        >>> import numpy as np
3863        >>> from mindspore import Tensor, ops
3864        >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
3865        >>> x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32))
3866        >>> igammac = ops.Igammac()
3867        >>> output = igammac(a, x)
        >>> print(output)
3869        [0.40600586 0.6472318  0.7851304  0.8666283 ]
3870        >>> a = Tensor(2.1, mindspore.float32)
3871        >>> x = Tensor(2.1, mindspore.float32)
3872        >>> igammac = ops.Igammac()
3873        >>> output = igammac(a, x)
        >>> print(output)
3875        0.40825662
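        >>> # Illustrative check of the complementary identity P(a, x) + Q(a, x) = 1.
        >>> p = ops.Igamma()(a, x)
        >>> print(np.allclose(output.asnumpy() + p.asnumpy(), 1.0))
        True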
3876    """
3877
3878    @prim_attr_register
3879    def __init__(self):
3880        """Initialize Igammac"""
3881        self.init_prim_io_names(inputs=['a', 'x'], outputs=['z'])
3882
3883
3884class MatrixSolve(Primitive):
3885    """
3886    Solves systems of linear equations.
3887
3888    Args:
        adjoint (bool, optional): Indicates whether to solve with the matrix's (conjugate) adjoint rather
            than the matrix itself. Default: ``False`` .
3891
3892    Inputs:
3893        - **matrix** (Tensor) - A tensor of shape :math:`(..., M, M)`,
3894          is a matrix of coefficients for a system of linear equations.
3895        - **rhs** (Tensor) - A tensor of shape :math:`(..., M, K)`,
3896          is a matrix of the resulting values of a system of linear equations.
3897          `rhs` must have the same type as `matrix`.
3898
3899    Outputs:
3900        Tensor, a matrix composed of solutions to a system of linear equations,
3901        which has the same type and shape as `rhs`.
3902
3903    Raises:
3904        TypeError: If `adjoint` is not the type of bool.
3905        TypeError: If the type of `matrix` is not one of the following dtype:
3906                   mstype.float16, mstype.float32, mstype.float64, mstype.complex64,
3907                   mstype.complex128.
3908        TypeError: If the type of `matrix` is not the same as that of `rhs`.
        ValueError: If the rank of `matrix` is less than 2.
3910        ValueError: If the dimension of `matrix` is not the same as `rhs` .
3911        ValueError: If the inner-most 2 dimension of `matrix` is not the same.
3912        ValueError: If the inner-most 2 dimension of `rhs` does not match `matrix` .
3913
3914    Supported Platforms:
3915        ``Ascend`` ``CPU``
3916
3917    Examples:
3918        >>> import mindspore
3919        >>> import numpy as np
3920        >>> from mindspore import Tensor, ops
3921        >>> matrix = Tensor(np.array([[1.0  , 4.0],
3922        ...                       [2.0 , 7.0]]), mindspore.float32)
3923        >>> rhs = Tensor(np.array([[1.0]  , [3.0]]), mindspore.float32)
3924        >>> matrix_solve = ops.MatrixSolve(adjoint = False)
3925        >>> output = matrix_solve(matrix, rhs)
3926        >>> print(output)
3927        [[5.0]
3928         [-1.0]]
3929    """
3930
3931    @prim_attr_register
3932    def __init__(self, adjoint=False):
3933        super().__init__(name="MatrixSolve")
3934        self.adjoint = validator.check_value_type("adjoint", adjoint, [bool], self.name)
3935
3936
3937class MatrixSolveLs(Primitive):
3938    r"""
3939    Solves one or more linear least-squares problems.
3940
    If `fast` is ``True``, the solution is computed by solving the normal equations using Cholesky
    decomposition. If `fast` is ``False``, an algorithm based on the numerically robust complete orthogonal
    decomposition is used; this path is typically 6-7 times slower than the fast path. If `fast` is
    ``False``, then `l2_regularizer` is ignored.
3944
3945    Args:
3946        fast (bool): An optional bool. Default: ``True`` .
3947
3948    Inputs:
3949        - **matrix** (Tensor) -  A Tensor. Must be one of the following data types: float64, float32, complex64,
3950          complex128. Shape is :math:`(*, M, N)`.
3951        - **rhs** (Tensor) -  A Tensor. Must have the same data type as matrix. Shape is :math:`(*, M, K)`.
3952          `matrix` and `rhs` should have the same dimensions except the last one.
3953        - **l2_regularizer** (Tensor) - A Tensor of type float64. Scalar tensor.
3954
3955    Outputs:
3956        Tensor of shape :math:`(*, N, K)` with the same data type as `matrix`.
3957
3958    Raises:
3959        TypeError: If `matrix`, `rhs` or `l2_regularizer` is not tensor.
3960        TypeError: If either of `matrix` and `rhs` is not float32, float64, complex64 or complex128.
3961        TypeError: If `l2_regularizer` is not float64.
3962        TypeError: If `fast` is not bool.
3963        ValueError: If dimensions of `matrix` or `rhs` is less than 2.
        ValueError: If shape of `matrix` does not match the shape of `rhs`.
3965
3966    Supported Platforms:
3967        ``CPU``
3968
3969    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> matrix_solve_ls = ops.MatrixSolveLs(fast=True)
3971        >>> matrix = Tensor([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]], mstype.float32)
3972        >>> rhs = Tensor(np.array([[4], [2], [4], [2]]), mstype.float32)
3973        >>> l2 = Tensor(0.0, mstype.float64)
3974        >>> output = matrix_solve_ls(matrix, rhs, l2)
3975        >>> print(output)
3976        [[ 1.3333334]
         [-0.6666667]
         [ 2.6666665]
         [-1.3333333]]
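        >>> # Illustrative cross-check against NumPy's least-squares solver.
        >>> lstsq_sol = np.linalg.lstsq(matrix.asnumpy(), rhs.asnumpy(), rcond=None)[0]
        >>> print(np.allclose(output.asnumpy(), lstsq_sol, rtol=1e-5))
        True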
3980    """
3981
3982    @prim_attr_register
3983    def __init__(self, fast=True):
3984        """Initialize MatrixSolveLs"""
3985        validator.check_value_type('fast', fast, [bool], self.name)
3986
3987
3988class Lu(Primitive):
3989    """
3990    Computes the LU decomposition of one or more square matrices.
3991
3992    Args:
        output_idx_type (:class:`mindspore.dtype`): The data type of the output `p`, must be
            ``mindspore.dtype.int32`` or ``mindspore.dtype.int64``. Default: ``mindspore.dtype.int32`` .
3995
3996    Inputs:
3997        - **input** (Tensor) - A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
3998          matrices of size `[M, M]`, with data type float32, float64, complex64, complex128.
3999
4000    Outputs:
4001        - **lu** (Tensor) - A tensor of shape `[..., M, M]` whose strictly lower triangular part denotes the lower
4002          triangular factor `L` with unit diagonal. Upper triangular part denotes the upper triangular factor `U`.
4003        - **p** (Tensor) - Permutation of the rows encoded as a list of indices in `0..M-1`, shape is `[..., M]`.
4004
4005    Raises:
4006        TypeError: If the dtype of `input` is not one of the following dtype:
4007            float32, float64, complex64, complex128.
4008        TypeError: If `output_idx_type` is neither int32 nor int64.
4009        ValueError: If `input` rank is less than 2.
        ValueError: If the last two dimensions of `input` are not equal in size.
4011
4012    Supported Platforms:
4013        ``GPU``
4014
4015    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input = Tensor(np.array([[2.5,3.1,3.5], [4.7,1.9,0.2], [1.1,3.6,2.0]]), mindspore.float32)
4017        >>> lu, p = ops.Lu(output_idx_type=mindspore.int32)(input)
4018        >>> print(lu)
4019        [[4.7        1.9        0.2       ]
4020         [0.23404257 3.155319   1.9531915 ]
4021         [0.5319149  0.6621713  2.1002696 ]]
4022        >>> print(p)
4023        [1 2 0]
4024    """
4025
4026    @prim_attr_register
4027    def __init__(self, output_idx_type):
4028        super().__init__(name="Lu")
4029        self.init_prim_io_names(inputs=['input'], outputs=['lu', 'p'])
4030        validator.check_type_name("output_idx_type", output_idx_type, [mstype.int32, mstype.int64], self.name)
4031        self.add_prim_attr('output_idx_type', output_idx_type)
4032
4033
4034class LuSolve(Primitive):
4035    r"""
    Computes the solution y to the system of linear equations :math:`Ay = b`,
    given the LU decomposition of :math:`A` and the column vector :math:`b`.
4038
4039    LU decomposition of a matrix can be generated from :func:`mindspore.scipy.linalg.lu` .
4040
4041    Note:
4042        The batch dimensions of lu_pivots must match the batch dimensions of lu_data, the size of the dimension and the
4043        number of each dimension must be the same. For example, lu_data is :math:`(3, 3, 2, 2)` lu_pivots is
4044        :math:`(3, 3, 2)`,
4045        lu_data's batch dimensions is :math:`(3, 3)`, lu_pivots's batch dimensions is :math:`(3, 3)`.
4046
4047        The batch dimensions of lu_data must match the batch dimensions of x, the batch dimensions may have
4048        different sizes, from right to left, the corresponding dimensions must be equal. For example, lu_data
4049        is :math:`(3, 3, 2, 2)` x is :math:`(2, 3, 3, 2, 1)`, lu_data's batch dimensions is
4050        :math:`(3, 3)`, x's batch dimensions is :math:`(2, 3, 3)`.
4051
4052    Inputs:
4053        - **x** (Tensor) - Column vector `b` in the above equation. It has shape :math:`(*, m, k)`,
4054          where :math:`*` is batch dimensions, with data type float32, float16.
4055        - **lu_data** (Tensor) - LU decomposition. It has shape :math:`(*, m, m)`, where * is batch
4056          dimensions, that can be decomposed into an upper triangular matrix U and a lower triangular
4057          matrix L, with data type float32, float16.
4058        - **lu_pivots** (Tensor) - Permutation matrix P of LU decomposition. It has
4059          shape :math:`(*, m)`, where :math:`*` is batch dimensions, that can be converted
4060          to a permutation matrix P, with data type int32.
4061
4062    Outputs:
        Tensor, with the same data type as `x` and `lu_data`.
4064
4065    Raises:
4066        TypeError: If dtype of `x` or `lu_data` is not one of: float32, float16.
4067        TypeError: If dtype of `lu_pivots` is not: int32.
4068        TypeError: If `x`, `lu_data` or `lu_pivots` is not Tensor.
4069        TypeError: If dtype of `x` is not same as dtype of `lu_data`.
4070        ValueError: If the batch dimensions of lu_pivots does not match the batch dimensions of lu_data.
4071        ValueError: If `x` dimension less than 2, `lu_data` dimension less than 2 or `lu_pivots` dimension less than 1.
4072
4073    Supported Platforms:
4074        ``Ascend`` ``GPU`` ``CPU``
4075
4076    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([[1], [3], [3]]), mindspore.float32)
4078        >>> lu_data = Tensor(np.array([[2, 1, 1], [0.5, 1, 1.5], [0.5, 0, 2.5]]), mindspore.float32)
4079        >>> lu_pivots = Tensor(np.array([2, 2, 3]), mindspore.int32)
4080        >>> net = ops.LuSolve()
4081        >>> y = net(x, lu_data, lu_pivots)
4082        >>> print(y)
4083        [[ 1.9000002]
4084         [-1.4000001]
4085         [ 0.6      ]]
4086    """
4087
4088    @prim_attr_register
4089    def __init__(self):
4090        pass
4091
4092
4093class LuUnpack(Primitive):
4094    """
4095    Converts `LU_data` and `LU_pivots` back into P, L and U matrices, where
4096    P is a permutation matrix, L is a lower triangular matrix, and U is an
4097    upper triangular matrix. Typically, `LU_data` and `LU_pivots` are generated
4098    from the LU decomposition of a matrix.
4099
4100    .. warning::
4101        This is an experimental API that is subject to change or deletion.
4102
4103    Refer to :func:`mindspore.ops.lu_unpack` for more details.
4104
4105    Args:
4106        unpack_data (bool, optional): A flag indicating if the LU_data should be unpacked.
4107            If ``False`` , then the returned L and U are None. Default: ``True`` .
4108        unpack_pivots (bool, optional): A flag indicating if the LU_pivots should be unpacked
4109            into a permutation matrix P. If ``False`` , then the returned P is None. Default: ``True`` .
4110
4111    Inputs:
4112        - **LU_data** (Tensor) - The packed LU factorization data. The shape of a tensor is :math:`(*, M, N)`,
4113          where :math:`*` is batch dimensions, with data type int8, uint8, int16, int32, int64, float16,
4114          float32, float64. The dims of LU_data must be equal to or greater than 2.
4115        - **LU_pivots** (Tensor) - The packed LU factorization pivots. The shape of a tensor is :math:`(*, min(M, N))`,
4116          where :math:`*` is batch dimensions, with data type int8, uint8, int16, int32, int64.
4117
4118    Outputs:
4119        - **pivots** (Tensor) - The permutation matrix of LU factorization. The shape is :math:`(*, M, M)`,
4120          the dtype is same as `LU_data`.
4121        - **L** (Tensor) - The L matrix of LU factorization. The dtype is the same as `LU_data`.
4122        - **U** (Tensor) - The U matrix of LU factorization. The dtype is the same as `LU_data`.
4123
4124    Supported Platforms:
4125        ``GPU`` ``CPU``
4126
4127    Examples:
4128        >>> import numpy as np
4129        >>> from mindspore import Tensor, ops
4130        >>> from mindspore import dtype as mstype
4131        >>> LU_data = Tensor(np.array([[[-0.3806, -0.4872,  0.5536],
4132        ...                             [-0.1287,  0.6508, -0.2396],
4133        ...                             [ 0.2583,  0.5239,  0.6902]],
4134        ...                             [[ 0.6706, -1.1782,  0.4574],
4135        ...                             [-0.6401, -0.4779,  0.6701],
4136        ...                             [ 0.1015, -0.5363,  0.6165]]]), mstype.float32)
4137        >>> LU_pivots = Tensor(np.array([[1, 3, 3],
4138        ...                              [2, 3, 3]]), mstype.int32)
4139        >>> lu_unpack = ops.LuUnpack()
4140        >>> pivots, L, U = lu_unpack(LU_data, LU_pivots)
4141        >>> print(pivots)
4142        [[[1. 0. 0.]
4143          [0. 0. 1.]
4144          [0. 1. 0.]]
4145        <BLANKLINE>
4146         [[0. 0. 1.]
4147          [1. 0. 0.]
4148          [0. 1. 0.]]]
4149        >>> print(L)
4150        [[[ 1.      0.      0.    ]
4151          [-0.1287  1.      0.    ]
4152          [ 0.2583  0.5239  1.    ]]
4153        <BLANKLINE>
4154         [[ 1.      0.      0.    ]
4155          [-0.6401  1.      0.    ]
4156          [ 0.1015 -0.5363  1.    ]]]
4157        >>> print(U)
4158        [[[-0.3806 -0.4872  0.5536]
4159          [ 0.      0.6508 -0.2396]
4160          [ 0.      0.      0.6902]]
4161        <BLANKLINE>
4162         [[ 0.6706 -1.1782  0.4574]
4163          [ 0.     -0.4779  0.6701]
4164          [ 0.      0.      0.6165]]]
4165    """
4166
4167    @prim_attr_register
4168    def __init__(self, unpack_data=True, unpack_pivots=True):
4169        """Initialize LuUnpack"""
4170        validator.check_value_type("unpack_data", unpack_data, [bool], self.name)
4171        validator.check_value_type("unpack_pivots", unpack_pivots, [bool], self.name)
4172
4173
4174class Lgamma(Primitive):
4175    r"""
4176    Computes the natural logarithm of the absolute value of the gamma function on input.
4177
4178    Refer to :func:`mindspore.ops.lgamma` for more details.
4179
4180    Inputs:
4181        - **x** (Tensor) - The input tensor. The dtype can be float16, float32 or float64.
4182
4183    Outputs:
4184        Tensor, has the same dtype as `x`.
4185
4186    Supported Platforms:
4187        ``GPU`` ``CPU``
4188
4189    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([0.5, 3.2, 8.5]), mindspore.float32)
4191        >>> lgamma = ops.Lgamma()
4192        >>> output = lgamma(x)
4193        >>> print(output)
4194        [0.5723649 0.8854049 9.549267 ]
4195        >>> x = Tensor(2.1, mindspore.float32)
4196        >>> output = lgamma(x)
4197        >>> print(output)
4198        0.045437694
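        >>> # Illustrative cross-check against Python's math.lgamma.
        >>> import math
        >>> print(np.allclose(output.asnumpy(), math.lgamma(2.1)))
        True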
4199    """
4200
4201    @prim_attr_register
4202    def __init__(self):
4203        """Initialize Lgamma"""
4204        self.init_prim_io_names(inputs=['x'], outputs=['y'])
4205
4206
4207class Digamma(Primitive):
4208    r"""
    Computes the derivative of the lgamma function (the digamma function) on the input.

    .. math::
        P(x) = \frac{d}{dx}{\ln (\Gamma(x))}
4213
4214    .. warning::
4215        This is an experimental API that is subject to change or deletion.
4216
4217    Inputs:
        - **x** (Tensor) - The input tensor, with data type float16, float32 or float64.
4219
4220    Outputs:
4221        Tensor, has the same dtype as `x`.
4222
4223    Raises:
        TypeError: If `x` is not a Tensor.
        TypeError: If dtype of `x` is not float16, float32 or float64.
4226
4227    Supported Platforms:
4228        ``GPU`` ``CPU``
4229
4230    Examples:
4231        >>> import numpy as np
4232        >>> from mindspore import Tensor, ops
4233        >>> x = Tensor(np.array([1.5, 0.5, 9]).astype(np.float16))
4234        >>> digamma = ops.Digamma()
4235        >>> output = digamma(x)
4236        >>> print(output)
4237        [ 0.0365 -1.964   2.14  ]
4238    """
4239
4240    @prim_attr_register
4241    def __init__(self):
4242        """Initialize Digamma"""
4243        self.init_prim_io_names(inputs=['input'], outputs=['output'])
4244
4245
4246class Polygamma(Primitive):
4247    r"""
4248    Computes the :math:`a`th derivative of the polygamma function on `x`.
4249
4250    .. warning::
4251        This is an experimental API that is subject to change or deletion.
4252
4253    Refer to :func:`mindspore.ops.polygamma` for more details.
4254
4255    Inputs:
4256        - **a** (Tensor) - The order of the polygamma function, it has shape :math:`()`,
4257          supported types: int32, int64.
4258        - **x** (Tensor) - The tensor to compute the :math:`a`-th derivative of the polygamma function with,
4259          supported types: float16, float32, float64.
4260
4261    Outputs:
4262        Tensor, has the same dtype as `x`.
4263
4264    Supported Platforms:
4265        ``GPU`` ``CPU``
4266
4267    Examples:
4268        >>> import mindspore
4269        >>> import numpy as np
4270        >>> from mindspore import Tensor, ops
4271        >>> x = Tensor(np.array([1.0, -0.5]), mindspore.float32)
4272        >>> a = Tensor(np.array(1), mindspore.int64)
4273        >>> polygamma = ops.Polygamma()
4274        >>> output = polygamma(a, x)
4275        >>> print(output)
4276        [1.644934 8.934802]
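        >>> # Illustrative check of the first entry against the known identity
        >>> # polygamma(1, 1) = pi**2 / 6:
        >>> print(np.isclose(output.asnumpy()[0], np.pi ** 2 / 6))
        True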
4277        >>> a = Tensor(np.array(2), mindspore.int64)
4278        >>> output = polygamma(a, x)
4279        >>> print(output)
4280        [-2.404114  -0.8287967]
4281        >>> a = Tensor(np.array(3), mindspore.int64)
4282        >>> output = polygamma(a, x)
4283        >>> print(output)
4284        [  6.4939404 193.40909  ]
4285        >>> a = Tensor(np.array(4), mindspore.int64)
4286        >>> output = polygamma(a, x)
4287        >>> print(output)
4288        [-24.886265   -3.4742498]
4289    """
4290
4291    @prim_attr_register
4292    def __init__(self):
4293        """Initialize Polygamma"""
4294        self.init_prim_io_names(inputs=['a', 'x'], outputs=['y'])
4295
4296
4297class Cross(Primitive):
4298    """
4299    Returns the cross product of vectors in dimension `dim` of x1 and x2.
4300
4301    .. warning::
4302        This is an experimental API that is subject to change or deletion.
4303
4304    Refer to :func:`mindspore.ops.cross` for more details.
4305
4306    Args:
        dim (int): The specified dimension along which to compute the cross product. Default: ``-65530`` ,
            a sentinel meaning the first dimension found with size 3 is used.
4308
4309    Inputs:
4310        - **x1** (Tensor) - Input Tensor.
4311        - **x2** (Tensor) - Another input Tensor, must have the same shape and
4312          the same type as `x1`, and the size of their `dim` dimension should be 3.
4313
4314    Outputs:
4315        Tensor, has the same shape and type as inputs.
4316
4317    Supported Platforms:
4318        ``Ascend`` ``CPU``
4319
4320    Examples:
4321        >>> import mindspore
4322        >>> import numpy as np
4323        >>> from mindspore import Tensor
4324        >>> from mindspore import dtype as mstype
4325        >>> from mindspore import ops
4326        >>> cross = ops.Cross(dim = 0)
4327        >>> x1 = Tensor([1, 2, 3], mstype.int8)
4328        >>> x2 = Tensor([1, 2, 3], mstype.int8)
4329        >>> output = cross(x1, x2)
4330        >>> print(output)
4331        [0 0 0]
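        >>> # A non-parallel pair for illustration: the cross product of the
        >>> # standard basis vectors e1 and e2 is e3.
        >>> output = cross(Tensor([1, 0, 0], mstype.int8), Tensor([0, 1, 0], mstype.int8))
        >>> print(output)
        [0 0 1]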
4332    """
4333
4334    @prim_attr_register
4335    def __init__(self, dim=-65530):
4336        validator.check_value_type('dim', dim, [int], self.name)
4337        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
4338
4339
4340class RaggedRange(Primitive):
4341    """
4342    Returns a `RaggedTensor` containing the specified sequences of numbers.
4343
4344    Args:
        Tsplits (mindspore.dtype): The dtype of the output `rt_nested_splits`, either mindspore.int32
            or mindspore.int64.
4346
4347    Inputs:
4348        - **starts** (Tensor) - The starts of each range, whose type is int32, int64, float32 or float64,
4349          and shape is 0D or 1D.
4350        - **limits** (Tensor) - The limits of each range, whose type and shape should be same as input `starts`.
4351        - **deltas** (Tensor) - The deltas of each range, whose type and shape should be same as input `starts`,
4352          and each element in the tensor should not be equal to 0.
4353
4354    Outputs:
4355        - **rt_nested_splits** (Tensor) - The nested splits of the return `RaggedTensor`,
4356          and type of the tensor is `Tsplits`,
4357          shape of the tensor is equal to shape of input `starts` plus 1.
4358        - **rt_dense_values**  (Tensor) - The dense values of the return `RaggedTensor`,
4359          and type of the tensor should be same as input `starts`.
          Let the i-th elements of `starts`, `limits` and `deltas` define the i-th range, then

          - if the type of `starts`, `limits` and `deltas`
            is int32 or int64, the size of the output `rt_dense_values` is equal to
            :math:`sum((abs(limits[i] - starts[i]) + abs(deltas[i]) - 1) / abs(deltas[i]))`,
          - if the type of `starts`, `limits` and `deltas`
            is float32 or float64, the size of the output `rt_dense_values` is equal to
            :math:`sum(ceil(abs((limits[i] - starts[i]) / deltas[i])))`.
4368
4369    Raises:
4370        TypeError: If any input is not Tensor.
4371        TypeError: If the type of `starts` is not one of the following dtype: int32, int64, float32, float64.
4372        TypeError: If the type of `starts`, `limits` and `deltas` are not same.
4373        TypeError: If the type of `Tsplits` is not one of the following dtype: mstype.int32, mstype.int64.
4374        ValueError: If the inputs `starts`, `limits`, and `deltas` are not 0D or 1D.
4375        ValueError: If the input `deltas` is equal to 0.
4376        ValueError: If the shape of `starts`, `limits` and `deltas` are not same.
4377
4378    Supported Platforms:
4379        ``Ascend`` ``GPU`` ``CPU``
4380
4381    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> raggedrange = ops.RaggedRange(Tsplits=mstype.int64)
4383        >>> starts = Tensor(np.array([2, 5, 8]).astype(np.int32))
4384        >>> limits = Tensor(np.array([3, 5, 12]).astype(np.int32))
4385        >>> deltas = Tensor(np.array([1, 1, 1]).astype(np.int32))
4386        >>> (rt_nested_splits, rt_dense_values) = raggedrange(starts, limits, deltas)
4387        >>> print(rt_nested_splits)
4388        [0 1 1 5]
4389        >>> print(rt_dense_values)
4390        [ 2  8  9 10 11]
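        >>> # The three requested ranges are [2, 3), [5, 5) and [8, 12) with step 1,
        >>> # i.e. [2], [] and [8, 9, 10, 11]; rt_nested_splits holds their
        >>> # start/end offsets within rt_dense_values.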
4391    """
4392
4393    @prim_attr_register
4394    def __init__(self, Tsplits):
4395        """Initialize RaggedRange."""
4396        self.add_prim_attr("max_length", 1000000)
4397        self.init_prim_io_names(inputs=['starts', 'limits', 'deltas'], outputs=['rt_nested_splits', 'rt_dense_values'])
4398        validator.check_value_type("Tsplits", Tsplits, [mstype.Type], self.name)
4399        valid_values = (mstype.int64, mstype.int32)
4400        validator.check_type_name("Tsplits", Tsplits, valid_values, self.name)
4401
4402
4403class Median(Primitive):
4404    """
    Computes the median and its corresponding indices of the input tensor in the `axis` dimension.
    If `global_median` is ``True`` , computes the median of all elements of the tensor.
4407
4408    .. warning::
4409        - `indices` does not necessarily contain the first occurrence of each median value found in the `input`,
4410          unless it is unique. The specific implementation of this API is device-specific.
4411          The results may be different on CPU and GPU.
4412        - When attr `global_median` is ``True`` , the value of the second output tensor `indices` is meaningless.
4413
4414    Args:
4415        global_median (bool, optional): Whether the output tensor is the median of all
4416            input tensor elements or not. Default: ``False`` .
4417        axis (int, optional): The specified dimension to compute median. Default: ``0`` .
4418        keep_dims (bool, optional): Whether the output tensor need to retain `axis` dimension or not.
4419            Default: ``False`` .
4420        ignore_nan (bool, optional): Whether to ignore the NaN values in input Tensor. Default: ``False`` .
4421
4422    Inputs:
4423        - **x** (Tensor) - A Tensor to calculate median with.
4424
4425    Outputs:
4426        - **y** (Tensor) - Median, has the same dtype as the `x`.
4427
4428          - If `global_median` is ``True`` , the `y` has only one element.
4429          - If `keep_dims` is ``True`` , the `y` has the same shape as the `x` except the size
4430            of `y` in dimension `axis` is 1.
          - Otherwise, `y` has one dimension fewer than `x`, with dimension `axis` removed.
4432
        - **indices** (Tensor) - Indices, has the same shape as the `y`, with dtype int64.
4434
4435    Raises:
4436        TypeError: If input `x` is not a Tensor.
4437        TypeError: If `global_median` , `keep_dims` or `ignore_nan` is assigned a nonboolean value.
4438        TypeError: If `axis` is not int.
4439        ValueError: If `axis` is not in range of [-x.dim, x.dim-1].
4440
4441    Supported Platforms:
4442        ``GPU`` ``CPU``
4443
4444    Examples:
4445        >>> # case 1 : common median compute
4446        >>> from mindspore import Tensor, ops
4447        >>> import numpy as np
4448        >>> x = Tensor(np.array([[5, 1, 2],[3, 5, 7], [1, 6, 4]]).astype(np.int64))
4449        >>> median = ops.Median(global_median=False, axis=0, keep_dims=False)
4450        >>> y = median(x)
4451        >>> print(y)
4452        (Tensor(shape=[3], dtype=Int64, value= [3, 5, 4]), Tensor(shape=[3], dtype=Int64, value= [1, 1, 2]))
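        >>> # Column-wise medians of [[5, 1, 2], [3, 5, 7], [1, 6, 4]] are
        >>> # [3, 5, 4], found at row indices [1, 1, 2].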
4453        >>> # case 2 : global median compute
4454        >>> from mindspore import Tensor, ops
4455        >>> import numpy as np
4456        >>> x = Tensor(np.array([[1, 7, 6],[5, 1, 3],[9, 17, 1]]).astype(np.int32))
4457        >>> median = ops.Median(global_median=True)
4458        >>> y = median(x)
4459        >>> print(y)
4460        (Tensor(shape=[], dtype=Int32, value= 5), Tensor(shape=[], dtype=Int64, value= 0))
4461    """
4462
4463    @prim_attr_register
4464    def __init__(self, global_median=False, axis=0, keep_dims=False, ignore_nan=False):
4465        self.add_prim_attr("cust_aicpu", self.name)
4466        validator.check_value_type("global_median", global_median, [bool], self.name)
4467        self.global_median = global_median
4468        if global_median is False:
4469            validator.check_value_type("axis", axis, [int], self.name)
4470            validator.check_value_type("keep_dims", keep_dims, [bool], self.name)
4471        self.init_prim_io_names(inputs=['x'], outputs=['y', 'indices'])
4472        validator.check_value_type("ignore_nan", ignore_nan, [bool], self.name)
4473
4474
4475class SparseSegmentMean(Primitive):
4476    """
4477    Computes the mean along sparse segments of a Tensor.
4478
4479    Refer to :func:`mindspore.ops.sparse_segment_mean` for more details.
4480
4481    Supported Platforms:
4482        ``GPU`` ``CPU``
4483
4484    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor
        >>> from mindspore.ops.operations.math_ops import SparseSegmentMean
4487        >>> x = Tensor([[0, 1, 2], [1, 2, 3], [3, 6, 7]], dtype=mindspore.float32)
4488        >>> indices = Tensor([0, 1, 2], dtype=mindspore.int32)
4489        >>> segment_ids = Tensor([1,2,2], dtype=mindspore.int32)
4490        >>> sparse_segment_mean = SparseSegmentMean()
4491        >>> out = sparse_segment_mean(x, indices, segment_ids)
4492        >>> print(out)
4493        [[0. 0. 0.]
4494         [0. 1. 2.]
4495         [2. 4. 5.]]
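        >>> # Segment 0 collects no rows (all zeros), segment 1 averages row 0
        >>> # of x, and segment 2 averages rows 1 and 2 of x.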
4496    """
4497
4498    @prim_attr_register
4499    def __init__(self):
4500        """Initialize SparseSegmentMean"""
4501        self.init_prim_io_names(inputs=['x', 'indices', 'segment_ids'], outputs=['y'])
4502
4503
4504class Zeta(Primitive):
4505    r"""
    Compute the Hurwitz zeta function ζ(x,q) of the input Tensor.
4507
4508    .. math::
4509        \zeta \left ( x,q \right )=  \textstyle \sum_{n=0} ^ {\infty} \left ( q+n\right )^{-x}
4510
4511    .. warning::
4512        This is an experimental API that is subject to change or deletion.
4513
4514    Inputs:
4515        - **x** (Tensor) - A Tensor, types: float32, float64.
4516        - **q** (Tensor) - A Tensor, must have the same shape and type as `x`.
4517
4518    Outputs:
        Tensor, has the same dtype and shape as `x`.
4520
4521    Raises:
4522        TypeError: If either of `x` and `q` is not tensor.
4523        TypeError: If dtype of `x` is neither float32 nor float64.
4524        TypeError: If dtype of `q` is neither float32 nor float64.
4525        ValueError: If shape of `x` is not same as the `q`.
4526
4527    Supported Platforms:
4528        ``Ascend`` ``GPU`` ``CPU``
4529
4530    Examples:
4531        >>> import mindspore
4532        >>> import numpy as np
4533        >>> from mindspore import Tensor, ops
4534        >>> x = Tensor(np.array([10.]), mindspore.float32)
4535        >>> q = Tensor(np.array([1.]), mindspore.float32)
4536        >>> zeta = ops.Zeta()
4537        >>> z = zeta(x, q)
4538        >>> print(z)
4539        [1.0009946]
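        >>> # Illustrative identity check: zeta(2, 1) equals pi**2 / 6.
        >>> z = zeta(Tensor(np.array([2.]), mindspore.float32), q)
        >>> print(np.isclose(z.asnumpy(), np.pi ** 2 / 6))
        [ True]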
4540    """
4541
4542    @prim_attr_register
4543    def __init__(self):
4544        """Initialize Zeta"""
4545
4546
4547class Bernoulli(Primitive):
4548    """
    Randomly set the elements of the output to 0 or 1 with probability `p`, following the Bernoulli distribution.
4550
4551    .. warning::
4552        This is an experimental API that is subject to change or deletion.
4553
4554    Refer to :func:`mindspore.ops.bernoulli` for more details.
4555
4556    Args:
        seed (int, optional): The seed value for random generating. The value of `seed` must be -1 or a
            non-negative integer, and -1 means using the current timestamp. Default: ``-1`` .
4559        offset (int, optional): Used to change the starting position during the generation of
4560            random number sequence. Default: ``0`` .
4561
4562    Inputs:
4563        - **x** (Tensor) - Input Tensor.
4564        - **p** (Union[Tensor, float], optional) - Success probability, representing the probability of
4565          setting 1 for the corresponding position of the current Tensor. It has the same shape as `x`,
4566          the value of `p` must be in the range `[0, 1]`. Default: ``0.5`` .
4567
4568    Outputs:
4569        - **y** (Tensor) - with the same shape and type as `x` .
4570
4571    Supported Platforms:
4572        ``GPU`` ``CPU``
4573
4574    Examples:
4575        >>> import mindspore
4576        >>> from mindspore import Tensor, ops
4577        >>> input_x = Tensor([0.1, 0.2, 0.3], mindspore.float32)
4578        >>> bernoulli = ops.Bernoulli()
4579        >>> output = bernoulli(input_x, Tensor([1.0]))
4580        >>> print(output)
4581        [1. 1. 1.]
4582        >>> input_p = Tensor([0.0, 1.0, 1.0], mindspore.float32)
4583        >>> output = bernoulli(input_x, input_p)
4584        >>> print(output)
4585        [0. 1. 1.]
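        >>> # With elementwise probabilities [0.0, 1.0, 1.0], the first element is
        >>> # always 0 and the remaining two are always 1.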
4586    """
4587
4588    @prim_attr_register
4589    def __init__(self, seed=-1, offset=0):
4590        """Initialize Bernoulli"""
4591        self.init_prim_io_names(inputs=['x', 'p'], outputs=['y'])
4592        validator.check_value_type("seed", seed, [int], self.name)
4593        if seed != -1 and seed < 0:
4594            raise ValueError(f"Seed must be -1 or a non-negative integer, but got {seed}.")
4595
4596
4597class TridiagonalSolve(Primitive):
4598    """
    Returns the solution of tridiagonal systems of equations.

    Solves tridiagonal systems of equations of the form AX = B, where only
    the main diagonal, superdiagonal and subdiagonal of A have values.
    The types of `diagonals` and `rhs` should be the same, and
    the penultimate dimension of `diagonals` must be 3.
4605
4606    Args:
4607        partial_pivoting (bool): decide if use the method of partial_pivoting. Default: ``True`` .
4608
4609    Inputs:
        - **diagonals** (Tensor) - The input tensor A of the equation AX = B, with data type of float32,
4611          float64, complex64, complex128.
4612          The penultimate dimension of diagonals must be 3.
4613          Diagonals and rhs must have the same rank and the same type.
        - **rhs** (Tensor) - The input tensor B of the equation AX = B, with data type of float32,
4615          float64, complex64, complex128.
          The penultimate dimension of `rhs` should be the same as the last dimension of `diagonals`.
4617          Diagonals and rhs must have the same rank and the same type.
4618
4619    Outputs:
        Tensor, has the same type and shape as the input `rhs`.
4621
4622    Raises:
        TypeError: If `diagonals` and `rhs` are not float32, float64, complex64 or complex128.
        TypeError: If the arg `partial_pivoting` is not bool.
        ValueError: If the penultimate dimension of `diagonals` is not 3.
        ValueError: If the last dimension of `diagonals` is not equal to the penultimate dimension of `rhs`.
4627        ValueError: If diagonals and rhs have different rank of shape.
4628
    Supported Platforms:
        ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops.operations.math_ops import TridiagonalSolve
        >>> diagonals = Tensor(np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]]).astype(np.float32))
        >>> rhs = Tensor(np.array([[1.0], [2.0], [3.0]]).astype(np.float32))
        >>> y = TridiagonalSolve()(diagonals, rhs)
        >>> print(y)
        [[ 0. ]
         [ 1. ]
         [-0.5]]
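        >>> # Assuming the rows of `diagonals` hold the superdiagonal, main
        >>> # diagonal and subdiagonal respectively, here A = [[2, 1, 0],
        >>> # [4, 3, 2], [0, 5, 4]], which is consistent with the printed solution.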
4639    """
4640
4641    @prim_attr_register
4642    def __init__(self, partial_pivoting=True):
4643        self.init_prim_io_names(inputs=['diagonals', 'rhs'], outputs=['y'])
4644        self.partial_pivoting = validator.check_value_type(
4645            "partial_pivoting", partial_pivoting, [bool], self.name)
4646
4647
4648class Renorm(Primitive):
4649    """
    Renormalizes the sub-tensors along dimension `dim` so that the p-norm of each sub-tensor does not exceed
    `maxnorm`. A sub-tensor is left unchanged if its p-norm is less than or equal to `maxnorm`; otherwise each of
    its values is divided by the p-norm of the sub-tensor and then multiplied by `maxnorm`.
4654
4655    Refer to :func:`mindspore.ops.renorm` for more details.
4656
4657    Args:
4658        p (int): Power of norm calculation.
4659        dim (int): The dimension that expected to get the slice-tensor.
4660        maxnorm (float32): Max norm.
4661
4662    Inputs:
4663        - **x** (Tensor) - A Tensor, types: float32 or float16.
4664
4665    Outputs:
4666        Tensor, has the same dtype and shape as input.
4667
4668    Supported Platforms:
4669        ``Ascend`` ``GPU`` ``CPU``
4670
4671    Examples:
4672        >>> import mindspore
4673        >>> import numpy as np
4674        >>> from mindspore import Tensor, ops
4675        >>> x = Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]), mindspore.float32)
4676        >>> y = ops.Renorm(p=1, dim=0, maxnorm=5.)(x)
4677        >>> print(y)
4678        [[1.       1.        1.        ]
4679        [1.6666666 1.6666666 1.6666666 ]
4680        [1.6666667 1.6666667 1.6666667 ]]
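        >>> # With p=1 and maxnorm=5: row 0 has 1-norm 3 and is unchanged, while
        >>> # rows 1 and 2 (1-norms 6 and 9) are rescaled to have 1-norm 5.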
4681    """
4682
4683    @prim_attr_register
4684    def __init__(self, p, dim, maxnorm):
4685        """Initialize Renorm."""
        if int(p) <= 0:
            raise ValueError(f"Renorm op does not support non-positive norm, but got {p}.")
4688        validator.check_value_type("p", p, [int], self.name)
4689        validator.check_value_type("dim", dim, [int], self.name)
4690        validator.check_value_type("maxnorm", maxnorm, [float], self.name)
4691        self.init_prim_io_names(inputs=['x'], outputs=['y'])
4692        self.add_prim_attr("p", float(p))
4693
4694
4695class STFT(Primitive):
4696    """
4697    Applies Short-time Fourier transform (STFT) on input signal.
4698
4699    STFT segments the signal into narrow time intervals and takes the Fourier transform
4700    of each segment to quantify the change of a nonstationary signal’s frequency
4701    and phase content over time.
4702
4703    Refer to :func:`mindspore.ops.stft` for more details.
4704
4705    Args:
4706        n_fft (int): The size of Fourier transform.
4707        hop_length (int): The distance between neighboring sliding window frames.
4708        win_length (int): the size of window frame and STFT filter.
4709        normalized (bool): controls whether to return the normalized STFT results.
4710        onesided (bool): controls whether to return half of results to
4711            avoid redundancy for real inputs.
4712        return_complex (bool): If ``True`` , return a complex tensor. If False, return
4713            a real tensor with an extra last dimension for the real and imaginary components.
4714
4715    Inputs:
4716        - **x** (Tensor) - Time sequence of stft, must be either a 1-D time tensor or a 2-D tensor.
4717        - **window** (Tensor) - the optional window function.
4718
4719    Outputs:
4720        Tensor, containing the result after STFT.
4721
4722    Supported Platforms:
4723        ``Ascend`` ``CPU``
4724
4725    Examples:
4726        >>> import mindspore as ms
4727        >>> from mindspore.ops import STFT
4728        >>> import numpy as np
4729        >>> x = ms.Tensor(np.random.rand(2,7192), ms.float32)
4730        >>> window = ms.Tensor(np.random.rand(64), ms.float32)
4731        >>> stft = STFT(64, 16, 64, False, True, True)
4732        >>> output = stft(x, window)
4733        >>> print(output.shape)
4734        (2, 33, 446)
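        >>> # The output has 33 = 64 // 2 + 1 one-sided frequency bins and
        >>> # 446 = (7192 - 64) // 16 + 1 frames per batch entry.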
4735    """
4736
4737    @prim_attr_register
4738    def __init__(self, n_fft, hop_length, win_length, normalized, onesided, return_complex):
4739        """Initialize STFT."""
4740        self.init_prim_io_names(inputs=['x', 'window'], outputs=['y'])
4741        validator.check_value_type('n_fft', n_fft, [int], self.name)
4742        validator.check_value_type('hop_length', hop_length, [int], self.name)
4743        validator.check_value_type('win_length', win_length, [int], self.name)
4744        validator.check_value_type('normalized', normalized, [bool], self.name)
4745        validator.check_value_type('onesided', onesided, [bool], self.name)
4746        validator.check_value_type('return_complex', return_complex, [bool], self.name)
4747
4748
4749class CholeskySolve(Primitive):
4750    """
4751    Computes the solution of a set of linear equations with a positive definite matrix,
4752    according to its Cholesky decomposition factor `u` , and outputs the result as `c`.
4753
4754    If `upper` is set to ``True`` , `u` is upper triangular and `c` is returned such that:
4755
4756    .. math::
4757        c = (u^{T}u)^{{-1}}b
4758
4759    If `upper` is set to `False`, `u` is lower triangular and `c` is returned such that:
4760
4761    .. math::
4762        c = (uu^{T})^{{-1}}b
4763
4764    Args:
        upper (bool, optional): A flag indicating whether to treat the Cholesky factor
            as an upper or a lower triangular matrix. Default: ``False`` .
4767
4768    Inputs:
4769        - **x1** (Tensor) - Tensor of shape :math:`(*, N, M)`, indicating 2D or 3D matrices,
4770          with float32 or float64 data type.
4771        - **x2** (Tensor) - Tensor of shape :math:`(*, N, N)`, indicating 2D or 3D square matrices composed of
4772          upper or lower triangular Cholesky factor, with float32 or float64 data type.
4773          x1 and x2 must have the same type.
4774
4775    Outputs:
4776        Tensor, has the same shape and data type as `x1`.
4777
4778    Raises:
4779        TypeError: If `upper` is not a bool.
4780        TypeError: If dtype of `x1` and `x2` is not one of: float64, float32.
4781        TypeError: If `x1` is not a Tensor.
4782        TypeError: If `x2` is not a Tensor.
4783        ValueError: If `x1` and `x2` have different batch size.
4784        ValueError: If `x1` and `x2` have different row numbers.
4785        ValueError: If `x1` is not 2D or 3D matrices.
4786        ValueError: If `x2` is not 2D or 3D square matrices.
4787
4788    Supported Platforms:
4789        ``Ascend`` ``GPU`` ``CPU``
4790
4791    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x1 = Tensor(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), mindspore.float32)
4793        >>> x2 = Tensor(np.array([[2, 0, 0], [4, 1, 0], [-1, 1, 2]]), mindspore.float32)
4794        >>> net = ops.CholeskySolve()
4795        >>> y = net(x1, x2)
4796        >>> print(y)
4797        [[ 5.8125 -2.625   0.625 ]
4798         [-2.625   1.25   -0.25  ]
4799         [ 0.625  -0.25    0.25  ]]
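        >>> # Since upper=False, x2 is the lower Cholesky factor u, and y solves
        >>> # (u @ u.T) @ y = x1; with x1 = I, y is the inverse of u @ u.T.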
4800    """
4801
4802    @prim_attr_register
4803    def __init__(self, upper=False):
4804        """Initialize CholeskySolve"""
4805        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
4806        validator.check_value_type('upper', upper, [bool], self.name)
4807
4808
4809class Polar(Primitive):
4810    r"""
4811    Converts polar coordinates to Cartesian coordinates.
4812
4813    Refer to :func:`mindspore.ops.polar` for more details.
4814
4815    Inputs:
4816        - **abs** (Tensor) - Radial distance. Tensor of any dimension,
4817          must be one of the following types: float32, float64.
4818
4819        - **angle** (Tensor) - Polar angle. It has the same shape and dtype as `abs`.
4820
4821    Outputs:
4822        Tensor, has the same shape and data type as `abs`.
4823
4824    Supported Platforms:
4825        ``GPU`` ``CPU``
4826
4827    Examples:
4828        >>> import mindspore
4829        >>> import numpy as np
4830        >>> from mindspore import Tensor, ops
4831        >>> polar = ops.Polar()
4832        >>> x1 = Tensor(np.array([1, 2]), mindspore.float64)
4833        >>> x2 = Tensor(np.array([3, 4]), mindspore.float64)
4834        >>> output = polar(x1, x2)
4835        >>> print(output)
4836        [-0.9899925 +0.14112001j -1.30728724-1.51360499j]
4837        >>> x1 = Tensor(2.1, mindspore.float32)
4838        >>> x2 = Tensor(2.1, mindspore.float32)
4839        >>> output = polar(x1, x2)
4840        >>> print(output)
4841        (-1.0601766+1.8127397j)
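        >>> # Each output element equals abs * (cos(angle) + 1j * sin(angle)).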
4842    """
4843
4844    @prim_attr_register
4845    def __init__(self):
4846        """Initialize Polar"""
4847        self.init_prim_io_names(inputs=['abs', 'angle'], outputs=['y'])
4848
4849
4850class TrilIndices(Primitive):
4851    r"""
4852    Calculates the indices of the lower triangular elements in a `row` * `col` matrix
4853    and returns them as a 2-by-N Tensor.
4854
4855    .. warning::
4856        This is an experimental API that is subject to change or deletion.
4857
4858    Refer to :func:`mindspore.ops.tril_indices` for more details.
4859
4860    Args:
4861        row (int): number of rows in the 2-D matrix.
4862        col (int): number of columns in the 2-D matrix.
4863        offset (int, optional): diagonal offset from the main diagonal. Default: ``0`` .
4864        dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor.
4865            An optional data type of ``mstype.int32`` and ``mstype.int64`` . Default: ``mstype.int32`` .
4866
4867    Outputs:
4868        - **y** (Tensor) - indices of the elements in lower triangular part of matrix. The type specified by `dtype`.
4869          The shape of output is :math:`(2, tril\_size)`, where :math:`tril\_size` is the number of elements in the
4870          lower triangular matrix.
4871
4872    Supported Platforms:
4873        ``Ascend`` ``GPU`` ``CPU``
4874
4875    Examples:
4876        >>> from mindspore import ops
4877        >>> from mindspore import dtype as mstype
4878        >>> net = ops.TrilIndices(4, 3, -1, mstype.int64)
4879        >>> output = net()
4880        >>> print(output)
4881        [[1 2 2 3 3 3]
4882         [0 0 1 0 1 2]]
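        >>> # With offset=-1, the indices enumerate the strictly lower triangle of
        >>> # a 4 x 3 matrix: (1,0), (2,0), (2,1), (3,0), (3,1), (3,2).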
4883        >>> print(output.dtype)
4884        Int64
4885    """
4886
4887    @prim_attr_register
4888    def __init__(self, row, col, offset=0, dtype=mstype.int32):
4889        """Initialize TrilIndices"""
4890        self.init_prim_io_names(inputs=[], outputs=['y'])
4891        validator.check_int(row, 0, validator.GE, "row", self.name)
4892        validator.check_int(col, 0, validator.GE, "col", self.name)
4893        validator.check_value_type("offset", offset, [int], self.name)
4894        valid_values = (mstype.int32, mstype.int64)
4895        validator.check_type_name("dtype", dtype, valid_values, self.name)
4896
4897
4898class MatrixTriangularSolve(Primitive):
4899    r"""
4900    Returns a new tensor with the solution of a linear equation system with an
4901    upper or lower triangular matrix.
4902
4903    Note:
        Only the GPU platform currently supports the broadcast mechanism.
4905
4906    Args:
        lower (bool, optional): If ``True`` , the innermost matrices in `matrix`
            are lower triangular. Default: ``True`` .
        adjoint (bool, optional): If ``True`` , solve with the adjoint (conjugate transpose) of the
            innermost matrices; if ``False`` , use each matrix as given. Default: ``False`` .
4911
4912    Inputs:
4913        - **matrix** (Tensor) - Tensor of shape :math:`(*, M, M)`,
4914          with float32, float64, complex64 and complex128 data type.
4915        - **rhs** (Tensor) - Tensor of shape :math:`(*, M, N)`,
4916          with float32, float64, complex64 and complex128 data type.
4917
4918    Outputs:
4919        Tensor, has the shape of :math:`(*, M, N)` and the same data type as `matrix`.
4920
4921    Raises:
4922        TypeError: If `matrix` or `rhs` is not a Tensor.
4923        TypeError: If `lower` or `adjoint` is not bool.
4924        ValueError: For GPU platform, if the batch sizes of `matrix` and `rhs` do not satisfy broadcasting rules.
4925            For other platforms, if the batch sizes of `matrix` and `rhs` are not equal.
4926        ValueError: If the inner-most 2 dimensions of `matrix` are not equal.
4927        ValueError: If the second-last dimensions of `matrix` and `rhs` are not equal.
4928
4929    Supported Platforms:
4930        ``Ascend`` ``GPU`` ``CPU``
4931
4932    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> matrix_triangular_solve = ops.MatrixTriangularSolve(lower=True, adjoint=False)
4934        >>> matrix = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
4935        >>> rhs = np.array([[1, 0],[2, 2],[1, 5],[0, 3]])
4936        >>> output = matrix_triangular_solve(Tensor(matrix, mindspore.float32), Tensor(rhs, mindspore.float32))
4937        >>> print(output)
4938        [[ 0.33333334  0.        ]
4939         [ 1.3333333   2.        ]
4940         [ 0.6666666   5.        ]
4941         [-2.3333333  -4.        ]]
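        >>> # Forward substitution with the lower triangular `matrix`; e.g. the
        >>> # first output column is [1/3, 4/3, 2/3, -7/3].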
4942    """
4943
4944    @prim_attr_register
4945    def __init__(self, lower=True, adjoint=False):
4946        """Initialize MatrixTriangularSolve"""
4947        validator.check_value_type('adjoint', adjoint, [bool], self.name)
4948        validator.check_value_type('lower', lower, [bool], self.name)
4949
4950
4951class CompareAndBitpack(Primitive):
4952    """
4953    Compare values of `x` to `threshold` and pack resulting bits into a `uint8`.
4954
    Each comparison returns a boolean ``True`` (if x_value > threshold) or ``False`` otherwise.
4956
4957    Given an `x` shaped :math:`(s_0, s_1, ..., s_n)`, the output is a `uint8`
4958    Tensor shaped :math:`(s_0, s_1, ..., s_n / 8)`.
4959
4960    Inputs:
4961        - **x** (Tensor) - Input tensor. Values to compare against `threshold` and bitpack. The data type must be
4962          bool, float16, float32, float64, int8, int16, int32, int64.
4963          Note: Currently, the innermost dimension of the tensor must be divisible by 8.
4964        - **threshold** (Tensor) - A 0D Tensor, whose data type is same as x.
4965
4966    Outputs:
4967        Tensor, has the uint8 type.
4968
4969    Raises:
4970        TypeError: If `x` or `threshold` is not a Tensor.
        TypeError: If the dtype of `x` is not one of: bool, float16, float32, float64, int8, int16, int32, int64.
        TypeError: If the dtype of `threshold` is not the same as that of `x`.
        ValueError: If `threshold` is not a 0D Tensor.
        ValueError: If `x` is a 0D Tensor.
        ValueError: If the innermost dimension of `x`'s shape is not divisible by 8.
4976
4977    Supported Platforms:
4978        ``Ascend`` ``CPU``
4979
4980    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32)
4982        >>> threshold = Tensor(6, mindspore.float32)
4983        >>> net = ops.CompareAndBitpack()
4984        >>> output = net(x, threshold)
4985        >>> print(output)
4986        [3]
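        >>> # The eight comparisons give [F, F, F, F, F, T, T, T]; packed with the
        >>> # first element as the most significant bit, 0b00000011 == 3.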
4987    """
4988
4989    @prim_attr_register
4990    def __init__(self):
4991        """Initialize CompareAndBitPack"""
4992
4993
4994class Orgqr(Primitive):
4995    r"""
4996    Calculates the explicit representation of the orthogonal matrix :math:`Q`
4997    returned by :class:`mindspore.ops.Geqrf`.
4998
4999    .. warning::
5000        This is an experimental API that is subject to change or deletion.
5001
5002    Refer to :func:`mindspore.ops.orgqr` for more details.
5003
5004    Inputs:
5005        - **x** (Tensor) - Tensor of shape :math:`(*, M, N)`, indicating 2D or 3D matrices,
5006          with float32, float64, complex64 and complex128 data type.
5007        - **tau** (Tensor) - Indicates the reflecting coefficient in Householder transformation, it has
5008          shape :math:`(*, K)`, where `K` is less than or equal to `N`, and it has the same type as `x`.
5009
5010    Outputs:
5011        Tensor, has the same shape and data type as `x`.
5012
5013    Supported Platforms:
5014        ``Ascend`` ``GPU`` ``CPU``
5015
5016    Examples:
5017        >>> import mindspore
5018        >>> import numpy as np
5019        >>> from mindspore import Tensor, ops
5020        >>> x = Tensor(np.array([[-114.6, 10.9, 1.1], [-0.304, 38.07, 69.38], [-0.45, -0.17, 62.]]), mindspore.float32)
5021        >>> tau = Tensor(np.array([1.55, 1.94, 0.0]), mindspore.float32)
5022        >>> net = ops.Orgqr()
5023        >>> y = net(x, tau)
5024        >>> print(y)
5025        [[-0.54999995 -0.2128925   0.8137956 ]
5026         [ 0.47119996 -0.8752807   0.08240613]
5027         [ 0.69749993  0.42560163  0.57772595]]
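        >>> # y is a product of Householder reflections, so its columns are
        >>> # (approximately) orthonormal: y.T @ y is close to the identity.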
5028    """
5029
5030    @prim_attr_register
5031    def __init__(self):
5032        """Initialize Orgqr"""
5033        self.init_prim_io_names(inputs=['x', 'tau'], outputs=['y'])
5034
5035
5036class TriuIndices(Primitive):
5037    r"""
5038    Calculates the indices of the upper triangular elements in a `row` * `col` matrix
5039    and returns them as a 2-by-N Tensor.
5040
5041    .. warning::
5042        This is an experimental API that is subject to change or deletion.
5043
5044    Refer to :func:`mindspore.ops.triu_indices` for more details.
5045
5046    Args:
5047        row (int): number of rows in the 2-D matrix.
5048        col (int): number of columns in the 2-D matrix.
5049        offset (int, optional): diagonal offset from the main diagonal. Default: ``0`` .
5050        dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor.
5051            An optional data type of ``mstype.int32`` and ``mstype.int64`` . Default: ``mstype.int32`` .
5052
5053    Outputs:
        - **y** (Tensor) - indices of the elements in the upper triangular part of the matrix. The type is
          specified by `dtype`. The shape of output is :math:`(2, triu\_size)`, where :math:`triu\_size` is the
          number of elements in the upper triangular matrix.
5057
5058    Supported Platforms:
5059        ``Ascend`` ``GPU`` ``CPU``
5060
5061    Examples:
5062        >>> from mindspore import ops
5063        >>> from mindspore import dtype as mstype
5064        >>> net = ops.TriuIndices(5, 4, 2, mstype.int64)
5065        >>> output = net()
5066        >>> print(output)
5067        [[0 0 1]
5068         [2 3 3]]
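        >>> # With offset=2, the indices (0,2), (0,3) and (1,3) lie at least two
        >>> # diagonals above the main diagonal of a 5 x 4 matrix.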
5069        >>> print(output.dtype)
5070        Int64
5071    """
5072
5073    @prim_attr_register
5074    def __init__(self, row, col, offset=0, dtype=mstype.int32):
5075        """Initialize TriuIndices"""
5076        self.init_prim_io_names(inputs=[], outputs=['y'])
5077        validator.check_int(row, 0, validator.GE, "row", self.name)
5078        validator.check_int(col, 0, validator.GE, "col", self.name)
5079        validator.check_value_type("offset", offset, [int], self.name)
5080        valid_values = (mstype.int32, mstype.int64)
5081        validator.check_type_name("dtype", dtype, valid_values, self.name)
5082
5083
5084class Fmin(Primitive):
5085    """
5086    Computes the minimum of input tensors element-wise.
5087
    Refer to :func:`mindspore.ops.fmin` for more details.
5089
    Supported Platforms:
        ``CPU``

5093    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> x1 = Tensor(np.array([1.0, 5.0, 3.0]), mstype.float32)
5095        >>> x2 = Tensor(np.array([4.0, 2.0, 6.0]), mstype.float32)
5096        >>> fmin = ops.Fmin()
5097        >>> output = fmin(x1, x2)
5098        >>> print(output)
5099        [1. 2. 3.]
5100    """
5101
5102    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
5103
5104    @prim_attr_register
5105    def __init__(self):
5106        """Initialize Fmin"""
5107        self.add_prim_attr('ignore_nan', True)
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
5109
5110
5111class Fmax(Primitive):
5112    """
5113    Computes the maximum of input tensors element-wise.
5114
5115    .. warning::
5116        This is an experimental API that is subject to change or deletion.
5117
    Refer to :func:`mindspore.ops.fmax` for more details.
5119
5120    Supported Platforms:
5121        ``CPU``
5122
5123    Examples:
5124        >>> import mindspore
5125        >>> import numpy as np
5126        >>> from mindspore import Tensor, ops
5127        >>> x1 = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
5128        >>> x2 = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
5129        >>> fmax = ops.Fmax()
5130        >>> output = fmax(x1, x2)
5131        >>> print(output)
5132        [4. 5. 6.]
5133    """
5134
5135    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
5136
5137    @prim_attr_register
5138    def __init__(self):
5139        """Initialize Fmax"""
5140        self.add_prim_attr('ignore_nan', True)
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
5142
5143
5144class SelfAdjointEig(Primitive):
5145    r"""
5146    Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in input
5147    such that input[..., :, :] = v[..., :, :] * diag(e[..., :]).
5148    The eigenvalues are sorted in non-decreasing order.
5149
5150    Args:
         compute_v (bool): If ``True`` , eigenvectors will be computed and returned in v;
5152              If ``False`` , only the eigenvalues will be computed. Default: ``True`` .
5153
5154    Inputs:
5155         - **x** (Tensor) - Must be one of the following types:
5156           float64, float32, complex64, complex128. Tensor input of shape :math:`[...,N, N]`.
5157
5158    Outputs:
5159         - **eigen_value** (Tensor) - Has the same type as input, the shape is :math:`[...,N]`.
5160         - **eigen_vector** (Tensor) - If `compute_v` is `False`, it’s an empty tensor.
5161           Otherwise, it has the same type and shape as input, the shape is the same as the input.
5162
5163    Raises:
5164         TypeError: If `compute_v` is not a bool.
5165         TypeError: If dtype of `x` is not one of: float64, float32, complex64 or complex128.
5166         TypeError: If `x` is not a Tensor.
         ValueError: If `x` is not a square matrix (or a batch of square matrices).
5168
5169    Supported Platforms:
5170         ``CPU``
5171
5172    Examples:
           >>> import numpy as np
           >>> from mindspore import Tensor
           >>> from mindspore.ops.operations.math_ops import SelfAdjointEig
           >>> input_x = Tensor(np.array([[1.0, 0.0], [0.0, 2.0]]).astype(np.float32))
           >>> self_adjoint_eig = SelfAdjointEig()
           >>> eigen_value, eigen_vector = self_adjoint_eig(input_x)
5177           >>> print(eigen_value)
5178           [1.  2.]
5179           >>> print(eigen_vector)
5180           [[1.  0.]
5181            [0.  1.]]
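           >>> # The input is already diagonal, so its eigenvalues are the diagonal
           >>> # entries and the eigenvectors form the identity matrix.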
5182    """
5183
5184    @prim_attr_register
5185    def __init__(self, compute_v=True):
5186        """Initialize SelfAdjointEig."""
5187        self.init_prim_io_names(inputs=['x'], outputs=['eigen_value', 'eigen_vector'])
5188        validator.check_value_type("compute_v", compute_v, [bool], self.name)
5189
5190
5191class Cauchy(Primitive):
5192    r"""
    Create a tensor of shape `size` with random numbers drawn from a Cauchy distribution.
    It is defined as follows:
5195
5196    .. math::
5197        f(x)= \frac{1}{\pi} \frac{\sigma}{(x-median)^2 +\sigma^2}
5198
5199    Args:
5200        size (list[int]): The size of tensor.
        median (float, optional): The location parameter, specifying the location
            of the peak of the distribution. Default: ``0.0`` .
        sigma (float, optional): The scale parameter, specifying the half-width
            at half-maximum. Default: ``1.0`` .
5205
5206    Outputs:
        Tensor with Cauchy distribution data. The shape of the tensor is `size`, and the data type is float32.
5208
5209    Raises:
5210        TypeError: If `sigma` is not a float.
5211        TypeError: If `median` is not a float.
5212        TypeError: If `size` is not a list.
5213        ValueError: If `size` list is empty.
        ValueError: If any element of `size` is not a positive integer.
5215
5216    Supported Platforms:
5217        ``Ascend`` ``CPU``
5218
5219    Examples:
        >>> from mindspore import ops
        >>> size = [1]
5221        >>> net = ops.Cauchy(size)
5222        >>> y = net()
5223        >>> print(y)
5224        [0.03128606]
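        >>> # The output is random; the printed value will differ between runs.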
5225    """
5226
5227    @prim_attr_register
5228    def __init__(self, size, median=0.0, sigma=1.0):
5229        validator.check_value_type('median', median, [float], self.name)
5230        validator.check_value_type('sigma', sigma, [float], self.name)
5231        validator.check_value_type('size', size, (list), self.name)
5232        for index, size_ in enumerate(size):
5233            validator.check_positive_int(size_, 'size[%d]' % index, self.name)
5234
5235
5236class Ormqr(Primitive):
5237    r"""
5238    Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix.
    Multiplies an :math:`(m, n)` matrix C (given by `other`) with a matrix Q, where Q is represented using
    Householder reflectors (`x`, `tau`), which is the output of :func:`mindspore.ops.geqrf`.
5241
5242    Refer to :func:`mindspore.ops.ormqr` for more details.
5243
5244    .. warning::
5245        This is an experimental API that is subject to change or deletion.
5246
5247    Args:
        left (bool, optional): Controls the order of multiplication. If ``True`` , compute op(Q)*C.
            If ``False`` , compute C*op(Q). Default: ``True`` .
        transpose (bool, optional): Controls whether the matrix Q is conjugate transposed or not. Default: ``False`` .
5251
5252    Inputs:
        - **x** (Tensor) - Tensor of shape :math:`(*, mn, k)`, where the value of mn depends on `left`:
          when `left` is ``True``, the value of mn is equal to m; otherwise, the value of mn is equal to n;
          `*` is zero or more batch dimensions.
5256        - **tau** (Tensor) - Tensor of shape :math:`(*, min(mn, k))` where `*` is zero or more batch dimensions,
5257          and its type is the same as `x`.
5258        - **other** (Tensor) - Tensor of shape :math:`(*, m, n)` where `*` is zero or more batch dimensions,
5259          and its type is the same as `x`.
5260
5261    Outputs:
5262        - **y** (Tensor) - the output Tensor, has the same shape and data type as `other`.
5263
5264    Raises:
5265        TypeError: If `x` or `tau` or `other` is not Tensor.
5266        TypeError: If dtype of `x` or `tau` or `other` is not one of: float64, float32, complex64, complex128.
5267        ValueError: If `x` or `other` is less than 2D.
5268        ValueError: If rank(x) - rank(tau) != 1.
5269        ValueError: If tau.shape[:-1] != x.shape[:-2]
5270        ValueError: If other.shape[:-2] != x.shape[:-2]
5271        ValueError: If left == True, other.shape[-2] < tau.shape[-1].
5272        ValueError: If left == True, other.shape[-2] != x.shape[-2].
5273        ValueError: If left == False, other.shape[-1] < tau.shape[-1].
5274        ValueError: If left == False, other.shape[-1] != x.shape[-2].
5275
5276    Supported Platforms:
5277        ``GPU``
5278
5279    Examples:
5280        >>> import mindspore
5281        >>> import numpy as np
5282        >>> from mindspore import Tensor, ops
5283        >>> x = Tensor(np.array([[-114.6, 10.9, 1.1], [-0.304, 38.07, 69.38], [-0.45, -0.17, 62]]), mindspore.float32)
5284        >>> tau = Tensor(np.array([1.55, 1.94, 3.0]), mindspore.float32)
5285        >>> other = Tensor(np.array([[-114.6, 10.9, 1.1],
5286        ...                          [-0.304, 38.07, 69.38],
5287        ...                          [-0.45, -0.17, 62]]), mindspore.float32)
5288        >>> net = ops.Ormqr()
5289        >>> y = net(x, tau, other)
5290        >>> print(y)
5291        [[  63.82713   -13.823125 -116.28614 ]
5292         [ -53.659264  -28.157839  -70.42702 ]
5293         [ -79.54292    24.00183   -41.34253 ]]
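        >>> # With the defaults (left=True, transpose=False), y = op(Q) @ other,
        >>> # where Q is the orthogonal factor encoded by (x, tau).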
5294    """
5295
5296    @prim_attr_register
5297    def __init__(self, left=True, transpose=False):
5298        """Initialize Ormqr"""
5299        self.init_prim_io_names(inputs=['x', 'tau', 'other'], outputs=['y'])
5300        self.left = validator.check_value_type('left', left, [bool], self.name)
5301        self.transpose = validator.check_value_type('transpose', transpose, [bool], self.name)
5302        self.add_prim_attr('left', self.left)
5303        self.add_prim_attr('transpose', self.transpose)
5304
5307