# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Operators for math."""

import numpy as np

from .. import signature as sig
from .._utils import get_broadcast_shape
from ..primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
from ... import context
from ..._checkparam import Rel
from ..._checkparam import Validator as validator
from ...common import dtype as mstype
from ...common._decorator import deprecated
from ...common.tensor import Tensor


def _infer_shape_reduce(x, axis, keep_dims, prim_name):
    """Common infer for reduce operator"""

    def reduce_one_axis(one_axis):
        validator.check_int_range(one_axis, -dim, dim, Rel.INC_LEFT, 'axis', prim_name)
        if one_axis < 0:
            one_axis += dim
        axis_reduce.add(one_axis)

    validator.check_value_type('axis', axis, [int, tuple, list], prim_name)
    dim = len(x)
    axis_reduce = set()

    if isinstance(axis, int):
        reduce_one_axis(axis)
    else:
        if not axis:
            if keep_dims:
                return [1] * dim
            return []
        for index, one_axis in enumerate(axis):
            validator.check_value_type('axis[%d]' % index, one_axis, [int], prim_name)
            reduce_one_axis(one_axis)

    out_shape = []
    for i in range(dim):
        if i in axis_reduce:
            if keep_dims:
                out_shape.append(1)
        else:
            out_shape.append(x[i])
    return out_shape

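# A minimal illustrative sketch of _infer_shape_reduce (not part of the public
# API): given an input shape, a reduction axis set and keep_dims, it computes
# the output shape, e.g.
#   >>> _infer_shape_reduce([3, 4, 5], (0, 2), True, 'ReduceSum')
#   [1, 4, 1]
#   >>> _infer_shape_reduce([3, 4, 5], (0, 2), False, 'ReduceSum')
#   [4]
#   >>> _infer_shape_reduce([3, 4, 5], (), False, 'ReduceSum')  # empty axis reduces all
#   []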

class _BinaryOp(PrimitiveWithInfer):
    """
    Define binary operators.
    """

    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_attr_register
    def __init__(self):
        """Initialize _BinaryOp"""
        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])

    def infer_shape(self, x_shape, y_shape):
        return get_broadcast_shape(x_shape, y_shape, self.name)

    def infer_min_shape(self, x_shape, y_shape):
        return get_broadcast_shape(x_shape, y_shape, self.name, "min_shape")

    def infer_max_shape(self, x_shape, y_shape):
        return get_broadcast_shape(x_shape, y_shape, self.name, "max_shape")


class _MathBinaryOp(_BinaryOp):
    """
    Define math binary operators.
    """

    @staticmethod
    def do_infer_dtype(x_dtype, y_dtype, valid_dtype=mstype.number_type, prim_name=None):
        """Staticmethod of infer dtype for _MathBinaryOp."""
        args_type = {"x": x_dtype, "y": y_dtype}
        complex_types = [mstype.tensor_type(mstype.complex64), mstype.tensor_type(mstype.complex128)]
        if x_dtype in complex_types or y_dtype in complex_types:
            type_infer_dict = {
                (mstype.complex64, mstype.complex64): mstype.tensor_type(mstype.complex64),
                (mstype.complex64, mstype.float32): mstype.tensor_type(mstype.complex64),
                (mstype.float32, mstype.complex64): mstype.tensor_type(mstype.complex64),
                (mstype.complex128, mstype.complex128): mstype.tensor_type(mstype.complex128),
                (mstype.complex128, mstype.float64): mstype.tensor_type(mstype.complex128),
                (mstype.float64, mstype.complex128): mstype.tensor_type(mstype.complex128),
            }
            if (x_dtype.element_type(), y_dtype.element_type()) not in type_infer_dict:
                raise TypeError('Complex math binary op expecting Tensor [complex64, complex64], '
                                '[complex64, float32], [float32, complex64], [complex128, complex128], '
                                '[complex128, float64] or [float64, complex128], '
                                f'but got: [{x_dtype}, {y_dtype}].')
            return type_infer_dict.get((x_dtype.element_type(), y_dtype.element_type()))

        validator.check_tensors_dtypes_same_and_valid(args_type, valid_dtype, prim_name)
        return x_dtype

    def infer_dtype(self, x_dtype, y_dtype):
        return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type, self.name)

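# A minimal sketch of the complex promotion rules above: complex64 mixed with
# float32 promotes to complex64, complex128 mixed with float64 promotes to
# complex128, and any other complex pairing (e.g. complex64 with float64)
# raises a TypeError. For instance, the following call is assumed to return a
# complex64 tensor type:
#   _MathBinaryOp.do_infer_dtype(mstype.tensor_type(mstype.complex64),
#                                mstype.tensor_type(mstype.float32))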

class _BitwiseBinaryOp(_MathBinaryOp):
    """
    Define bitwise binary operators.
    """

    @prim_attr_register
    def __init__(self):
        """Initialize _BitwiseBinaryOp"""
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])

    @staticmethod
    def _check_bitwise_op_input_type(x1_type, x2_type, prim):
        args = {'x1': x1_type, 'x2': x2_type}
        valid_dtypes = mstype.int_type + mstype.uint_type
        validator.check_tensors_dtypes_same_and_valid(args, valid_dtypes, prim)
        return x1_type

    def infer_dtype(self, x1_type, x2_type):
        return _BitwiseBinaryOp._check_bitwise_op_input_type(x1_type, x2_type, self.name)


class Add(_MathBinaryOp):
    r"""
    Adds two input tensors element-wise.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors,
    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
    When the inputs are one tensor and one scalar,
    the scalar could only be a constant.

    .. math::

        out_{i} = x_{i} + y_{i}

    Inputs:
        - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
          or a tensor whose data type is number or bool.
        - **y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
          is a tensor, or a tensor whose data type is number or bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Raises:
        TypeError: If `x` and `y` are not one of the following: Tensor, Number, bool.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> # case 1: x and y are both Tensor.
        >>> add = ops.Add()
        >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
        >>> output = add(x, y)
        >>> print(output)
        [5. 7. 9.]
        >>> # case 2: x is a scalar and y is a Tensor
        >>> add = ops.Add()
        >>> x = Tensor(1, mindspore.int32)
        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
        >>> output = add(x, y)
        >>> print(output)
        [5. 6. 7.]
        >>> # the data type of x is int32, the data type of y is float32,
        >>> # and the output is the data format of higher precision float32.
        >>> print(output.dtype)
        Float32
    """

    def infer_value(self, x, y):
        if x is not None and y is not None:
            x = x.asnumpy()
            y = y.asnumpy()
            out = x + y
            out = np.array(out, x.dtype)
            return Tensor(out)
        return None

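# A short note on infer_value above (a sketch of the constant-folding hook):
# when both inputs are known constants at compile time, infer_value returns
# the already-evaluated Tensor so the frontend can fold the addition, e.g. the
# following is assumed to fold to a constant during graph compilation:
#   add = ops.Add()
#   out = add(Tensor(np.array([1.0])), Tensor(np.array([2.0])))  # folded to [3.]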

class TensorAdd(_MathBinaryOp):
    """
    Same as operator Add. TensorAdd will be deprecated in the future.
    Please use Add instead.
    """

    @deprecated("1.1", "Add", True)
    @prim_attr_register
    def __init__(self):
        """Initialize TensorAdd."""
        _MathBinaryOp.__init__(self)

    def infer_value(self, x, y):
        if x is not None and y is not None:
            x = x.asnumpy()
            y = y.asnumpy()
            out = x + y
            out = np.array(out, x.dtype)
            return Tensor(out)
        return None


class AssignAdd(PrimitiveWithInfer):
    """
    Updates a `Parameter` by adding a value to it.

    Inputs of `variable` and `value` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, the lower priority data type will be converted to
    the relatively highest priority data type.
    If `value` is a number, the number is automatically converted to a Tensor,
    and the data type is consistent with the Tensor data type involved in the operation.
    A RuntimeError will be raised when a data type conversion of the Parameter is required.

    Note:
        Since `variable` is a Parameter, its data type cannot be changed,
        so only the type of `value` is allowed to be promoted to the type of `variable`.
        And the conversion types supported by different devices differ,
        so it is recommended to use the same data type when using this operator.

    Inputs:
        - **variable** (Parameter) - The `Parameter`.
          :math:`(N,*)` where :math:`*` means any number of additional dimensions, and its rank should be less than 8.
        - **value** (Union[numbers.Number, Tensor]) - The value to be added to the `variable`.
          It must have the same shape as `variable` if it is a Tensor.
          It is recommended to use the same data type when using this operator.

    Outputs:
        Tensor, has the same data type and shape as the original `variable`.

    Raises:
        TypeError: If `value` is neither Number nor Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> class Net(nn.Cell):
        ...     def __init__(self):
        ...         super(Net, self).__init__()
        ...         self.AssignAdd = ops.AssignAdd()
        ...         self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step")
        ...
        ...     def construct(self, x):
        ...         self.AssignAdd(self.variable, x)
        ...         return self.variable
        ...
        >>> net = Net()
        >>> value = Tensor(np.ones([1]).astype(np.int64)*100)
        >>> output = net(value)
        >>> print(output)
        [101]
    """
    __mindspore_signature__ = (
        sig.make_sig('x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('value', dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self):
        """Initialize AssignAdd"""
        self.init_prim_io_names(inputs=['ref', 'value'], outputs=['output'])
        self.add_prim_attr('side_effect_mem', True)

    def infer_shape(self, variable, value):
        return value

    def infer_dtype(self, variable, value):
        args = {"variable": variable, "value": value}
        validator.check_scalar_or_tensor_types_same(args, mstype.number_type, self.name)
        return value

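# An illustrative sketch of AssignAdd semantics (assuming a Parameter named
# `step`): the update happens in place through the first, RW_WRITE input, so
# the call roughly behaves like `step += value` and also returns the updated
# value:
#   step = mindspore.Parameter(Tensor(np.array([1], np.int64)), name="step")
#   ops.AssignAdd()(step, Tensor(np.array([100], np.int64)))  # step is now [101]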

class AssignSub(PrimitiveWithInfer):
    """
    Updates a `Parameter` by subtracting a value from it.

    Inputs of `variable` and `value` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, the lower priority data type will be converted to
    the relatively highest priority data type.
    If `value` is a number, the number is automatically converted to a Tensor,
    and the data type is consistent with the Tensor data type involved in the operation.
    A RuntimeError will be raised when a data type conversion of the Parameter is required.

    Note:
        Since `variable` is a Parameter, its data type cannot be changed,
        so only the type of `value` is allowed to be promoted to the type of `variable`.
        And the conversion types supported by different devices differ,
        so it is recommended to use the same data type when using this operator.

    Inputs:
        - **variable** (Parameter) - The `Parameter`.
          :math:`(N,*)` where :math:`*` means any number of additional dimensions, and its rank should be less than 8.
        - **value** (Union[numbers.Number, Tensor]) - The value to be subtracted from the `variable`.
          It must have the same shape as `variable` if it is a Tensor.
          It is recommended to use the same data type when using this operator.

    Outputs:
        Tensor, has the same data type and shape as the original `variable`.

    Raises:
        TypeError: If `value` is neither Number nor Tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> class Net(nn.Cell):
        ...     def __init__(self):
        ...         super(Net, self).__init__()
        ...         self.AssignSub = ops.AssignSub()
        ...         self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
        ...
        ...     def construct(self, x):
        ...         self.AssignSub(self.variable, x)
        ...         return self.variable
        ...
        >>> net = Net()
        >>> value = Tensor(np.ones([1]).astype(np.int32)*100)
        >>> output = net(value)
        >>> print(output)
        [-99]
    """

    __mindspore_signature__ = (
        sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('value', dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self):
        """Initialize AssignSub"""
        self.init_prim_io_names(inputs=['ref', 'value'], outputs=['output'])
        self.add_prim_attr('side_effect_mem', True)

    def infer_shape(self, variable, value):
        return value

    def infer_dtype(self, variable, value):
        args = {"variable": variable, "value": value}
        validator.check_scalar_or_tensor_types_same(args, mstype.number_type, self.name)
        return value


class _Reduce(PrimitiveWithInfer):
    """
    Definition of the base class of reduction operators.

    Args:
         keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
                           If false, don't keep these dimensions. Default: False.
    """

    __mindspore_signature__ = (
        sig.make_sig('input_x'),
        sig.make_sig('axis', default=())
    )

    @prim_attr_register
    def __init__(self, keep_dims=False):
        """Initialize Reduce"""
        validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
        self.init_prim_io_names(inputs=['input_x', 'axis'], outputs=['y'])

    def __call__(self, x, axis=()):
        args = [x, axis]
        output = _run_op(self, self.name, args)
        return output

    def do_infer(self, input_x, axis, valid_dtype=mstype.number_type):
        """Return meta infos of input parameters."""
        axis_v = axis['value']
        input_shp = input_x['shape']
        args = {'input_x': input_x['dtype']}
        validator.check_tensors_dtypes_same_and_valid(args, valid_dtype, self.name)
        if not isinstance(axis['dtype'], mstype.tensor_type) and axis_v is None:
            raise ValueError(f"For '{self.name}', the 'axis' cannot be None, but got {axis}.")
        if -1 in input_shp:
            # Dynamic shape: -1 marks an unknown dimension, so infer the
            # min/max output shapes instead of a single static shape.
            if axis_v is None:
                max_v = max(input_shp)
                if 'max_shape' in input_x and 'min_shape' in input_x:
                    input_max_shp = input_x['max_shape']
                    max_v = max(input_max_shp)
                axis_shape_list = axis['shape']
                if len(axis_shape_list) != 1:
                    raise ValueError(f"For '{self.name}', the shape of 'axis' must be 1-D, but "
                                     f"got {len(axis_shape_list)}.")
                axis_shape = axis_shape_list[0]
                if axis_shape == -1 and not self.keep_dims:
                    out_shape = np.array([-2]).tolist()
                    output_min_shape = np.ones_like(input_shp).tolist()
                    output_max_shape = max_v * np.ones_like(input_shp)
                    output_max_shape = output_max_shape.tolist()
                elif not self.keep_dims:
                    out_shape = -1 * np.ones_like(input_shp[:-axis_shape])
                    out_shape = out_shape.tolist()
                    output_min_shape = np.ones_like(out_shape).tolist()
                    output_max_shape = max_v * np.ones_like(out_shape)
                    output_max_shape = output_max_shape.tolist()
                else:
                    out_shape = -1 * np.ones_like(input_shp)
                    out_shape = out_shape.tolist()
                    output_min_shape = np.ones_like(input_shp).tolist()
                    output_max_shape = max_v * np.ones_like(input_shp)
                    output_max_shape = output_max_shape.tolist()
            else:
                out_shape = _infer_shape_reduce(input_shp, axis_v, self.keep_dims, self.name)
                output_max_shape = _infer_shape_reduce(input_x['max_shape'], axis_v, self.keep_dims, self.name)
                output_min_shape = _infer_shape_reduce(input_x['min_shape'], axis_v, self.keep_dims, self.name)
        else:
            if axis_v is None:
                raise ValueError(f"For '{self.name}', the 'axis' cannot be None.")
            out_shape = _infer_shape_reduce(input_shp, axis_v, self.keep_dims, self.name)
            output_max_shape = out_shape
            output_min_shape = out_shape

        value = None
        if input_x['value'] is not None:
            # Constant folding: if the input value is known at compile time,
            # evaluate the reduction with the matching numpy function.
            prim_map = {
                'ReduceSum': np.sum,
                'ReduceMax': np.max,
                'ReduceMin': np.min,
            }
            np_reduce_func = prim_map.get(self.name, None)

            if np_reduce_func is not None:
                value = input_x['value'].asnumpy()
                if not axis_v:
                    axis_v = tuple(range(len(input_x['shape'])))
                value = np_reduce_func(value, axis_v, keepdims=self.keep_dims)
                value = np.array(value)
                value = Tensor(value)
        return {'shape': out_shape,
                'min_shape': output_min_shape,
                'max_shape': output_max_shape,
                'dtype': input_x['dtype'],
                'value': value}

    def __infer__(self, input_x, axis):
        return self.do_infer(input_x, axis)

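# A brief sketch of how a concrete reduce primitive flows through do_infer
# (hypothetical values for illustration): for ReduceSum with a static input of
# shape (3, 4, 5) and axis=1, the inferred output shape is (3, 5) when
# keep_dims is False, and if the input is a known constant the 'value' entry
# is pre-computed via np.sum, e.g.
#   np.sum(np.ones((3, 4, 5)), 1, keepdims=False).shape  # (3, 5)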

class ReduceMean(_Reduce):
    """
    Reduces a dimension of a tensor by averaging all elements in the dimension, by default. It can also
    reduce a dimension of `x` along the given `axis`. Whether the dimensions of the output and the input are
    the same is controlled by `keep_dims`.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
                          If false, don't keep these dimensions. Default: False.

    Inputs:
        - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
          :math:`(N,*)` where :math:`*` means any number of additional dimensions, and its rank should be less than 8.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(`x`), rank(`x`)).

    Outputs:
        Tensor, has the same dtype as the `x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the mean of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        TypeError: If `axis` is not one of the following: int, tuple or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = ops.ReduceMean(keep_dims=True)
        >>> output = op(x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
        >>> # case 1: Reduces a dimension by averaging all elements in the dimension.
        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
        ...                      [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
        ...                      [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
        >>> output = op(x)
        >>> print(output)
        [[[5.]]]
        >>> print(output.shape)
        (1, 1, 1)
        >>> # case 2: Reduces a dimension along the axis 0
        >>> output = op(x, 0)
        >>> print(output)
        [[[4. 4. 4. 4. 4. 4.]
          [5. 5. 5. 5. 5. 5.]
          [6. 6. 6. 6. 6. 6.]]]
        >>> # case 3: Reduces a dimension along the axis 1
        >>> output = op(x, 1)
        >>> print(output)
        [[[2. 2. 2. 2. 2. 2.]]
         [[5. 5. 5. 5. 5. 5.]]
         [[8. 8. 8. 8. 8. 8.]]]
        >>> # case 4: Reduces a dimension along the axis 2
        >>> output = op(x, 2)
        >>> print(output)
        [[[1.       ]
          [2.       ]
          [3.       ]]
         [[4.       ]
          [5.       ]
          [6.       ]]
         [[7.0000005]
          [8.       ]
          [9.       ]]]
    """


class ReduceSum(_Reduce):
    """
    Reduces a dimension of a tensor by summing all elements in the dimension, by default. It can also
    reduce a dimension of `x` along the given `axis`. Whether the dimensions of the output and the input are
    the same is controlled by `keep_dims`.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
                          If false, don't keep these dimensions. Default: False.

    Inputs:
         - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
           :math:`(N,*)` where :math:`*` means any number of additional dimensions, and its rank should be less than 8.
         - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
           Only constant value is allowed. Must be in the range [-rank(`x`), rank(`x`)).

    Outputs:
        Tensor, has the same dtype as the `x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the sum of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        ValueError: If `axis` is None.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = ops.ReduceSum(keep_dims=True)
        >>> output = op(x, 1)
        >>> output.shape
        (3, 1, 5, 6)
        >>> # case 1: Reduces a dimension by summing all elements in the dimension.
        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
        ...                      [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
        ...                      [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
        >>> output = op(x)
        >>> print(output)
        [[[270.]]]
        >>> print(output.shape)
        (1, 1, 1)
        >>> # case 2: Reduces a dimension along axis 0.
        >>> output = op(x, 0)
        >>> print(output)
        [[[12. 12. 12. 12. 12. 12.]
          [15. 15. 15. 15. 15. 15.]
          [18. 18. 18. 18. 18. 18.]]]
        >>> # case 3: Reduces a dimension along axis 1.
        >>> output = op(x, 1)
        >>> print(output)
        [[[ 6.  6.  6.  6.  6.  6.]]
         [[15. 15. 15. 15. 15. 15.]]
         [[24. 24. 24. 24. 24. 24.]]]
        >>> # case 4: Reduces a dimension along axis 2.
        >>> output = op(x, 2)
        >>> print(output)
        [[[ 6.]
          [12.]
          [18.]]
         [[24.]
          [30.]
          [36.]]
         [[42.]
          [48.]
          [54.]]]
    """

    @prim_attr_register
    def __init__(self, keep_dims=False):
        """Initialize ReduceSum"""
        super(ReduceSum, self).__init__(keep_dims)
        self.__setattr_flag__ = True


class ReduceAll(_Reduce):
    """
    Reduces a dimension of a tensor by the "logical AND" of all elements in the dimension, by default. It can also
    reduce a dimension of `x` along the given `axis`. Whether the dimensions of the output and the input are the same
    is controlled by `keep_dims`.

    Args:
       keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
                         If false, don't keep these dimensions.
                         Default: False, don't keep these reduced dimensions.

    Inputs:
        - **x** (Tensor[bool]) - The input tensor. The dtype of the tensor to be reduced is bool.
          :math:`(N,*)` where :math:`*` means any number of additional dimensions, and its rank should be less than 8.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(x), rank(x)).

    Outputs:
        Tensor, the dtype is bool.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the "logical AND" of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        TypeError: If `axis` is not one of the following: int, tuple or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([[True, False], [True, True]]))
        >>> op = ops.ReduceAll(keep_dims=True)
        >>> # case 1: Reduces a dimension by the "logical AND" of all elements in the dimension.
        >>> output = op(x)
        >>> print(output)
        [[False]]
        >>> print(output.shape)
        (1, 1)
        >>> # case 2: Reduces a dimension along axis 0.
        >>> output = op(x, 0)
        >>> print(output)
        [[ True False]]
        >>> # case 3: Reduces a dimension along axis 1.
        >>> output = op(x, 1)
        >>> print(output)
        [[False]
         [ True]]
    """

    def __infer__(self, input_x, axis):
        return self.do_infer(input_x, axis, (mstype.bool_,))


class ReduceAny(_Reduce):
    """
    Reduces a dimension of a tensor by the "logical OR" of all elements in the dimension, by default. It can also
    reduce a dimension of `x` along the given `axis`. Whether the dimensions of the output and the input are the same
    is controlled by `keep_dims`.

    Args:
       keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
                         If false, don't keep these dimensions.
                         Default: False, don't keep these reduced dimensions.

    Inputs:
        - **x** (Tensor[bool]) - The input tensor. The dtype of the tensor to be reduced is bool.
          :math:`(N,*)` where :math:`*` means any number of additional dimensions, and its rank should be less than 8.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(x), rank(x)).

    Outputs:
        Tensor, the dtype is bool.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the "logical OR" of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        TypeError: If `axis` is not one of the following: int, tuple or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([[True, False], [True, True]]))
        >>> op = ops.ReduceAny(keep_dims=True)
        >>> # case 1: Reduces a dimension by the "logical OR" of all elements in the dimension.
        >>> output = op(x)
        >>> print(output)
        [[ True]]
        >>> print(output.shape)
        (1, 1)
        >>> # case 2: Reduces a dimension along axis 0.
        >>> output = op(x, 0)
        >>> print(output)
        [[ True  True]]
        >>> # case 3: Reduces a dimension along axis 1.
        >>> output = op(x, 1)
        >>> print(output)
        [[ True]
         [ True]]
    """

    def __infer__(self, input_x, axis):
        return self.do_infer(input_x, axis, (mstype.bool_,))


class ReduceMax(_Reduce):
    """
    Reduces a dimension of a tensor by the maximum value in this dimension, by default. It can also
    reduce a dimension of `x` along the given `axis`. Whether the dimensions of the output and the input are the same
    is controlled by `keep_dims`.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
                          If false, don't keep these dimensions.
                          Default: False, don't keep these reduced dimensions.

    Inputs:
         - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
           :math:`(N,*)` where :math:`*` means any number of additional dimensions, and its rank should be less than 8.
         - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
           Only constant value is allowed. Must be in the range [-rank(x), rank(x)).

    Outputs:
        Tensor, has the same dtype as the `x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the maximum of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        TypeError: If `axis` is not one of the following: int, tuple or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = ops.ReduceMax(keep_dims=True)
        >>> output = op(x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
        >>> # case 1: Reduces a dimension by the maximum value of all elements in the dimension.
        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
        ...                      [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
        ...                      [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
        >>> output = op(x)
        >>> print(output)
        [[[9.]]]
        >>> print(output.shape)
        (1, 1, 1)
        >>> # case 2: Reduces a dimension along axis 0.
        >>> output = op(x, 0)
        >>> print(output)
        [[[7. 7. 7. 7. 7. 7.]
          [8. 8. 8. 8. 8. 8.]
          [9. 9. 9. 9. 9. 9.]]]
        >>> # case 3: Reduces a dimension along axis 1.
        >>> output = op(x, 1)
        >>> print(output)
        [[[3. 3. 3. 3. 3. 3.]]
         [[6. 6. 6. 6. 6. 6.]]
         [[9. 9. 9. 9. 9. 9.]]]
        >>> # case 4: Reduces a dimension along axis 2.
        >>> output = op(x, 2)
        >>> print(output)
        [[[1.]
          [2.]
          [3.]]
         [[4.]
          [5.]
          [6.]]
         [[7.]
          [8.]
          [9.]]]
    """

    @prim_attr_register
    def __init__(self, keep_dims=False):
        """Initialize ReduceMax."""
        super(ReduceMax, self).__init__(keep_dims)
        self.__setattr_flag__ = True

    def __infer__(self, input_x, axis):
        return self.do_infer(input_x, axis, mstype.number_type + (mstype.bool_,))


class ReduceMin(_Reduce):
    """
    Reduces a dimension of a tensor by the minimum value in the dimension, by default. It can also
    reduce a dimension of `x` along the given `axis`. Whether the dimensions of the output and the input are the same
    is controlled by `keep_dims`.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
                          If false, don't keep these dimensions.
                          Default: False, don't keep these reduced dimensions.

    Inputs:
        - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
          :math:`(N,*)` where :math:`*` means any number of additional dimensions, and its rank should be less than 8.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(x), rank(x)).

    Outputs:
        Tensor, has the same dtype as the `x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the minimum of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        TypeError: If `axis` is not one of the following: int, tuple or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = ops.ReduceMin(keep_dims=True)
        >>> output = op(x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
        >>> # case 1: Reduces a dimension by the minimum value of all elements in the dimension.
        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
        ...                      [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
        ...                      [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
        >>> output = op(x)
        >>> print(output)
        [[[1.]]]
        >>> print(output.shape)
        (1, 1, 1)
        >>> # case 2: Reduces a dimension along axis 0.
        >>> output = op(x, 0)
        >>> print(output)
        [[[1. 1. 1. 1. 1. 1.]
          [2. 2. 2. 2. 2. 2.]
          [3. 3. 3. 3. 3. 3.]]]
        >>> # case 3: Reduces a dimension along axis 1.
        >>> output = op(x, 1)
        >>> print(output)
        [[[1. 1. 1. 1. 1. 1.]]
         [[4. 4. 4. 4. 4. 4.]]
         [[7. 7. 7. 7. 7. 7.]]]
        >>> # case 4: Reduces a dimension along axis 2.
        >>> output = op(x, 2)
        >>> print(output)
        [[[1.]
          [2.]
          [3.]]
         [[4.]
          [5.]
          [6.]]
         [[7.]
          [8.]
          [9.]]]
    """


class ReduceProd(_Reduce):
    """
    Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. It can also
    reduce a dimension of `x` along the given `axis`. Whether the dimensions of the output and the input are the same
    is controlled by `keep_dims`.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
                          If false, don't keep these dimensions.
                          Default: False, don't keep these reduced dimensions.

    Inputs:
        - **x** (Tensor[Number]) - The input tensor. The dtype of the tensor to be reduced is number.
          :math:`(N,*)` where :math:`*` means any number of additional dimensions, and its rank should be less than 8.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(x), rank(x)).

    Outputs:
        Tensor, has the same dtype as the `x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the product of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `x` is not a Tensor.
        TypeError: If `axis` is not one of the following: int, tuple or list.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = ops.ReduceProd(keep_dims=True)
        >>> output = op(x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
        >>> # case 1: Reduces a dimension by multiplying all elements in the dimension.
        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
        ...                      [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
        ...                      [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
        >>> output = op(x)
        >>> print(output)
        [[[2.2833798e+33]]]
        >>> print(output.shape)
        (1, 1, 1)
        >>> # case 2: Reduces a dimension along axis 0.
        >>> output = op(x, 0)
        >>> print(output)
        [[[ 28.  28.  28.  28.  28.  28.]
          [ 80.  80.  80.  80.  80.  80.]
          [162. 162. 162. 162. 162. 162.]]]
        >>> # case 3: Reduces a dimension along axis 1.
        >>> output = op(x, 1)
        >>> print(output)
        [[[  6.   6.   6.   6.   6.   6.]]
         [[120. 120. 120. 120. 120. 120.]]
         [[504. 504. 504. 504. 504. 504.]]]
        >>> # case 4: Reduces a dimension along axis 2.
        >>> output = op(x, 2)
        >>> print(output)
        [[[1.00000e+00]
          [6.40000e+01]
          [7.29000e+02]]
         [[4.09600e+03]
          [1.56250e+04]
          [4.66560e+04]]
         [[1.17649e+05]
          [2.62144e+05]
          [5.31441e+05]]]
    """


class CumProd(PrimitiveWithInfer):
    """
    Computes the cumulative product of the tensor `x` along `axis`.

    Args:
        exclusive (bool): If true, perform exclusive cumulative product. Default: False.
        reverse (bool): If true, reverse the result along axis. Default: False.

    Inputs:
        - **x** (Tensor[Number]) - The input tensor.
          :math:`(N,*)` where :math:`*` means any number of additional dimensions, and its rank should be less than 8.
        - **axis** (int) - The dimensions to compute the cumulative product.
          Only constant value is allowed.

    Outputs:
        Tensor, has the same shape and dtype as the `x`.

    Raises:
        TypeError: If `exclusive` or `reverse` is not a bool.
        ValueError: If `axis` is None.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> a, b, c = 1, 2, 3
        >>> x = Tensor(np.array([a, b, c]).astype(np.float32))
        >>> op0 = ops.CumProd()
        >>> output0 = op0(x, 0) # output=[a, a * b, a * b * c]
        >>> op1 = ops.CumProd(exclusive=True)
        >>> output1 = op1(x, 0) # output=[1, a, a * b]
        >>> op2 = ops.CumProd(reverse=True)
        >>> output2 = op2(x, 0) # output=[a * b * c, b * c, c]
        >>> op3 = ops.CumProd(exclusive=True, reverse=True)
        >>> output3 = op3(x, 0) # output=[b * c, c, 1]
        >>> print(output0)
        [1. 2. 6.]
        >>> print(output1)
        [1. 1. 2.]
        >>> print(output2)
        [6. 6. 3.]
        >>> print(output3)
        [6. 3. 1.]
        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [5, 3, 5]]).astype(np.float32))
        >>> output4 = op0(x, 0)
        >>> output5 = op0(x, 1)
        >>> print(output4)
        [[ 1.  2.  3.]
         [ 4. 10. 18.]
         [20. 30. 90.]]
        >>> print(output5)
        [[  1.   2.   6.]
         [  4.  20. 120.]
         [  5.  15.  75.]]
    """

    @prim_attr_register
    def __init__(self, exclusive=False, reverse=False):
        """Initialize CumProd."""
        cls_name = self.name
        self.exclusive = validator.check_value_type("exclusive", exclusive, [bool], cls_name)
        self.reverse = validator.check_value_type("reverse", reverse, [bool], cls_name)
        self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y'])

    def infer_shape(self, x_shape, axis_shape):
        return x_shape

    def infer_dtype(self, x_type, axis_type):
        cls_name = self.name
        validator.check_tensor_dtype_valid('x', x_type, mstype.number_type, cls_name)
        validator.check_subclass("axis", axis_type, mstype.int_, cls_name)
        return x_type

    def infer_value(self, x, axis):
        if axis is None:
            raise ValueError(f"For '{self.name}', the 'axis' cannot be None, but got {axis}.")

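# A sketch of the exclusive/reverse semantics in numpy terms (illustrative
# only, not how the kernel is implemented): for a 1-D x,
#   CumProd()              ~ np.cumprod(x)
#   CumProd(reverse=True)  ~ np.cumprod(x[::-1])[::-1]
# and exclusive=True shifts the products by one position so each output
# element excludes its own input, with 1 filling the vacated slot.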

class Cdist(Primitive):
    """
    Computes the batched p-norm distance between each pair of row vectors in the two input collections.

    Args:
        p (float): P value for the p-norm distance to calculate between each vector pair, in the range [0, ∞].

    Inputs:
        - **input_x** (Tensor) - Input tensor of shape :math:`(B, P, M)`.
          Letter :math:`B` represents 0 or a positive int number.
          When :math:`B` is equal to 0, it means this dimension can be ignored,
          i.e. the shape of the tensor is :math:`(P, M)`.
        - **input_y** (Tensor) - Input tensor of shape :math:`(B, R, M)`.

    Outputs:
        Tensor, has the same dtype as `input_x`, whose shape is :math:`(B, P, R)`.

    Raises:
        TypeError: If `input_x` or `input_y` is not a Tensor.
        TypeError: If dtype of `input_x` or `input_y` is neither float16 nor float32.
        TypeError: If `p` is not a float.
        ValueError: If `p` is a negative float.
        ValueError: If dimension of `input_x` is not the same as `input_y`.
        ValueError: If dimension of `input_x` or `input_y` is neither 2 nor 3.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
        >>> input_y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
        >>> op = ops.Cdist(p=2.0)
        >>> output = op(input_x, input_y)
        >>> print(output)
        [[[2.8284273 2.8284273]
          [1.4142137 1.4142137]]]
    """

    @prim_attr_register
    def __init__(self, p=2.0):
        """Initialize Cdist"""
        validator.check_value_type("p", p, [float], self.name)
        validator.check_non_negative_float(p, "p", self.name)
        self.init_prim_io_names(inputs=['input_x', 'input_y'], outputs=['output'])

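# An illustrative numpy sketch of what Cdist computes (assuming p >= 1; the
# operator also accepts other values of p): entry (b, i, j) is the p-norm of
# the difference between row i of input_x and row j of input_y.
#   diff = np.abs(x[:, :, None, :] - y[:, None, :, :])   # (B, P, R, M)
#   dist = (diff ** p).sum(-1) ** (1.0 / p)              # (B, P, R)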

class MatMul(PrimitiveWithCheck):
    r"""
    Multiplies matrix `x` and matrix `y`.

    .. math::

        (Output)_{i j}=\sum_{k=1}^{p} a_{i k} b_{k j}=a_{i 1} b_{1 j}+a_{i 2} b_{2 j}+\cdots+a_{i p} b_{p j}, p\in N

    where :math:`i,j` indicate the element in the i-th row and j-th column of the output.

    Args:
        transpose_a (bool): If true, `x` is transposed before multiplication. Default: False.
        transpose_b (bool): If true, `y` is transposed before multiplication. Default: False.

    Inputs:
        - **x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(N, C)`. If
          `transpose_a` is True, its shape must be :math:`(N, C)` after transpose.
        - **y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(C, M)`. If
          `transpose_b` is True, its shape must be :math:`(C, M)` after transpose.

    Outputs:
        Tensor, the shape of the output tensor is :math:`(N, M)`.

    Raises:
        TypeError: If `transpose_a` or `transpose_b` is not a bool.
        ValueError: If the column of matrix dimensions of `x` is not equal to
                    the row of matrix dimensions of `y`.
        ValueError: If length of shape of `x` or `y` is not equal to 2.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.ones(shape=[1, 3]), mindspore.float32)
        >>> y = Tensor(np.ones(shape=[3, 4]), mindspore.float32)
        >>> matmul = ops.MatMul()
        >>> output = matmul(x, y)
        >>> print(output)
        [[3. 3. 3. 3.]]
    """

    @prim_attr_register
    def __init__(self, transpose_a=False, transpose_b=False):
        """Initialize MatMul."""
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output'])
        cls_name = self.name
        validator.check_value_type("transpose_a", transpose_a, [bool], cls_name)
        validator.check_value_type("transpose_b", transpose_b, [bool], cls_name)

    def check_shape_size(self, x1, x2):
        """Check the shape size of inputs for MatMul."""
        if len(x1) != 2 or len(x2) != 2:
            raise ValueError(f"For '{self.name}', inputs 'x' and 'y' should both be 2-D tensors, "
                             f"but got the size of 'x': ({len(x1)}) and the size of 'y': ({len(x2)}).")

    def check_shape(self, x1, x2):
        self.check_shape_size(x1, x2)
        cls_name = self.name
        # expected dimension of x, y, x:[...,a,b] y:[..., c,d], the dim size should be the same except the last two
        for i in range(len(x1) - 2):
            if x1[i] != x2[i]:
                raise ValueError(f"For '{cls_name}', the dim[{i}] of 'x' should be equal to the dim[{i}] of 'y', "
                                 f"but got 'x[{i}]': {x1[i]} and 'y[{i}]': {x2[i]}.")

        # validate whether the last two dims satisfy matrix multiplication:
        # indexing with the bool flags picks the contracted dimensions, i.e.
        # x1_col is x1's last dim unless transpose_a flips it, and x2_row is
        # x2's second-to-last dim unless transpose_b flips it.
        x1_last = x1[-2:]
        x2_last = x2[-2:]
        x1_col = x1_last[not self.transpose_a]
        x2_row = x2_last[self.transpose_b]
        if np.all(np.array(x1) != -1) and np.all(np.array(x2) != -1):
            if x1_col != x2_row:
                raise ValueError(f"For '{cls_name}', the input dimensions must be equal, but got 'x1_col': {x1_col} "
                                 f"and 'x2_row': {x2_row}. And 'x' shape {x1}(transpose_a={self.transpose_a}), "
                                 f"'y' shape {x2}(transpose_b={self.transpose_b}).")
        # set attribute
        self.add_prim_attr('transpose_x1', self.transpose_a)
        self.add_prim_attr('transpose_x2', self.transpose_b)

    def check_dtype(self, x1, x2):
        args = {"x1": x1, "x2": x2}
        validator.check_tensors_dtypes_same_and_valid(args, mstype.float_type + mstype.int_type, self.name)

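# A quick sketch of the contracted-dimension check above (hypothetical shapes):
# with transpose_a=False and transpose_b=True, x of shape (2, 3) and y of
# shape (4, 3) are compatible, because x1_col = 3 (x's last dim) matches
# x2_row = 3 (y's last dim once transpose_b flips it), giving a (2, 4) output.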

class BatchMatMul(MatMul):
    r"""
    Computes matrix multiplication between two tensors by batch.

    .. math::

        \text{output}[..., :, :] = \text{matrix}(x[..., :, :]) * \text{matrix}(y[..., :, :])

    The two input tensors must have the same rank and the rank must not be less than `3`.

    Args:
        transpose_a (bool): If true, the last two dimensions of `x` are transposed before multiplication.
            Default: False.
        transpose_b (bool): If true, the last two dimensions of `y` are transposed before multiplication.
            Default: False.

    Inputs:
        - **x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(*B, N, C)`,
          where :math:`*B` represents the batch size which can be multidimensional, :math:`N` and :math:`C` are the
          size of the last two dimensions. If `transpose_a` is True, its shape must be :math:`(*B, C, N)`.
        - **y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(*B, C, M)`. If
          `transpose_b` is True, its shape must be :math:`(*B, M, C)`.

    Outputs:
        Tensor, the shape of the output tensor is :math:`(*B, N, M)`.

    Raises:
        TypeError: If `transpose_a` or `transpose_b` is not a bool.
        ValueError: If length of shape of `x` is not equal to length of shape of `y` or
                    length of shape of `x` is less than 3.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.ones(shape=[2, 4, 1, 3]), mindspore.float32)
        >>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
        >>> batmatmul = ops.BatchMatMul()
        >>> output = batmatmul(x, y)
        >>> print(output)
        [[[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]]
         [[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]]]
        >>> x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32)
        >>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
        >>> batmatmul = ops.BatchMatMul(transpose_a=True)
        >>> output = batmatmul(x, y)
        >>> print(output)
        [[[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]]
         [[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]]]
    """

    @prim_attr_register
    def __init__(self, transpose_a=False, transpose_b=False):
        """Initialize BatchMatMul."""
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output'])
        cls_name = self.name
        validator.check_value_type("transpose_a", transpose_a, [bool], cls_name)
        validator.check_value_type("transpose_b", transpose_b, [bool], cls_name)

    def check_shape_size(self, x, y):
        if len(x) != len(y) or len(x) < 3:
            raise ValueError(f"For '{self.name}', inputs 'x' and 'y' should have the same dimension size, which "
                             f"should be no less than 3, but got 'x' size: {len(x)}, 'y' size: {len(y)}.")

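# An illustrative numpy analogue of BatchMatMul (a sketch, not the kernel):
# np.matmul applies the same batched contraction over the leading dimensions,
# so for the first example above,
#   np.matmul(np.ones((2, 4, 1, 3)), np.ones((2, 4, 3, 4))).shape  # (2, 4, 1, 4)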
1268
1269class CumSum(PrimitiveWithInfer):
1270    """
1271    Computes the cumulative sum of input tensor along axis.
1272
1273    .. math::
1274
1275        y_i = x_1 + x_2 + x_3 + ... + x_i
1276
1277    Args:
1278        exclusive (bool): If true, perform exclusive mode. Default: False.
1279        reverse (bool): If true, perform inverse cumulative sum. Default: False.
1280
1281    Inputs:
1282        - **input** (Tensor) - The input tensor to accumulate.
1283        - **axis**  (int) - The axis to accumulate the tensor's value. Only constant value is allowed.
1284          Must be in the range [-rank(input), rank(input)).
1285
1286    Outputs:
1287        Tensor, the shape of the output tensor is consistent with the input tensor's.
1288
1289    Raises:
1290        TypeError: If `exclusive` or `reverse` is not a bool.
1291        TypeError: If `axis` is not an int.
1292
1293    Supported Platforms:
1294        ``Ascend`` ``GPU`` ``CPU``
1295
1296    Examples:
1297        >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
1298        >>> cumsum = ops.CumSum()
1299        >>> # case 1: along the axis 0
1300        >>> y = cumsum(x, 0)
1301        >>> print(y)
1302        [[ 3.  4.  6. 10.]
1303         [ 4. 10. 13. 19.]
1304         [ 8. 13. 21. 26.]
1305         [ 9. 16. 28. 35.]]
1306        >>> # case 2: along the axis 1
1307        >>> y = cumsum(x, 1)
1308        >>> print(y)
1309        [[ 3.  7. 13. 23.]
1310         [ 1.  7. 14. 23.]
1311         [ 4.  7. 15. 22.]
1312         [ 1.  4. 11. 20.]]
1313        >>> # Next demonstrate exclusive and reverse, along axis 1
1314        >>> # case 3: exclusive = True
1315        >>> cumsum = ops.CumSum(exclusive=True)
1316        >>> y = cumsum(x, 1)
1317        >>> print(y)
1318        [[ 0.  3.  7. 13.]
1319         [ 0.  1.  7. 14.]
1320         [ 0.  4.  7. 15.]
1321         [ 0.  1.  4. 11.]]
1322        >>> # case 4: reverse = True
1323        >>> cumsum = ops.CumSum(reverse=True)
1324        >>> y = cumsum(x, 1)
1325        >>> print(y)
1326        [[23. 20. 16. 10.]
1327         [23. 22. 16.  9.]
1328         [22. 18. 15.  7.]
1329         [20. 19. 16.  9.]]
1330    """
1331
1332    @prim_attr_register
1333    def __init__(self, exclusive=False, reverse=False):
1334        """Initialize cumsum"""
1335        cls_name = self.name
1336        validator.check_value_type('exclusive', exclusive, [bool], cls_name)
1337        validator.check_value_type('reverse', reverse, [bool], cls_name)
1338        self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y'])
1339
1340    def __infer__(self, x, axis):
1341        cls_name = self.name
1342        x_shp = x['shape']
1343        if axis['value'] is None:
1344            raise ValueError(f"For '{self.name}', the 'axis' cannot be None, but got {axis}.")
1345        validator.check_value_type('axis', axis['value'], [int], cls_name)
1346        valid_dtypes = [mstype.uint8, mstype.int8, mstype.int32, mstype.float16, mstype.float32, mstype.float64]
1347        validator.check_tensor_dtype_valid('x', x['dtype'], valid_dtypes, cls_name)
1348        return {'shape': x_shp,
1349                'dtype': x['dtype'],
1350                'value': None}
1351
1352
1353class AddN(Primitive):
1354    """
1355    Computes addition of all input tensors element-wise.
1356
1357    All input tensors must have the same shape.
1358
1359    Inputs:
1360        - **x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
1361          is made up of multiple tensors whose dtype is number or bool to be added together.
1362
1363    Outputs:
1364        Tensor, has the same shape and dtype as each entry of the `x`.
1365
1366    Raises:
1367        TypeError: If `x` is neither tuple nor list.
1368
1369    Supported Platforms:
1370        ``Ascend`` ``GPU`` ``CPU``
1371
1372    Examples:
1373        >>> class NetAddN(nn.Cell):
1374        ...     def __init__(self):
1375        ...         super(NetAddN, self).__init__()
1376        ...         self.addN = ops.AddN()
1377        ...
1378        ...     def construct(self, *z):
1379        ...         return self.addN(z)
1380        ...
1381        >>> net = NetAddN()
1382        >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
1383        >>> y = Tensor(np.array([4, 5, 6]), mindspore.float32)
1384        >>> output = net(x, y, x, y)
1385        >>> print(output)
1386        [10. 14. 18.]
1387    """
1388
1389    @prim_attr_register
1390    def __init__(self):
1391        """Initialize AddN."""
1392        self.init_prim_io_names(inputs=["inputs"], outputs=["sum"])
1393
1394    def check_elim(self, inputs):
1395        if len(inputs) != 1:
1396            return False, None
1397        if isinstance(inputs[0], Tensor):
1398            return True, inputs[0]
1399        raise TypeError(f"For '{self.name}', the type of 'inputs[0]' should be a tensor, but "
1400                        f"got {type(inputs[0]).__name__}, "
1401                        f"or the length of 'inputs' should not equal to 1, but got ({len(inputs)}).")
1402
1403
1404class AccumulateNV2(PrimitiveWithInfer):
1405    """
1406    Computes accumulation of all input tensors element-wise.
1407
    AccumulateNV2 is similar to AddN, but there is a significant difference
    between them: AccumulateNV2 will not wait for all of its inputs to be ready
    before summing. That is to say, AccumulateNV2 is able to save
    memory when inputs are ready at different times, since the minimum temporary
    storage is proportional to the output size rather than the input size.
1413
1414    Inputs:
1415        - **x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
1416          is made up of multiple tensors whose dtype is number to be added together.
1417
1418    Outputs:
1419        Tensor, has the same shape and dtype as each entry of the `x`.
1420
1421    Raises:
1422        TypeError: If `x` is neither tuple nor list.
1423
1424    Supported Platforms:
1425        ``Ascend``
1426
1427    Examples:
1428        >>> class NetAccumulateNV2(nn.Cell):
1429        ...     def __init__(self):
1430        ...         super(NetAccumulateNV2, self).__init__()
1431        ...         self.accumulateNV2 = ops.AccumulateNV2()
1432        ...
1433        ...     def construct(self, *z):
1434        ...         return self.accumulateNV2(z)
1435        ...
1436        >>> net = NetAccumulateNV2()
1437        >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
1438        >>> y = Tensor(np.array([4, 5, 6]), mindspore.float32)
1439        >>> output = net(x, y, x, y)
1440        >>> print(output)
1441        [10. 14. 18.]
1442    """
1443
1444    @prim_attr_register
1445    def __init__(self):
1446        """Initialize AccumulateNV2."""
1447        self.__setattr_flag__ = True
1448        self.init_prim_io_names(inputs=["inputs"], outputs=["sum"])
1449
1450    def check_elim(self, inputs):
1451        if len(inputs) != 1:
1452            return False, None
1453        if isinstance(inputs[0], Tensor):
1454            return True, inputs[0]
1455        raise TypeError(f"For '{self.name}', the type of 'inputs[0]' should be a tensor, "
1456                        f"but got {type(inputs[0]).__name__}, "
1457                        f"or the length of 'inputs' should not equal to 1, but got ({len(inputs)}).")
1458
1459    def infer_shape(self, inputs):
1460        cls_name = self.name
1461        validator.check_int(len(inputs), 1, Rel.GE, "inputs", cls_name)
1462        self.add_prim_attr('n', len(inputs))
1463        shp0 = inputs[0]
1464        for i, shp in enumerate(inputs):
1465            validator.check(f"shape of inputs[{i}]", shp, 'shape of inputs[0]', shp0, Rel.EQ, cls_name)
1466        return shp0
1467
1468    def infer_dtype(self, inputs):
1469        cls_name = self.name
1470        validator.check_value_type("inputs", inputs, [tuple, list], cls_name)
1471        validator.check_int(len(inputs), 1, Rel.GE, "inputs", cls_name)
1472        args = {}
1473        for i, dtype in enumerate(inputs):
1474            args[f"inputs[{i}]"] = dtype
1475        validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type + (mstype.bool_,), cls_name)
1476        return inputs[0]
1477
1478
1479class Neg(PrimitiveWithInfer):
1480    """
1481    Returns a tensor with negative values of the input tensor element-wise.
1482
1483    .. math::
1484
1485        out_{i} = - x_{i}
1486
1487    Inputs:
1488        - **x** (Tensor) - The input tensor whose dtype is number.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
1490
1491    Outputs:
1492        Tensor, has the same shape and dtype as input.
1493
1494    Raises:
1495        TypeError: If `x` is not a Tensor.
1496
1497    Supported Platforms:
1498        ``Ascend`` ``GPU`` ``CPU``
1499
1500    Examples:
1501        >>> neg = ops.Neg()
1502        >>> x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
1503        >>> output = neg(x)
1504        >>> print(output)
1505        [-1.  -2.   1.  -2.   0.   3.5]
1506    """
1507
1508    @prim_attr_register
1509    def __init__(self):
1510        """Initialize Neg"""
1511        self.init_prim_io_names(inputs=['x'], outputs=['y'])
1512
1513    def infer_shape(self, x_shape):
1514        return x_shape
1515
1516    def infer_dtype(self, x_dtype):
1517        validator.check_tensor_dtype_valid("x", x_dtype, mstype.number_type, self.name)
1518        return x_dtype
1519
1520    def infer_value(self, input_x):
1521        if input_x is not None:
1522            input_x = input_x.asnumpy()
1523            out = np.array(-input_x, input_x.dtype)
1524            return Tensor(out)
1525
1526        return None
1527
1528
1529class InplaceAdd(PrimitiveWithInfer):
1530    """
    Adds v into specified rows of x. Computes y = x; y[i, :] += v.
1532
1533    Args:
1534        indices (Union[int, tuple]): Indices into the left-most dimension of x, and determines which rows of x
1535            to add with v. It is an integer or a tuple, whose value is in [0, the first dimension size of x).
1536
1537    Inputs:
1538        - **x** (Tensor) - The first input is a tensor whose data type is float16, float32 or int32.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
1540        - **input_v** (Tensor) - The second input is a tensor that has the same dimension sizes as x except
1541          the first dimension, which must be the same as indices's size. It has the same data type with `x`.
1542
1543    Outputs:
1544        Tensor, has the same shape and dtype as x.
1545
1546    Raises:
1547        TypeError: If `indices` is neither int nor tuple.
1548        TypeError: If `indices` is a tuple whose elements are not all int.
1549        ValueError: If length of shape of `x` is not equal to length of shape of `input_v`.
1550
1551    Supported Platforms:
1552        ``Ascend``
1553
1554    Examples:
1555        >>> indices = (0, 1)
1556        >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
1557        >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
1558        >>> inplaceAdd = ops.InplaceAdd(indices)
1559        >>> output = inplaceAdd(x, input_v)
1560        >>> print(output)
1561        [[1.5 3. ]
1562         [4.  5.5]
1563         [5.  6. ]]
1564    """
1565
1566    @prim_attr_register
1567    def __init__(self, indices):
1568        """Initialize InplaceAdd"""
1569        self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])
1570        self.indices = indices
1571        validator.check_value_type('indices', indices, [tuple, int], self.name)
1572        if isinstance(indices, int):
1573            self.indices = (indices,)
1574        for item in self.indices:
1575            validator.check_value_type("item of indices", item, [int], self.name)
1576
1577    def infer_dtype(self, x_dtype, v_dtype):
1578        args = {'x': x_dtype, 'v': v_dtype}
1579        valid_type = [mstype.int32, mstype.float16, mstype.float32]
1580        validator.check_tensors_dtypes_same_and_valid(args, valid_type, self.name)
1581        return x_dtype
1582
1583    def infer_shape(self, x_shape, v_shape):
1584        validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
1585        validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
1586                        Rel.EQ, self.name)
1587        for i in self.indices:
1588            if i < 0 or i >= x_shape[0]:
1589                raise ValueError(f"For '{self.name}', the value of 'indices' must be "
1590                                 f"in [0, {x_shape[0]}), but got {i}.")
        x_rank = len(x_shape)
        for idx in range(1, x_rank):
            validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
1594
1595        return x_shape
1596
1597
1598class InplaceSub(PrimitiveWithInfer):
1599    """
    Subtracts v from specified rows of x. Computes y = x; y[i, :] -= v.
1601
1602    Args:
1603        indices (Union[int, tuple]): Indices into the left-most dimension of x, and determines which rows of x
            to subtract with v. It is an int or a tuple, whose value is in [0, the first dimension size of x).
1605
1606    Inputs:
1607        - **x** (Tensor) - The first input is a tensor whose data type is float16, float32 or int32.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
1609        - **input_v** (Tensor) - The second input is a tensor who has the same dimension sizes as x except
1610          the first dimension, which must be the same as indices's size. It has the same data type with `x`.
1611
1612    Outputs:
1613        Tensor, has the same shape and dtype as x.
1614
1615    Raises:
1616        TypeError: If `indices` is neither int nor tuple.
1617        TypeError: If `indices` is a tuple whose elements are not all int.
1618        ValueError: If length of shape of `x` is not equal to length of shape of `input_v`.
1619
1620    Supported Platforms:
1621        ``Ascend``
1622
1623    Examples:
1624        >>> indices = (0, 1)
1625        >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
1626        >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
1627        >>> inplaceSub = ops.InplaceSub(indices)
1628        >>> output = inplaceSub(x, input_v)
1629        >>> print(output)
1630        [[0.5 1. ]
1631         [2.  2.5]
1632         [5.  6. ]]
1633    """
1634
1635    @prim_attr_register
1636    def __init__(self, indices):
1637        """Initialize InplaceSub"""
1638        self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])
1639        self.indices = indices
1640        validator.check_value_type('indices', indices, [tuple, int], self.name)
1641        if isinstance(indices, int):
1642            self.indices = (indices,)
1643        for item in self.indices:
1644            validator.check_value_type("item of indices", item, [int], self.name)
1645
1646    def infer_dtype(self, x_dtype, v_dtype):
1647        args = {'x': x_dtype, 'v': v_dtype}
1648        valid_type = [mstype.int32, mstype.float16, mstype.float32]
1649        validator.check_tensors_dtypes_same_and_valid(args, valid_type, self.name)
1650        return x_dtype
1651
1652    def infer_shape(self, x_shape, v_shape):
1653        validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
1654        validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
1655                        Rel.EQ, self.name)
1656        for i in self.indices:
1657            if i < 0 or i >= x_shape[0]:
1658                raise ValueError(f"For '{self.name}', the value of 'indices' must be "
1659                                 f"in [0, {x_shape[0]}), but got {i}.")
        x_rank = len(x_shape)
        for idx in range(1, x_rank):
            validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
1663
1664        return x_shape
1665
1666
1667class Sub(_MathBinaryOp):
1668    """
1669    Subtracts the second input tensor from the first input tensor element-wise.
1670
1671    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
1672    The inputs must be two tensors or one tensor and one scalar.
1673    When the inputs are two tensors,
1674    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
1675    When the inputs are one tensor and one scalar,
1676    the scalar could only be a constant.
1677
1678    .. math::
1679
1680        out_{i} = x_{i} - y_{i}
1681
1682    Inputs:
1683        - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
1684          or a tensor whose data type is number or bool.
1685        - **y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
1686          is a tensor, or a tensor whose data type is number or bool.
1687
1688    Outputs:
1689        Tensor, the shape is the same as the one after broadcasting,
1690        and the data type is the one with higher precision or higher digits among the two inputs.
1691
1692    Raises:
        TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
1694
1695    Supported Platforms:
1696        ``Ascend`` ``GPU`` ``CPU``
1697
1698    Examples:
1699        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
1700        >>> y = Tensor(np.array([4, 5, 6]), mindspore.int32)
1701        >>> sub = ops.Sub()
1702        >>> output = sub(x, y)
1703        >>> print(output)
1704        [-3 -3 -3]
1705    """
1706
1707    def infer_value(self, x, y):
1708        if x is not None and y is not None:
1709            x = x.asnumpy()
1710            y = y.asnumpy()
1711            out = x - y
1712            out = np.array(out, x.dtype)
1713            return Tensor(out)
1714        return None
1715
1716
1717class Mul(_MathBinaryOp):
1718    """
1719    Multiplies two tensors element-wise.
1720
1721    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
1722    The inputs must be two tensors or one tensor and one scalar.
1723    When the inputs are two tensors,
1724    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
1725    When the inputs are one tensor and one scalar,
1726    the scalar could only be a constant.
1727
1728    .. math::
1729
1730        out_{i} = x_{i} * y_{i}
1731
1732    Inputs:
1733        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
1734          a bool or a tensor whose data type is number or bool.
1735        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
1736          a bool when the first input is a tensor or a tensor whose data type is number or bool.
1737
1738    Outputs:
1739        Tensor, the shape is the same as the one after broadcasting,
1740        and the data type is the one with higher precision or higher digits among the two inputs.
1741
1742    Raises:
1743        TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
1744        ValueError: If `x` and `y` are not the same shape.
1745
1746    Supported Platforms:
1747        ``Ascend`` ``GPU`` ``CPU``
1748
1749    Examples:
1750        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
1751        >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
1752        >>> mul = ops.Mul()
1753        >>> output = mul(x, y)
1754        >>> print(output)
1755        [ 4. 10. 18.]
1756    """
1757
1758    def infer_value(self, x, y):
1759        if x is not None and y is not None:
1760            x = x.asnumpy()
1761            y = y.asnumpy()
1762            out = x * y
1763            out = np.array(out, x.dtype)
1764            return Tensor(out)
1765        return None
1766
1767
1768class SquaredDifference(_MathBinaryOp):
1769    """
1770    Subtracts the second input tensor from the first input tensor element-wise and returns square of it.
1771
1772    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
1773    The inputs must be two tensors or one tensor and one scalar.
1774    When the inputs are two tensors,
1775    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
1776    When the inputs are one tensor and one scalar,
1777    the scalar could only be a constant.
1778
1779    .. math::
1780
1781        out_{i} = (x_{i} - y_{i}) * (x_{i} - y_{i}) = (x_{i} - y_{i})^2
1782
1783    Inputs:
1784        - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
1785          or a tensor whose data type is float16, float32, int32 or bool.
1786        - **y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
1787          is a tensor or a tensor whose data type is float16, float32, int32 or bool.
1788
1789    Outputs:
1790        Tensor, the shape is the same as the one after broadcasting,
1791        and the data type is the one with higher precision or higher digits among the two inputs.
1792
1793    Raises:
        TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
1795
1796    Supported Platforms:
1797        ``Ascend`` ``GPU`` ``CPU``
1798
1799    Examples:
1800        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
1801        >>> y = Tensor(np.array([2.0, 4.0, 6.0]), mindspore.float32)
1802        >>> squared_difference = ops.SquaredDifference()
1803        >>> output = squared_difference(x, y)
1804        >>> print(output)
1805        [1. 4. 9.]
1806    """
1807
1808    def infer_dtype(self, x_dtype, y_dtype):
1809        valid_type = [mstype.float16, mstype.float32, mstype.int32]
1810        return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, valid_type, self.name)
1811
1812
1813class Square(Primitive):
1814    """
1815    Returns square of a tensor element-wise.
1816
1817    .. math::
1818
1819        out_{i} = (x_{i})^2
1820
1821    Inputs:
1822        - **x** (Tensor) - The input tensor whose dtype is number.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
1824
1825    Outputs:
1826        Tensor, has the same shape and dtype as the `x`.
1827
1828    Raises:
1829        TypeError: If `x` is not a Tensor.
1830
1831    Supported Platforms:
1832        ``Ascend`` ``GPU`` ``CPU``
1833
1834    Examples:
1835        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
1836        >>> square = ops.Square()
1837        >>> output = square(x)
1838        >>> print(output)
1839        [1. 4. 9.]
1840    """
1841
1842    @prim_attr_register
1843    def __init__(self):
1844        """Initialize Square"""
1845        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
1846
1847
1848class Rsqrt(PrimitiveWithInfer):
1849    r"""
1850    Computes reciprocal of square root of input tensor element-wise.
1851
1852    .. math::
1853
1854        out_{i} =  \frac{1}{\sqrt{x_{i}}}
1855
1856    Inputs:
1857        - **x** (Tensor) - The input of Rsqrt. Each element must be a non-negative number.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
1859
1860    Outputs:
1861        Tensor, has the same type and shape as `x`.
1862
1863    Raises:
1864        TypeError: If dtype of `x` is neither float16 nor float32.
1865
1866    Supported Platforms:
1867        ``Ascend`` ``GPU``
1868
1869    Examples:
1870        >>> input_tensor = Tensor([[4, 4], [9, 9]], mindspore.float32)
1871        >>> rsqrt = ops.Rsqrt()
1872        >>> output = rsqrt(input_tensor)
1873        >>> print(output)
1874        [[0.5        0.5       ]
1875         [0.33333334 0.33333334]]
1876    """
1877
1878    @prim_attr_register
1879    def __init__(self):
1880        """Initialize Rsqrt"""
1881        self.init_prim_io_names(inputs=['x'], outputs=['output'])
1882
1883    def infer_shape(self, x_shape):
1884        return x_shape
1885
1886    def infer_dtype(self, x_dtype):
1887        validator.check_tensor_dtype_valid("x", x_dtype, mstype.number_type, self.name)
1888        return x_dtype
1889
1890    def infer_value(self, x):
1891        if x is not None:
1892            x = x.asnumpy()
1893            out = 1.0 / np.sqrt(x)
1894            out = np.array(out, x.dtype)
1895            return Tensor(out)
1896        return None
1897
1898
1899class Sqrt(PrimitiveWithCheck):
1900    r"""
1901    Returns square root of a tensor element-wise.
1902
1903    .. math::
1904
1905        out_{i} =  \sqrt{x_{i}}
1906
1907
1908    Inputs:
1909        - **x** (Tensor) - The input tensor whose dtype is number.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
1911
1912    Outputs:
1913        Tensor, has the same shape and data type as the `x`.
1914
1915    Raises:
1916        TypeError: If `x` is not a Tensor.
1917
1918    Supported Platforms:
1919        ``Ascend`` ``GPU`` ``CPU``
1920
1921    Examples:
1922        >>> x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)
1923        >>> sqrt = ops.Sqrt()
1924        >>> output = sqrt(x)
1925        >>> print(output)
1926        [1. 2. 3.]
1927    """
1928
1929    @prim_attr_register
1930    def __init__(self):
1931        """Initialize Sqrt"""
1932        self.init_prim_io_names(inputs=['x'], outputs=['output'])
1933
1934    def check_dtype(self, x_type):
1935        validator.check_tensor_dtype_valid("x", x_type, mstype.number_type, self.name)
1936
1937    def infer_value(self, x):
1938        """Infer the value of input for Sqrt."""
1939        if x is not None:
1940            x = x.asnumpy()
1941            out = np.sqrt(x)
1942            out = np.array(out, x.dtype)
1943            return Tensor(out)
1944        return None
1945
1946
1947class Reciprocal(PrimitiveWithInfer):
1948    r"""
1949    Returns reciprocal of a tensor element-wise.
1950
1951    .. math::
1952
1953        out_{i} =  \frac{1}{x_{i}}
1954
1955    Inputs:
1956        - **x** (Tensor) - The input tensor.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
1958
1959    Outputs:
1960        Tensor, has the same shape as the `x`.
1961
1962    Raises:
1963        TypeError: If `x` is not a Tensor.
1964
1965    Supported Platforms:
1966        ``Ascend`` ``GPU`` ``CPU``
1967
1968    Examples:
1969        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
1970        >>> reciprocal = ops.Reciprocal()
1971        >>> output = reciprocal(x)
1972        >>> print(output)
1973        [1.   0.5  0.25]
1974    """
1975
1976    @prim_attr_register
1977    def __init__(self):
1978        """Initialize Reciprocal"""
1979        if context.get_context("device_target") == "GPU":
1980            self.target = "GPU"
1981        else:
1982            self.target = "OTHER"
1983        self.init_prim_io_names(inputs=['x'], outputs=['y'])
1984
1985    def infer_shape(self, x):
1986        return x
1987
1988    def infer_dtype(self, x):
1989        validator.check_subclass("x", x, mstype.tensor, self.name)
1990        return x
1991
1992    def infer_value(self, x):
1993        if x is not None:
1994            x = x.asnumpy()
1995            out = 1.0 / x
1996            out = np.array(out, x.dtype)
1997            return Tensor(out)
1998        return None
1999
2000
2001class Pow(_MathBinaryOp):
2002    """
2003    Computes a tensor to the power of the second input.
2004
2005    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2006    The inputs must be two tensors or one tensor and one scalar.
2007    When the inputs are two tensors,
2008    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
2009    When the inputs are one tensor and one scalar,
2010    the scalar could only be a constant.
2011
2012    .. math::
2013
2014        out_{i} = x_{i} ^{ y_{i}}
2015
2016    Inputs:
2017        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
2018          a bool or a tensor whose data type is number or bool.
2019        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
2020          a bool when the first input is a tensor or a tensor whose data type is number or bool.
2021
2022    Outputs:
2023        Tensor, the shape is the same as the one after broadcasting,
2024        and the data type is the one with higher precision or higher digits among the two inputs.
2025
2026    Raises:
2027        TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
2028        ValueError: If `x` and `y` are not the same shape.
2029
2030    Supported Platforms:
2031        ``Ascend`` ``GPU`` ``CPU``
2032
2033    Examples:
2034        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
2035        >>> y = 3.0
2036        >>> pow = ops.Pow()
2037        >>> output = pow(x, y)
2038        >>> print(output)
2039        [ 1.  8. 64.]
2040        >>>
2041        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
2042        >>> y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
2043        >>> pow = ops.Pow()
2044        >>> output = pow(x, y)
2045        >>> print(output)
2046        [ 1. 16. 64.]
2047    """
2048
2049    def infer_value(self, x, power):
2050        if x is not None and power is not None:
2051            x = x.asnumpy()
2052            power = power.asnumpy()
2053            out = np.power(x, power)
2054            out = np.array(out, x.dtype)
2055            return Tensor(out)
2056        return None
2057
2058
2059class Exp(PrimitiveWithInfer):
2060    r"""
2061    Returns exponential of a tensor element-wise.
2062
2063    .. math::
2064
2065        out_i = e^{x_i}
2066
2067    Inputs:
2068        - **x** (Tensor) - The input tensor.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
2070
2071    Outputs:
2072        Tensor, has the same shape and dtype as the `x`.
2073
2074    Raises:
2075        TypeError: If `x` is not a Tensor.
2076
2077    Supported Platforms:
2078        ``Ascend`` ``GPU`` ``CPU``
2079
2080    Examples:
2081        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
2082        >>> exp = ops.Exp()
2083        >>> output = exp(x)
2084        >>> print(output)
2085        [ 2.718282  7.389056 54.598152]
2086    """
2087
2088    @prim_attr_register
2089    def __init__(self):
2090        """Initialize Exp"""
2091        self.init_prim_io_names(inputs=['x'], outputs=['y'])
2092
2093    def infer_shape(self, x_shape):
2094        return x_shape
2095
2096    def infer_dtype(self, x_type):
2097        validator.check_subclass("x", x_type, mstype.tensor, self.name)
2098        return x_type
2099
2100    def infer_value(self, x):
2101        if x is not None:
2102            x = x.asnumpy()
2103            out = np.exp(x)
2104            out = np.array(out, x.dtype)
2105            return Tensor(out)
2106        return None
2107
2108
2109class Expm1(PrimitiveWithInfer):
2110    r"""
    Returns the exponential of a tensor minus 1, element-wise.
2112
2113    .. math::
2114
2115        out_i = e^{x_i} - 1
2116
2117    Inputs:
2118        - **x** (Tensor) - The input tensor. With float16 or float32 data type.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
2120
2121    Outputs:
2122        Tensor, has the same shape as the `x`.
2123
2124    Raises:
2125        TypeError: If dtype of `x` is neither float16 nor float32.
2126
2127    Supported Platforms:
2128        ``Ascend`` ``GPU`` ``CPU``
2129
2130    Examples:
2131        >>> x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32)
2132        >>> expm1 = ops.Expm1()
2133        >>> output = expm1(x)
2134        >>> print(output)
2135        [ 0.        1.718282  6.389056 53.598152]
2136    """
2137
2138    @prim_attr_register
2139    def __init__(self):
2140        """Initialize Expm1."""
2141        self.init_prim_io_names(inputs=['x'], outputs=['y'])
2142
2143    def infer_shape(self, x_shape):
2144        return x_shape
2145
2146    def infer_dtype(self, x_type):
2147        validator.check_tensor_dtype_valid("x", x_type, [mstype.float16, mstype.float32], self.name)
2148        return x_type
2149
2150
2151class HistogramFixedWidth(PrimitiveWithInfer):
2152    """
    Returns a rank 1 histogram counting the number of entries in `x` that fall into every bin. The bins are equal
    width and determined by the arguments `range` and `nbins`.
2155
2156    Args:
2157        dtype (str): An optional attribute. The dtype must be "int32". Default: "int32".
2158        nbins (int): The number of histogram bins, the type is a positive integer.
2159
2160    Inputs:
2161        - **x** (Tensor) - Numeric Tensor. Must be one of the following types: int32, float32, float16.
        - **range** (Tensor) - Must have the same data type as `x`, and the shape is [2].
2163          x <= range[0] will be mapped to hist[0], x >= range[1] will be mapped to hist[-1].
2164
2165    Outputs:
2166        Tensor, the type is int32.
2167
2168    Raises:
2169        TypeError: If `dtype` is not a str or `nbins` is not an int.
2170        ValueError: If `nbins` is less than 1.
        ValueError: If `dtype` is not 'int32'.
2172
2173    Supported Platforms:
2174        ``Ascend``
2175
2176    Examples:
2177        >>> x = Tensor([-1.0, 0.0, 1.5, 2.0, 5.0, 15], mindspore.float16)
2178        >>> range_op = Tensor([0.0, 5.0], mindspore.float16)
2179        >>> hist = ops.HistogramFixedWidth(5)
2180        >>> output = hist(x, range_op)
2181        >>> print(output)
2182        [2 1 1 0 2]
2183    """
2184
2185    @prim_attr_register
2186    def __init__(self, nbins, dtype='int32'):
2187        """Initialize HistogramFixedWidth."""
2188        self.nbins = validator.check_value_type("nbins", nbins, [int], self.name)
2189        validator.check_int(nbins, 1, Rel.GE, "nbins", self.name)
2190        valid_values = ['int32']
2191        self.dtype = validator.check_string(dtype, valid_values, "dtype", self.name)
2192        self.add_prim_attr('dtype', 3)
2193        self.init_prim_io_names(inputs=['x', 'range'], outputs=['y'])
2194
2195    def infer_shape(self, x_shape, range_shape):
2196        return (self.nbins,)
2197
2198    def infer_dtype(self, x_dtype, range_dtype):
2199        valid_dtypes = (mstype.float16, mstype.float32, mstype.int32)
2200        validator.check_tensor_dtype_valid("x", x_dtype, valid_dtypes, self.name)
2201        validator.check_tensor_dtype_valid("range", range_dtype, valid_dtypes, self.name)
2202        y_dtype = mstype.int32
2203        return y_dtype
2204
2205
2206class Log(PrimitiveWithInfer):
2207    """
2208    Returns the natural logarithm of a tensor element-wise.
2209
2210    .. math::
2211        y_i = log_e(x_i)
2212
2213    .. warning::
2214        If the input value of operator Log is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
2215        is subject to change.
2216
2217    Inputs:
2218        - **x** (Tensor) - The input tensor. The value must be greater than 0.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
2220
2221    Outputs:
2222        Tensor, has the same shape as the `x`.
2223
2224    Raises:
2225        TypeError: If `x` is not a Tensor.
2226
2227    Supported Platforms:
2228        ``Ascend`` ``GPU`` ``CPU``
2229
2230    Examples:
2231        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
2232        >>> log = ops.Log()
2233        >>> output = log(x)
2234        >>> print(output)
2235        [0.        0.6931472 1.3862944]
2236    """
2237
2238    @prim_attr_register
2239    def __init__(self):
2240        """Initialize Log."""
2241        self.init_prim_io_names(inputs=['x'], outputs=['y'])
2242
2243    def infer_shape(self, x):
2244        return x
2245
2246    def infer_dtype(self, x):
2247        validator.check_subclass("x", x, mstype.tensor, self.name)
2248        return x
2249
2250    def infer_value(self, x):
2251        if x is not None:
2252            x = x.asnumpy()
2253            out = np.log(x)
2254            out = np.array(out, x.dtype)
2255            return Tensor(out)
2256        return None
2257
2258
2259class Log1p(Primitive):
2260    """
2261    Returns the natural logarithm of one plus the input tensor element-wise.
2262
2263    Inputs:
2264        - **x** (Tensor) - The input tensor. With float16 or float32 data type.
2265          The value must be greater than -1.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
2267
2268    Outputs:
2269        Tensor, has the same shape as the `x`.
2270
2271    Raises:
2272        TypeError: If dtype of `x` is neither float16 nor float32.
2273
2274    Supported Platforms:
2275        ``Ascend`` ``GPU``
2276
2277    Examples:
2278        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
2279        >>> log1p = ops.Log1p()
2280        >>> output = log1p(x)
2281        >>> print(output)
2282        [0.6931472 1.0986123 1.609438 ]
2283    """
2284
2285    @prim_attr_register
2286    def __init__(self):
2287        """Initialize Log1p."""
2288        self.init_prim_io_names(inputs=['x'], outputs=['y'])
2289
2290
2291class Erf(PrimitiveWithInfer):
2292    r"""
2293    Computes the Gauss error function of `x` element-wise.
2294
2295    .. math::
2296
2297        erf(x)=\frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
2298
2299    Inputs:
2300        - **x** (Tensor) - The input tensor. The data type must be float16 or float32.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
2302
2303    Outputs:
2304        Tensor, has the same shape and dtype as the `x`.
2305
2306    Raises:
2307        TypeError: If dtype of `x` is neither float16 nor float32.
2308
2309    Supported Platforms:
2310        ``Ascend`` ``GPU``
2311
2312    Examples:
2313        >>> x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
2314        >>> erf = ops.Erf()
2315        >>> output = erf(x)
2316        >>> print(output)
2317        [-0.8427168   0.          0.8427168   0.99530876  0.99997765]
2318    """
2319
2320    @prim_attr_register
2321    def __init__(self):
2322        """Initialize Erf"""
2323        self.init_prim_io_names(inputs=['x'], outputs=['y'])
2324
2325    def infer_shape(self, x_shape):
2326        return x_shape
2327
2328    def infer_dtype(self, x_dtype):
2329        validator.check_tensor_dtype_valid("x", x_dtype, [mstype.float16, mstype.float32], self.name)
2330        return x_dtype
2331
2332
2333class Erfc(PrimitiveWithInfer):
2334    r"""
2335    Computes the complementary error function of `x` element-wise.
2336
2337    .. math::
2338
2339        erfc(x) = 1 - \frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
2340
2341    Inputs:
2342        - **x** (Tensor) - The input tensor. The data type must be float16 or float32.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
2344
2345    Outputs:
        Tensor, has the same shape and dtype as the `x`.
2347
2348    Raises:
2349        TypeError: If dtype of `x` is neither float16 nor float32.
2350
2351    Supported Platforms:
2352        ``Ascend`` ``GPU``
2353
2354    Examples:
2355        >>> x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
2356        >>> erfc = ops.Erfc()
2357        >>> output = erfc(x)
2358        >>> print(output)
2359        [1.8427168e+00 1.0000000e+00 1.5728319e-01 4.6912432e-03 2.2351742e-05]
2360    """
2361
2362    @prim_attr_register
2363    def __init__(self):
2364        """Initialize Erfc"""
2365        self.init_prim_io_names(inputs=['x'], outputs=['y'])
2366
2367    def infer_shape(self, x_shape):
2368        return x_shape
2369
2370    def infer_dtype(self, x_type):
2371        validator.check_tensor_dtype_valid("x", x_type, [mstype.float16, mstype.float32], self.name)
2372        return x_type
2373
2374
2375class Minimum(_MathBinaryOp):
2376    """
2377    Computes the minimum of input tensors element-wise.
2378
2379    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2380    The inputs must be two tensors or one tensor and one scalar.
2381    When the inputs are two tensors,
2382    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
2383    When the inputs are one tensor and one scalar,
2384    the scalar could only be a constant.
2385
2386    Inputs:
2387        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
2388          a bool or a tensor whose data type is number or bool.
2389        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
2390          a bool when the first input is a tensor or a tensor whose data type is number or bool.
2391
2392    Outputs:
2393        Tensor, the shape is the same as the one after broadcasting,
2394        and the data type is the one with higher precision or higher digits among the two inputs.
2395
2396    Raises:
2397        TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
2398        ValueError: If `x` and `y` are not the same shape.
2399
2400    Supported Platforms:
2401        ``Ascend`` ``GPU`` ``CPU``
2402
2403    Examples:
2404        >>> # case 1 : same data type
2405        >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
2406        >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
2407        >>> minimum = ops.Minimum()
2408        >>> output = minimum(x, y)
2409        >>> print(output)
2410        [1. 2. 3.]
2411        >>> # case 2 : different data type
2412        >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
2413        >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
2414        >>> output = minimum(x, y)
2415        >>> print(output.dtype)
2416        Float32
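        >>> # case 3 : broadcasting against a scalar tensor (an illustrative sketch)
        >>> z = Tensor(2.0, mindspore.float32)
        >>> output = minimum(x, z)
        >>> print(output)
        [1. 2. 2.]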
2417    """
2418
2419    def infer_value(self, x, y):
2420        if x is not None and y is not None:
2421            x = x.asnumpy()
2422            y = y.asnumpy()
2423            out = np.minimum(x, y)
2424            out = np.array(out, x.dtype)
2425            return Tensor(out)
2426        return None
2427
2428
2429class Maximum(_MathBinaryOp):
2430    """
2431    Computes the maximum of input tensors element-wise.
2432
2433    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2434    The inputs must be two tensors or one tensor and one scalar.
2435    When the inputs are two tensors,
2436    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
2437    When the inputs are one tensor and one scalar,
2438    the scalar could only be a constant.
2439
2440    Inputs:
2441        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
2442          a bool or a tensor whose data type is number or bool.
2443        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
2444          a bool when the first input is a tensor or a tensor whose data type is number or bool.
2445
2446    Outputs:
2447        Tensor, the shape is the same as the one after broadcasting,
2448        and the data type is the one with higher precision or higher digits among the two inputs.
2449
2450    Raises:
2451        TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
2452        ValueError: If `x` and `y` are not the same shape.
2453
2454    Supported Platforms:
2455        ``Ascend`` ``GPU`` ``CPU``
2456
2457    Examples:
2458        >>> # case 1 : same data type
2459        >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
2460        >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
2461        >>> maximum = ops.Maximum()
2462        >>> output = maximum(x, y)
2463        >>> print(output)
2464        [4. 5. 6.]
2465        >>> # case 2 : different data type
2466        >>> x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
2467        >>> y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
2468        >>> output = maximum(x, y)
2469        >>> print(output.dtype)
2470        Float32
2471    """
2472
2473    def infer_value(self, x, y):
2474        if x is not None and y is not None:
2475            x = x.asnumpy()
2476            y = y.asnumpy()
2477            out = np.maximum(x, y)
2478            out = np.array(out, x.dtype)
2479            return Tensor(out)
2480        return None
2481
2482
2483class RealDiv(_MathBinaryOp):
2484    """
2485    Divides the first input tensor by the second input tensor in floating-point type element-wise.
2486
2487    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2488    The inputs must be two tensors or one tensor and one scalar.
2489    When the inputs are two tensors,
2490    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
2491    When the inputs are one tensor and one scalar,
2492    the scalar could only be a constant.
2493
2494    .. math::
2495
2496        out_{i} = x_{i} / y_{i}
2497
2498    Inputs:
2499        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
2500          a bool or a tensor whose data type is number or bool.
2501        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
2502          a bool when the first input is a tensor or a tensor whose data type is number or bool.
2503
2504    Outputs:
2505        Tensor, the shape is the same as the one after broadcasting,
2506        and the data type is the one with higher precision or higher digits among the two inputs.
2507
2508    Raises:
2509        TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
2510        ValueError: If `x` and `y` are not the same shape.
2511
2512    Supported Platforms:
2513        ``Ascend`` ``GPU`` ``CPU``
2514
2515    Examples:
2516        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
2517        >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
2518        >>> realdiv = ops.RealDiv()
2519        >>> output = realdiv(x, y)
2520        >>> print(output)
2521        [0.25 0.4  0.5 ]
2522    """
2523
2524    def infer_value(self, x, y):
2525        if x is not None and y is not None:
2526            x = x.asnumpy()
2527            y = y.asnumpy()
2528            out = x / y
2529            out = np.array(out, x.dtype)
2530            return Tensor(out)
2531        return None
2532
2533
2534class Div(_MathBinaryOp):
2535    r"""
2536    Computes the quotient of dividing the first input tensor by the second input tensor element-wise.
2537
2538    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2539    The inputs must be two tensors or one tensor and one scalar.
2540    When the inputs are two tensors,
2541    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
2542    When the inputs are one tensor and one scalar,
2543    the scalar could only be a constant.
2544
2545    .. math::
2546
2547        out_{i} = \frac{x_i}{y_i}
2548
2549    Inputs:
2550        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
2551          a bool or a tensor whose data type is number or bool.
        - **y** (Union[Tensor, Number, bool]) - When the first input is a tensor, the second input
2553          could be a number, a bool, or a tensor whose data type is number or bool. When the first input
2554          is a number or a bool, the second input must be a tensor whose data type is number or bool.
2555
2556    Outputs:
2557        Tensor, the shape is the same as the one after broadcasting,
2558        and the data type is the one with higher precision or higher digits among the two inputs.
2559
2560    Raises:
2561        TypeError: If neither `x` nor `y` is a Tensor.
2562
2563    Supported Platforms:
2564        ``Ascend`` ``GPU`` ``CPU``
2565
2566    Examples:
2567        >>> # case 1 :has same data type and shape of the two inputs
2568        >>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
2569        >>> y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
2570        >>> div = ops.Div()
2571        >>> output = div(x, y)
2572        >>> print(output)
2573        [-1.3333334  2.5        2.        ]
2574        >>> # case 2 : different data type and shape of the two inputs
2575        >>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.int32)
2576        >>> y = Tensor(2, mindspore.float32)
2577        >>> output = div(x, y)
2578        >>> print(output)
2579        [-2.  2.5  3.]
2580        >>> print(output.dtype)
2581        Float32
2582    """
2583
2584    def infer_value(self, x, y):
2585        if x is not None and y is not None:
2586            x = x.asnumpy()
2587            y = y.asnumpy()
2588            out = np.array(x / y, x.dtype)
2589            return Tensor(out)
2590        return None
2591
2592
2593class DivNoNan(_MathBinaryOp):
2594    """
    Computes a safe divide and returns 0 if `y` is zero.
2596
2597    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2598    The inputs must be two tensors or one tensor and one scalar.
2599    When the inputs are two tensors,
2600    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
2601    When the inputs are one tensor and one scalar,
2602    the scalar could only be a constant.
2603
2604    Inputs:
2605        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
2606          a bool or a tensor whose data type is number or bool.
2607        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
2608          a bool when the first input is a tensor or a tensor whose data type is number or bool.
2609
2610    Outputs:
2611        Tensor, the shape is the same as the one after broadcasting,
2612        and the data type is the one with higher precision or higher digits among the two inputs.
2613
2614    Raises:
2615        TypeError: If neither `x` nor `y` is a Tensor.
2616
2617    Supported Platforms:
2618        ``Ascend`` ``GPU``
2619
2620    Examples:
2621        >>> x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
2622        >>> y = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
2623        >>> div_no_nan = ops.DivNoNan()
2624        >>> output = div_no_nan(x, y)
2625        >>> print(output)
2626        [0.  0.  0.  2.5 2. ]
2627    """
2628
2629    @prim_attr_register
2630    def __init__(self):
2631        """Initialize _BinaryOp"""
2632        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
2633
2634    def infer_value(self, x, y):
2635        if x is not None and y is not None:
2636            x = x.asnumpy()
2637            y = y.asnumpy()
2638            with np.errstate(divide='ignore', invalid='ignore'):
2639                out = np.true_divide(x, y)
                out[~np.isfinite(out)] = 0  # division by zero and invalid results become 0
            return Tensor(out)
2642        return None
2643
2644
2645class MulNoNan(_MathBinaryOp):
2646    r"""
    Computes `x` * `y` element-wise. If `y` is zero, the result is 0 no matter what `x` is, and likewise,
    if `x` is zero, the result is 0 no matter what `y` is.
2649
2650    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2651    The inputs must be two tensors or one tensor and one scalar.
2652    When the inputs are two tensors, the shapes of them could be broadcasted.
2653    When the inputs are one tensor and one scalar, the scalar could only be a constant.
2654
2655    Note:
2656        The shapes of `x` and `y` should be the same or can be broadcasted.
2657
2658    Inputs:
        - **x** (Union[Tensor]) - The first input is a tensor whose data type is one of
          float16, float32, int32, int64 currently, or a scalar.
        - **y** (Union[Tensor]) - The second input is a tensor whose data type is one of
          float16, float32, int32, int64 currently, or a scalar.
2663
    Outputs:
        Tensor, the shape is the same as the shape after broadcasting,
        and the data type is the one with higher precision among the two inputs.

    Raises:
        TypeError: If neither `x` nor `y` is a Tensor.

    Supported Platforms:
        ``Ascend``
2674
2675    Examples:
2676        >>> # case 1 : same data type and shape of two inputs, there are some 0 in y.
2677        >>> x = Tensor(np.array([[-1.0, 6.0, np.inf], [np.nan, -7.0, 4.0]]), mindspore.float32)
2678        >>> y = Tensor(np.array([[-1.0, 4.0, 0], [0, -3.0, 1.0]]), mindspore.float32)
2679        >>> mul_no_nan = ops.MulNoNan()
2680        >>> output = mul_no_nan(x, y)
2681        >>> print(output)
2682        [[ 1. 24. 0.]
2683        [ 0. 21. 4.]]
2684        >>> # case 2 : the shape of two inputs is same, there are some 0 in x, y.
2685        >>> x = Tensor(np.array([[-1.0, 6.0, 0], [0, np.nan, 4.0]]), mindspore.int32)
2686        >>> y = Tensor(np.array([[-1.0, 4.0, np.inf], [np.nan, 0, 1.0]]), mindspore.float32)
2687        >>> output = mul_no_nan(x, y)
2688        >>> print(output)
2689        [[ 1. 24. 0.]
2690         [ 0.  0. 4.]]
2691        >>> print(output.dtype)
2692        Float32
2693        >>> # case 3 : the y is a scalar.
2694        >>> x = Tensor(np.array([[-1.0, 6.0, 0], [0, np.nan, 4.0]]), mindspore.float32)
2695        >>> y = Tensor(0, mindspore.float32)
2696        >>> output = mul_no_nan(x, y)
2697        >>> print(output)
2698        [[ 0. 0. 0.]
2699         [ 0. 0. 0.]]
2700    """
2701
2702    @prim_attr_register
2703    def __init__(self):
2704        """Initialize _BinaryOp"""
2705        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
2706
2707    def infer_value(self, x, y):
2708        if x is not None and y is not None:
2709            x = x.asnumpy()
2710            y = y.asnumpy()
2711            with np.errstate(divide='ignore', invalid='ignore'):
                out = np.multiply(x, y)
                # a zero operand forces a zero result, even against nan or inf,
                # to match the documented semantics
                out[y == 0] = 0
                out[x == 0] = 0
            return Tensor(out)
2715        return None
2716
2717
2718class FloorDiv(_MathBinaryOp):
2719    """
    Divides the first input tensor by the second input tensor element-wise and rounds down to the closest integer.
2721
2722    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2723    The inputs must be two tensors or one tensor and one scalar.
2724    When the inputs are two tensors,
2725    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
2726    When the inputs are one tensor and one scalar,
2727    the scalar could only be a constant.
2728
2729    .. math::
2730
2731        out_{i} = \\text{floor}( \\frac{x_i}{y_i})
2732
2733    where the :math:`floor` indicates the Floor operator, for more details, please refer to the Floor operator.
2734
2735    Inputs:
2736        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
2737          a bool or a tensor whose data type is number or bool.
2738        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
2739          a bool when the first input is a tensor or a tensor whose data type is number or bool.
2740
2741    Outputs:
2742        Tensor, the shape is the same as the one after broadcasting,
2743        and the data type is the one with higher precision or higher digits among the two inputs.
2744
2745    Raises:
2746        TypeError: If neither `x` nor `y` is a Tensor.
2747
2748    Supported Platforms:
2749        ``Ascend`` ``GPU`` ``CPU``
2750
2751    Examples:
2752        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
2753        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
2754        >>> floor_div = ops.FloorDiv()
2755        >>> output = floor_div(x, y)
2756        >>> print(output)
2757        [ 0  1 -1]
2758    """
2759
2760
2761class TruncateDiv(_MathBinaryOp):
2762    """
    Divides the first input tensor by the second input tensor element-wise for integer types. Negative numbers
    will round fractional quantities towards zero.
2765
2766    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2767    The inputs must be two tensors or one tensor and one scalar.
2768    When the inputs are two tensors,
2769    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
2770    When the inputs are one tensor and one scalar,
2771    the scalar could only be a constant.
2772
2773    Note:
2774        Broadcasting is supported.
2775
2776    Inputs:
2777        - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
2778          or a tensor whose data type is number or bool.
2779        - **y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
2780          is a tensor, or a tensor whose data type is number or bool.
2781
2782    Outputs:
2783        Tensor, the shape is the same as the one after broadcasting,
2784        and the data type is the one with higher precision or higher digits among the two inputs.
2785
2786    Raises:
2787        TypeError: If `x` and `y` is not one of the following: Tensor, Number, bool.
2788
2789    Supported Platforms:
2790        ``Ascend`` ``GPU``
2791
2792    Examples:
2793        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
2794        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
2795        >>> truncate_div = ops.TruncateDiv()
2796        >>> output = truncate_div(x, y)
2797        >>> print(output)
2798        [0 1 0]
2799    """
2800
2801
2802class TruncateMod(_MathBinaryOp):
2803    r"""
2804    Returns the remainder of division element-wise.
2805
2806    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2807    The inputs must be two tensors or one tensor and one scalar.
2808    When the inputs are two tensors,
2809    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
2810    When the inputs are one tensor and one scalar,
2811    the scalar could only be a constant.
2812
2813    .. warning::
2814        - The input data does not support 0.
2815        - When the number of elements in the input exceeds 2048, the accuracy of the operator cannot guarantee
2816          the requirement of double thousandths.
2817        - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
2818        - If the shape is expressed as (D1, D2, ..., Dn), then D1\*D2... \*Dn <= 1000000 and n <= 8.
2819
2820    Inputs:
2821        - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
2822          or a tensor whose data type is number or bool.
2823        - **y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
2824          is a tensor, or a tensor whose data type is number or bool.
2825
2826    Outputs:
2827        Tensor, the shape is the same as the one after broadcasting,
2828        and the data type is the one with higher precision or higher digits among the two inputs.
2829
2830    Raises:
2831        TypeError: If `x` and `y` are not one of the following: Tensor, Number, bool.
2832
2833    Supported Platforms:
2834        ``Ascend`` ``GPU``
2835
2836    Examples:
2837        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
2838        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
2839        >>> truncate_mod = ops.TruncateMod()
2840        >>> output = truncate_mod(x, y)
2841        >>> print(output)
2842        [ 2  1 -1]
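        >>> # Illustrative cross-check of the truncating identity trunc(x / y) * y + mod(x, y) == x,
        >>> # using np.fix as the truncating reference (an assumption, for illustration only):
        >>> print(np.array_equal(x.asnumpy(), np.fix(x.asnumpy() / y.asnumpy()).astype(np.int32)
        ...                      * y.asnumpy() + output.asnumpy()))
        True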
2843    """
2844
2845
2846class Mod(_MathBinaryOp):
2847    r"""
2848    Computes the remainder of dividing the first input tensor by the second input tensor element-wise.
2849
2850    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2851    The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors,
2852    both dtypes cannot be bool, and the shapes of them could be broadcast. When the inputs are one tensor
2853    and one scalar, the scalar could only be a constant.
2854
2855    .. math::
2856
2857        out_{i} = x_{i} - \text{trunc}(x_{i} / y_{i}) \cdot y_{i}
2858
2859    .. warning::
2860        - The input data does not support 0.
2861        - When the number of elements in the input exceeds 2048, the accuracy of the operator cannot guarantee
2862          the requirement of double thousandths.
2863        - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
2864        - If the shape is expressed as (D1, D2, ..., Dn), then D1\*D2... \*Dn <= 1000000 and n <= 8.
2865
2866    Inputs:
2867        - **x** (Union[Tensor, Number]) - The first input is a number or a tensor whose data type is number.
2868        - **y** (Union[Tensor, Number]) - When the first input is a tensor, the second input
2869          could be a number or a tensor whose data type is number. When the first input is a number,
2870          the second input must be a tensor whose data type is number.
2871
2872    Outputs:
2873        Tensor, the shape is the same as the one after broadcasting,
2874        and the data type is the one with higher precision or higher digits among the two inputs.
2875
2876    Raises:
2877        ValueError: When `x` and `y` are not the same dtype.
2878
2879    Supported Platforms:
2880        ``Ascend`` ``GPU`` ``CPU``
2881
2882    Examples:
2883        >>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
2884        >>> y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
2885        >>> mod = ops.Mod()
2886        >>> output = mod(x, y)
2887        >>> print(output)
2888        [-1.  1.  0.]
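        >>> # Illustrative NumPy cross-check: the result keeps the sign of the dividend,
        >>> # matching np.fmod (the same function this operator's infer_value uses):
        >>> print(np.allclose(output.asnumpy(), np.fmod(x.asnumpy(), y.asnumpy())))
        True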
2889    """
2890
2891    def infer_value(self, x, y):
2892        if x is not None and y is not None:
2893            x = x.asnumpy()
2894            y = y.asnumpy()
2895            return Tensor(np.fmod(x, y))
2896        return None
2897
2898
2899class Floor(PrimitiveWithInfer):
2900    r"""
2901    Rounds a tensor down to the closest integer element-wise.
2902
2903    .. math::
2904
2905        out_i = \lfloor x_i \rfloor
2906
2907    Inputs:
2908        - **x** (Tensor) - The input tensor. Its element data type must be float.
2909          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
2910
2911    Outputs:
2912        Tensor, has the same shape as `x`.
2913
2914    Raises:
2915        TypeError: If dtype of `x` is not float.
2916
2917    Supported Platforms:
2918        ``Ascend`` ``GPU`` ``CPU``
2919
2920    Examples:
2921        >>> x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
2922        >>> floor = ops.Floor()
2923        >>> output = floor(x)
2924        >>> print(output)
2925        [ 1.  2. -2.]
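        >>> # Illustrative NumPy cross-check (np.floor assumed as a reference, for illustration only):
        >>> print(np.array_equal(output.asnumpy(), np.floor(x.asnumpy())))
        True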
2926    """
2927
2928    @prim_attr_register
2929    def __init__(self):
2930        """Initialize Floor."""
2931        self.init_prim_io_names(inputs=['x'], outputs=['y'])
2932
2933    def infer_shape(self, x_shape):
2934        return x_shape
2935
2936    def infer_dtype(self, x_dtype):
2937        validator.check_tensor_dtype_valid("x", x_dtype, mstype.float_type, self.name)
2938        return x_dtype
2939
2940
2941class FloorMod(_MathBinaryOp):
2942    r"""
2943    Computes the remainder of division element-wise. It is the remainder of a flooring divide,
2944    so that :math:`floor(x / y) * y + mod(x, y) = x`.
2945
2946    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
2947    The inputs must be two tensors or one tensor and one scalar.
2948    When the inputs are two tensors,
2949    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
2950    When the inputs are one tensor and one scalar,
2951    the scalar could only be a constant.
2952
2953    .. math::
2954
2955        out_{i} = x_{i} - \text{floor}(x_{i} / y_{i}) \cdot y_{i}
2956
2957    where :math:`floor` indicates the Floor operator; for more details, please refer to the Floor operator.
2958
2959    .. warning::
2960        - The input data does not support 0.
2961        - When the number of elements in the input exceeds 2048, the accuracy of the operator cannot guarantee
2962          the requirement of double thousandths.
2963        - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
2964        - If the shape is expressed as (D1, D2, ..., Dn), then D1\*D2... \*Dn <= 1000000 and n <= 8.
2965
2966    Inputs:
2967        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
2968          a bool or a tensor whose data type is number or bool.
2969        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
2970          a bool when the first input is a tensor or a tensor whose data type is number or bool.
2971
2972    Outputs:
2973        Tensor, the shape is the same as the one after broadcasting,
2974        and the data type is the one with higher precision or higher digits among the two inputs.
2975
2976    Raises:
2977        TypeError: If neither `x` nor `y` is a Tensor.
2978
2979    Supported Platforms:
2980        ``Ascend`` ``GPU`` ``CPU``
2981
2982    Examples:
2983        >>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
2984        >>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
2985        >>> floor_mod = ops.FloorMod()
2986        >>> output = floor_mod(x, y)
2987        >>> print(output)
2988        [2 1 2]
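        >>> # Illustrative cross-check of the flooring identity floor(x / y) * y + mod(x, y) == x,
        >>> # with np.floor_divide assumed as the flooring reference (for illustration only):
        >>> print(np.array_equal(x.asnumpy(), np.floor_divide(x.asnumpy(), y.asnumpy()) * y.asnumpy()
        ...                      + output.asnumpy()))
        True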
2989    """
2990
2991
2992class Ceil(PrimitiveWithInfer):
2993    r"""
2994    Rounds a tensor up to the closest integer element-wise.
2995
2996    .. math::
2997
2998        out_i = \lceil x_i \rceil
2999
3000    Inputs:
3001        - **x** (Tensor) - The input tensor. Its element data type must be float16 or float32.
3002          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
3003
3004    Outputs:
3005        Tensor, has the same shape as `x`.
3006
3007    Raises:
3008        TypeError: If dtype of `x` is neither float16 nor float32.
3009
3010    Supported Platforms:
3011        ``Ascend``
3012
3013    Examples:
3014        >>> x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
3015        >>> ceil_op = ops.Ceil()
3016        >>> output = ceil_op(x)
3017        >>> print(output)
3018        [ 2.  3. -1.]
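        >>> # Illustrative NumPy cross-check (np.ceil assumed as a reference, for illustration only):
        >>> print(np.array_equal(output.asnumpy(), np.ceil(x.asnumpy())))
        True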
3019    """
3020
3021    @prim_attr_register
3022    def __init__(self):
3023        """Initialize Ceil."""
3024        self.init_prim_io_names(inputs=['x'], outputs=['y'])
3025
3026    def infer_shape(self, x_shape):
3027        return x_shape
3028
3029    def infer_dtype(self, x_dtype):
3030        validator.check_tensor_dtype_valid("x", x_dtype, [mstype.float16, mstype.float32], self.name)
3031        return x_dtype
3032
3033
3034class Xdivy(_MathBinaryOp):
3035    """
3036    Divides the first input tensor by the second input tensor element-wise. Returns zero when `x` is zero.
3037
3038    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3039    The inputs must be two tensors or one tensor and one scalar.
3040    When the inputs are two tensors,
3041    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
3042    When the inputs are one tensor and one scalar,
3043    the scalar could only be a constant.
3044
3045    Inputs:
3046        - **x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
3047          or a tensor whose data type is float16, float32 or bool.
3048        - **y** (Union[Tensor, Number, bool]) - The second input is a number,
3049          or a bool when the first input is a tensor, or a tensor whose data type is float16, float32 or bool.
3050
3051    Outputs:
3052        Tensor, the shape is the same as the one after broadcasting,
3053        and the data type is the one with higher precision or higher digits among the two inputs.
3054
3055    Raises:
3056        TypeError: If `x` and `y` are not one of the following: Tensor, Number, bool.
3057
3058    Supported Platforms:
3059        ``Ascend``
3060
3061    Examples:
3062        >>> x = Tensor(np.array([2, 4, -1]), mindspore.float32)
3063        >>> y = Tensor(np.array([2, 2, 2]), mindspore.float32)
3064        >>> xdivy = ops.Xdivy()
3065        >>> output = xdivy(x, y)
3066        >>> print(output)
3067        [ 1.   2.  -0.5]
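        >>> # Illustrative NumPy sketch of the semantics: zero where x is zero, x / y elsewhere
        >>> # (np.where used here as an assumed reference, for illustration only):
        >>> print(np.allclose(output.asnumpy(), np.where(x.asnumpy() == 0, 0, x.asnumpy() / y.asnumpy())))
        True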
3068    """
3069
3070    def infer_dtype(self, x_dtype, y_dtype):
3071        return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, [mstype.float16, mstype.float32], self.name)
3072
3073
3074class Xlogy(_MathBinaryOp):
3075    r"""
3076    Computes the first input tensor multiplied by the logarithm of the second input tensor element-wise.
3077    Returns zero when `x` is zero.
3078
3079    .. math::
3080
3081        out_i = x_{i}\ln{y_{i}}
3082
3083    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3084    The inputs must be two tensors or one tensor and one scalar.
3085    When the inputs are two tensors,
3086    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
3087    When the inputs are one tensor and one scalar,
3088    the scalar could only be a constant.
3089
3090    Inputs:
3091        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
3092          a bool or a tensor whose data type is float16, float32 or bool.
3093        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
3094          a bool when the first input is a tensor or a tensor whose data type is float16, float32 or bool.
3095          The value must be positive.
3096
3097    Outputs:
3098        Tensor, the shape is the same as the one after broadcasting,
3099        and the data type is the one with higher precision or higher digits among the two inputs.
3100
3101    Raises:
3102        TypeError: If `x` and `y` are not one of the following: Tensor, Number, bool.
3103
3104    Supported Platforms:
3105        ``Ascend``
3106
3107    Examples:
3108        >>> x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
3109        >>> y = Tensor(np.array([2, 2, 2]), mindspore.float32)
3110        >>> xlogy = ops.Xlogy()
3111        >>> output = xlogy(x, y)
3112        >>> print(output)
3113        [-3.465736   0.        2.7725887]
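        >>> # Illustrative NumPy cross-check of out = x * ln(y); note the zero at x == 0:
        >>> print(np.allclose(output.asnumpy(), x.asnumpy() * np.log(y.asnumpy())))
        True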
3114    """
3115
3116    def infer_dtype(self, x_dtype, y_dtype):
3117        return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, [mstype.float16, mstype.float32], self.name)
3118
3119
3120class Acosh(PrimitiveWithInfer):
3121    r"""
3122    Computes inverse hyperbolic cosine of the inputs element-wise.
3123
3124    .. math::
3125
3126        out_i = \cosh^{-1}(input_i)
3127
3128    .. warning::
3129        Given an input tensor x, the function computes inverse hyperbolic cosine of every element.
3130        Input range is [1, inf].
3131
3132    Inputs:
3133        - **x** (Tensor) - The data type should be one of the following types: float16, float32.
3134          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
3135
3136    Outputs:
3137        Tensor, has the same shape and type as `x`.
3138
3139    Raises:
3140        TypeError: If `x` is not a Tensor.
3141
3142    Supported Platforms:
3143        ``Ascend`` ``GPU`` ``CPU``
3144
3145    Examples:
3146        >>> import numpy as np
3147        >>> import mindspore.ops as ops
3148        >>> from mindspore import Tensor, dtype
3149        >>> acosh = ops.Acosh()
3150        >>> x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), dtype.float32)
3151        >>> output = acosh(x)
3152        >>> print(output)
3153        [0. 0.9624236 1.7627472 5.298292]
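        >>> # Illustrative NumPy cross-check (np.arccosh assumed as a reference, for illustration only):
        >>> print(np.allclose(output.asnumpy(), np.arccosh(x.asnumpy())))
        True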
3154    """
3155
3156    @prim_attr_register
3157    def __init__(self):
3158        """Initialize Acosh"""
3159
3160    def infer_shape(self, x_shape):
3161        return x_shape
3162
3163    def infer_dtype(self, x_dtype):
3164        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
3165        return x_dtype
3166
3167
3168class Cosh(PrimitiveWithInfer):
3169    r"""
3170    Computes hyperbolic cosine of input element-wise.
3171
3172    .. math::
3173
3174        out_i = \cosh(input_i)
3175
3176    Inputs:
3177        - **x** (Tensor) - The shape of tensor is
3178          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
3179
3180    Outputs:
3181        Tensor, has the same shape as `x`.
3182
3183    Raises:
3184        TypeError: If `x` is not a Tensor.
3185
3186    Supported Platforms:
3187        ``Ascend`` ``CPU``
3188
3189    Examples:
3190        >>> cosh = ops.Cosh()
3191        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
3192        >>> output = cosh(x)
3193        >>> print(output)
3194        [1.0289385 1.364684 1.048436 1.0040528]
3195    """
3196
3197    @prim_attr_register
3198    def __init__(self):
3199        """Initialize Cosh"""
3200
3201    def infer_shape(self, x_shape):
3202        return x_shape
3203
3204    def infer_dtype(self, x_dtype):
3205        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
3206        return x_dtype
3207
3208
3209class Asinh(PrimitiveWithInfer):
3210    r"""
3211    Computes inverse hyperbolic sine of the input element-wise.
3212
3213    .. math::
3214
3215        out_i = \sinh^{-1}(input_i)
3216
3217    Inputs:
3218        - **x** (Tensor) - The shape of tensor is
3219          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
3220          The data type should be one of the following types: float16, float32.
3221
3222    Outputs:
3223        Tensor, has the same shape and type as `x`.
3224
3225    Raises:
3226        TypeError: If `x` is not a Tensor.
3227
3228    Supported Platforms:
3229        ``Ascend`` ``GPU`` ``CPU``
3230
3231    Examples:
3232        >>> asinh = ops.Asinh()
3233        >>> x = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
3234        >>> output = asinh(x)
3235        >>> print(output)
3236        [-2.3124385  1.1947632  1.8184465  5.298342 ]
3237    """
3238
3239    @prim_attr_register
3240    def __init__(self):
3241        """Initialize Asinh"""
3242
3243    def infer_shape(self, x_shape):
3244        return x_shape
3245
3246    def infer_dtype(self, x_dtype):
3247        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
3248        return x_dtype
3249
3250
3251class Sinh(PrimitiveWithInfer):
3252    r"""
3253    Computes hyperbolic sine of the input element-wise.
3254
3255    .. math::
3256
3257        out_i = \sinh(input_i)
3258
3259    Inputs:
3260        - **x** (Tensor) - The shape of tensor is
3261          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
3262
3263    Outputs:
3264        Tensor, has the same shape as `x`.
3265
3266    Raises:
3267        TypeError: If `x` is not a Tensor.
3268
3269    Supported Platforms:
3270        ``Ascend`` ``CPU``
3271
3272    Examples:
3273        >>> sinh = ops.Sinh()
3274        >>> x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
3275        >>> output = sinh(x)
3276        >>> print(output)
3277        [0.6604918  0.28367308 0.44337422 0.6604918 ]
3278    """
3279
3280    @prim_attr_register
3281    def __init__(self):
3282        """Initialize Sinh"""
3283
3284    def infer_shape(self, x_shape):
3285        return x_shape
3286
3287    def infer_dtype(self, x_dtype):
3288        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
3289        return x_dtype
3290
3291
3292class _LogicBinaryOp(_BinaryOp):
3293    """
3294    Define logic binary operators.
3295    """
3296
3297    @staticmethod
3298    def do_infer_dtype(x_dtype, y_dtype, valid_type=mstype.number_type, prim_name=None):
3299        """Staticmethod of infer dtype for _LogicBinaryOp."""
3300        args_dtype = {"x": x_dtype, "y": y_dtype}
3301        validator.check_tensors_dtypes_same_and_valid(args_dtype, valid_type, prim_name)
3302        return mstype.tensor_type(mstype.bool_)
3303
3304    def infer_dtype(self, x_dtype, y_dtype):
3305        return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, prim_name=self.name)
3306
3307
3308class Equal(_LogicBinaryOp):
3309    r"""
3310    Computes the equivalence between two tensors element-wise.
3311
3312    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3313    The inputs must be two tensors or one tensor and one scalar.
3314    When the inputs are two tensors, the shapes of them could be broadcast.
3315    When the inputs are one tensor and one scalar, the scalar could only be a constant.
3316
3317    .. math::
3318
3319        out_{i} =\begin{cases}
3320            & \text{True,    if } x_{i} = y_{i} \\
3321            & \text{False,   if } x_{i} \ne y_{i}
3322            \end{cases}
3323
3324    Inputs:
3325        - **x** (Union[Tensor, Number]) - The first input is a number or
3326          a tensor whose data type is number.
3327        - **y** (Union[Tensor, Number]) - The second input is a number
3328          when the first input is a tensor or a tensor whose data type is number.
3329          The data type is the same as the first input.
3330
3331    Outputs:
3332        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
3333
3334    Raises:
3335        TypeError: If neither `x` nor `y` is a Tensor.
3336
3337    Supported Platforms:
3338        ``Ascend`` ``GPU`` ``CPU``
3339
3340    Examples:
3341        >>> # case 1: The shapes of the two inputs are different
3342        >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
3343        >>> equal = ops.Equal()
3344        >>> output = equal(x, 2.0)
3345        >>> print(output)
3346        [False True False]
3347        >>> # case 2: The shapes of the two inputs are the same
3348        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
3349        >>> y = Tensor(np.array([1, 2, 4]), mindspore.int32)
3350        >>> equal = ops.Equal()
3351        >>> output = equal(x, y)
3352        >>> print(output)
3353        [ True  True False]
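        >>> # Illustrative NumPy cross-check for case 2 (the same comparison this operator's
        >>> # infer_value uses):
        >>> print(np.array_equal(output.asnumpy(), x.asnumpy() == y.asnumpy()))
        True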
3354    """
3355
3356    def infer_dtype(self, x_dtype, y_dtype):
3357        return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.name)
3358
3359    def infer_value(self, x, y):
3360        if x is None or y is None:
3361            return None
3362        if isinstance(x, Tensor) and x.has_init:
3363            x = x.init_data()
3364        if isinstance(y, Tensor) and y.has_init:
3365            y = y.init_data()
3366        return Tensor(x.asnumpy() == y.asnumpy())
3367
3368
3369class ApproximateEqual(_LogicBinaryOp):
3370    r"""
3371    Returns True if abs(x-y) is smaller than tolerance element-wise, otherwise False.
3372
3373    .. math::
3374
3375        out_i = \begin{cases}
3376        & \text{ if } \left | x_{i} - y_{i} \right | < \text{tolerance},\ \ True  \\
3377        & \text{ if } \left | x_{i} - y_{i} \right | \ge \text{tolerance},\ \  False
3378        \end{cases}
3379
3380    where :math:`\text{tolerance}` indicates the acceptable maximum tolerance.
3381
3382    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3383    If they have different data types, the lower-priority data type will be converted to
3384    the relatively highest-priority data type.
3385    A RuntimeError exception will be thrown when data type conversion of a Parameter is required.
3386
3387    Args:
3388        tolerance (float): The maximum deviation within which two elements are considered equal. Default: 1e-05.
3389
3390    Inputs:
3391        - **x** (Tensor) - A tensor. Must be one of the following types: float32, float16.
3392          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
3393        - **y** (Tensor) - A tensor of the same type and shape as `x`.
3394
3395    Outputs:
3396        Tensor, the shape is the same as the shape of `x`, and the data type is bool.
3397
3398    Raises:
3399        TypeError: If `tolerance` is not a float.
3400
3401    Supported Platforms:
3402        ``Ascend``
3403
3404    Examples:
3405        >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
3406        >>> y = Tensor(np.array([2, 4, 6]), mindspore.float32)
3407        >>> approximate_equal = ops.ApproximateEqual(2.)
3408        >>> output = approximate_equal(x, y)
3409        >>> print(output)
3410        [ True  True  False]
3411    """
3412
3413    @prim_attr_register
3414    def __init__(self, tolerance=1e-05):
3415        """Initialize ApproximateEqual"""
3416        validator.check_value_type("tolerance", tolerance, [float], self.name)
3417
3418    def infer_shape(self, x_shape, y_shape):
3419        validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
3420        return x_shape
3421
3422    def infer_dtype(self, x_dtype, y_dtype):
3423        args_dtype = {"x": x_dtype, "y": y_dtype}
3424        valid_type = [mstype.float32, mstype.float16]
3425        validator.check_tensors_dtypes_same_and_valid(args_dtype, valid_type, prim_name=self.name)
3426        return mstype.tensor_type(mstype.bool_)
3427
3428
3429class EqualCount(PrimitiveWithInfer):
3430    """
3431    Computes the number of equal elements in two tensors.
3432
3433    The two input tensors must have the same data type and shape.
3434
3435    Inputs:
3436        - **x** (Tensor) - The first input tensor. If the data type and shape of `y` are determined, then `x`
3437          must be the same as `y`, and vice versa.
3438          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
3439        - **y** (Tensor) - The second input tensor. If the data type and shape of `x` are determined, then `y`
3440          must be the same as `x`, and vice versa.
3441
3442    Outputs:
3443        Tensor, with the same type as the input tensor and the shape (1,).
3444
3445    Raises:
3446        TypeError: If `x` or `y` is not a Tensor.
3447        ValueError: If shape of `x` is not equal to shape of `y`.
3448
3449    Supported Platforms:
3450        ``GPU`` ``CPU``
3451
3452    Examples:
3453        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
3454        >>> y = Tensor(np.array([1, 2, 4]), mindspore.int32)
3455        >>> equal_count = ops.EqualCount()
3456        >>> output = equal_count(x, y)
3457        >>> print(output)
3458        [2]
3459    """
3460
3461    @prim_attr_register
3462    def __init__(self):
3463        """Initialize EqualCount"""
3464        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
3465
3466    def infer_shape(self, x_shape, y_shape):
3467        validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
3468        output_shape = (1,)
3469        return output_shape
3470
3471    def infer_dtype(self, x_dtype, y_dtype):
3472        args = {'x': x_dtype, 'y': y_dtype}
3473        validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type + (mstype.bool_,), self.name)
3474        return x_dtype
3475
3476
3477class NotEqual(_LogicBinaryOp):
3478    r"""
3479    Computes the non-equivalence of two tensors element-wise.
3480
3481    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3482    The inputs must be two tensors or one tensor and one scalar.
3483    When the inputs are two tensors, the shapes of them could be broadcast.
3484    When the inputs are one tensor and one scalar, the scalar could only be a constant.
3485
3486    .. math::
3487
3488        out_{i} =\begin{cases}
3489            & \text{True,    if } x_{i} \ne y_{i} \\
3490            & \text{False,   if } x_{i} = y_{i}
3491            \end{cases}
3492
3493    Inputs:
3494        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
3495          a bool or a tensor whose data type is number or bool.
3496        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
3497          a bool when the first input is a tensor or a tensor whose data type is number or bool.
3498
3499    Outputs:
3500        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
3501
3502    Raises:
3503        TypeError: If `x` and `y` are not one of the following: Tensor, Number, bool.
3504        TypeError: If neither `x` nor `y` is a Tensor.
3505
3506    Supported Platforms:
3507        ``Ascend`` ``GPU`` ``CPU``
3508
3509    Examples:
3510        >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
3511        >>> not_equal = ops.NotEqual()
3512        >>> output = not_equal(x, 2.0)
3513        >>> print(output)
3514        [ True False  True]
3515        >>>
3516        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
3517        >>> y = Tensor(np.array([1, 2, 4]), mindspore.int32)
3518        >>> not_equal = ops.NotEqual()
3519        >>> output = not_equal(x, y)
3520        >>> print(output)
3521        [False False  True]
3522    """
3523
3524    def infer_dtype(self, x_dtype, y_dtype):
3525        return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.name)
3526
3527
3528class Greater(_LogicBinaryOp):
3529    r"""
3530    Computes the boolean value of :math:`x > y` element-wise.
3531
3532    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3533    The inputs must be two tensors or one tensor and one scalar.
3534    When the inputs are two tensors,
3535    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
3536    When the inputs are one tensor and one scalar,
3537    the scalar could only be a constant.
3538
3539    .. math::
3540
3541        out_{i} =\begin{cases}
3542            & \text{True,    if } x_{i}>y_{i} \\
3543            & \text{False,   if } x_{i}<=y_{i}
3544            \end{cases}
3545
3546    Note:
3547        Broadcasting is supported.
3548
3549    Inputs:
3550        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
3551          a bool or a tensor whose data type is number or bool.
3552        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
3553          a bool when the first input is a tensor or a tensor whose data type is number or bool.
3554
3555    Outputs:
3556        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
3557
3558    Raises:
3559        TypeError: If neither `x` nor `y` is a Tensor.
3560
3561    Supported Platforms:
3562        ``Ascend`` ``GPU`` ``CPU``
3563
3564    Examples:
3565        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
3566        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
3567        >>> greater = ops.Greater()
3568        >>> output = greater(x, y)
3569        >>> print(output)
3570        [False  True False]
3571    """
3572
3573    def infer_value(self, x, y):
3574        if x is not None and y is not None:
3575            x = x.asnumpy()
3576            y = y.asnumpy()
3577            out = np.array(np.greater(x, y))
3578            return Tensor(out)
3579        return None
3580
3581
3582class GreaterEqual(_LogicBinaryOp):
3583    r"""
3584    Computes the boolean value of :math:`x >= y` element-wise.
3585
3586    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3587    The inputs must be two tensors or one tensor and one scalar.
3588    When the inputs are two tensors,
3589    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
3590    When the inputs are one tensor and one scalar,
3591    the scalar could only be a constant.
3592
3593    .. math::
3594
3595        out_{i} =\begin{cases}
3596            & \text{True,    if } x_{i}>=y_{i} \\
3597            & \text{False,   if } x_{i}<y_{i}
3598            \end{cases}
3599
3600    Inputs:
3601        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
3602          a bool or a tensor whose data type is number or bool.
3603        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
3604          a bool when the first input is a tensor or a tensor whose data type is number or bool.
3605
3606    Outputs:
3607        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
3608
3609    Raises:
3610        TypeError: If neither `x` nor `y` is a Tensor.
3611
3612    Supported Platforms:
3613        ``Ascend`` ``GPU`` ``CPU``
3614
3615    Examples:
3616        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
3617        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
3618        >>> greater_equal = ops.GreaterEqual()
3619        >>> output = greater_equal(x, y)
3620        >>> print(output)
3621        [True True False]
3622    """
3623
3624    def infer_value(self, x, y):
3625        if x is not None and y is not None:
3626            x = x.asnumpy()
3627            y = y.asnumpy()
3628            out = np.array(np.greater_equal(x, y))
3629            return Tensor(out)
3630        return None
3631
3632
3633class Lerp(Primitive):
3634    """
3635    Does a linear interpolation of two tensors `start` and `end` based on a float or tensor `weight`.
3636
3637    If `weight` is a tensor, the shapes of the three inputs need to be broadcastable;
3638    if `weight` is a float, the shapes of `start` and `end` need to be broadcastable.
3639
3640    .. math::
3641
3642        output_{i} = start_{i} + weight_{i} * (end_{i} - start_{i})
3643
3644    Inputs:
3645        - **start** (Tensor) - The tensor with the starting points. Data type must be float16 or float32.
3646        - **end** (Tensor) - The tensor with the ending points. Data type must be float16 or float32.
3647        - **weight** (Union[float, Tensor]) - The weight for the interpolation formula. Must be a float
3648          or a scalar tensor with float16 or float32 data type.
3649
3650    Outputs:
3651        Tensor, has the same type and shape as input `start`.
3652
3653    Raises:
3654        TypeError: If `start` or `end` is not a tensor.
3655        TypeError: If `weight` is neither float nor tensor.
3656        TypeError: If dtype of `start` or `end` is neither float16 nor float32.
3657        TypeError: If dtype of `weight` is neither float16 nor float32 when it is a tensor.
3658        TypeError: If `start` and `end` have different data types.
3659        TypeError: If `start`, `end` and `weight` have different data types when `weight` is a tensor.
3660        ValueError: If `end` could not be broadcast to a tensor with the shape of `start`.
3661        ValueError: If `weight` could not be broadcast to tensors with shapes of `start` and `end` when it is a tensor.
3662
3663    Supported Platforms:
3664        ``Ascend``
3665
3666    Examples:
3667        >>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
3668        >>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
3669        >>> lerp = ops.Lerp()
3670        >>> output = lerp(start, end, 0.5)
3671        >>> print(output)
3672        [5.5 6. 6.5 7. ]
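        >>> # Illustrative NumPy cross-check of output = start + weight * (end - start):
        >>> print(np.allclose(output.asnumpy(), start.asnumpy() + 0.5 * (end.asnumpy() - start.asnumpy())))
        True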
3673    """
3674
3675    @prim_attr_register
3676    def __init__(self):
3677        self.init_prim_io_names(inputs=['start', 'end', 'weight'], outputs=['output'])
3678
3679
3680class Less(_LogicBinaryOp):
3681    r"""
3682    Computes the boolean value of :math:`x < y` element-wise.
3683
3684    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3685    The inputs must be two tensors or one tensor and one scalar.
3686    When the inputs are two tensors,
3687    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
3688    When the inputs are one tensor and one scalar,
3689    the scalar could only be a constant.
3690
3691    .. math::
3692
3693        out_{i} =\begin{cases}
3694            & \text{True,    if } x_{i}<y_{i} \\
3695            & \text{False,   if } x_{i}>=y_{i}
3696            \end{cases}
3697
3698    Inputs:
3699        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
3700          a bool or a tensor whose data type is number or bool.
3701        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
3702          a bool when the first input is a tensor or a tensor whose data type is number or bool.
3703
3704    Outputs:
3705        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
3706
3707    Raises:
3708        TypeError: If `x` and `y` are not one of the following: Tensor, Number, bool.
3709
3710    Supported Platforms:
3711        ``Ascend`` ``GPU`` ``CPU``
3712
3713    Examples:
3714        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
3715        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
3716        >>> less = ops.Less()
3717        >>> output = less(x, y)
3718        >>> print(output)
3719        [False False True]
3720    """
3721
3722    def infer_value(self, x, y):
3723        if x is not None and y is not None:
3724            x = x.asnumpy()
3725            y = y.asnumpy()
3726            out = np.array(np.less(x, y))
3727            return Tensor(out)
3728        return None
3729
3730
3731class LessEqual(_LogicBinaryOp):
3732    r"""
3733    Computes the boolean value of :math:`x <= y` element-wise.
3734
3735    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3736    The inputs must be two tensors or one tensor and one scalar.
3737    When the inputs are two tensors,
3738    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
3739    When the inputs are one tensor and one scalar,
3740    the scalar could only be a constant.
3741
3742    .. math::
3743
3744        out_{i} =\begin{cases}
3745            & \text{True,    if } x_{i}<=y_{i} \\
3746            & \text{False,   if } x_{i}>y_{i}
3747            \end{cases}
3748
3749    Inputs:
3750        - **x** (Union[Tensor, Number, bool]) - The first input is a number or
3751          a bool or a tensor whose data type is number or bool.
3752        - **y** (Union[Tensor, Number, bool]) - The second input is a number or
3753          a bool when the first input is a tensor or a tensor whose data type is number or bool.
3754
3755    Outputs:
3756        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
3757
3758    Raises:
3759        TypeError: If neither `x` nor `y` is a Tensor.
3760
3761    Supported Platforms:
3762        ``Ascend`` ``GPU`` ``CPU``
3763
3764    Examples:
3765        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
3766        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
3767        >>> less_equal = ops.LessEqual()
3768        >>> output = less_equal(x, y)
3769        >>> print(output)
3770        [ True False  True]
3771    """
3772
3773    def infer_value(self, x, y):
3774        if x is not None and y is not None:
3775            x = x.asnumpy()
3776            y = y.asnumpy()
3777            out = np.array(np.less_equal(x, y))
3778            return Tensor(out)
3779        return None
3780
3781
3782class LogicalNot(PrimitiveWithInfer):
3783    """
3784    Computes the "logical NOT" of a tensor element-wise.
3785
3786    .. math::
3787
3788        out_{i} = \\neg x_{i}
3789
3790    Inputs:
3791        - **x** (Tensor) - The input tensor whose dtype is bool.
3792          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
3793
3794    Outputs:
3795        Tensor, the shape is the same as `x`, and the dtype is bool.
3796
3797    Raises:
3798        TypeError: If `x` is not a Tensor.
3799
3800    Supported Platforms:
3801        ``Ascend`` ``GPU`` ``CPU``
3802
3803    Examples:
3804        >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
3805        >>> logical_not = ops.LogicalNot()
3806        >>> output = logical_not(x)
3807        >>> print(output)
3808        [False  True False]
3809    """
3810
3811    @prim_attr_register
3812    def __init__(self):
3813        """Initialize LogicalNot"""
3814        self.init_prim_io_names(inputs=['x'], outputs=['output'])
3815
3816    def infer_shape(self, x_shape):
3817        return x_shape
3818
3819    def infer_dtype(self, x_dtype):
3820        validator.check_tensor_dtype_valid("x", x_dtype, [mstype.bool_], self.name + " or '~' operator")
3821        return mstype.tensor_type(mstype.bool_)
3822
3823    def infer_value(self, x):
3824        if x is not None:
3825            x = x.asnumpy()
3826            return Tensor(np.logical_not(x))
3827        return None
3828
3829
3830class LogicalAnd(_LogicBinaryOp):
3831    r"""
3832    Computes the "logical AND" of two tensors element-wise.
3833
3834    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3835    The inputs must be two tensors or one tensor and one bool.
3836    When the inputs are two tensors, the shapes of them could be broadcast,
3837    and the data types of them must be bool.
3838    When the inputs are one tensor and one bool, the bool object could only be a constant,
3839    and the data type of the tensor must be bool.
3840
3841    .. math::
3842
3843        out_{i} = x_{i} \wedge y_{i}
3844
3845    Note:
3846        LogicalAnd supports broadcasting.
3847
3848    Inputs:
3849        - **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
3850        - **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
3851          a tensor whose data type is bool.
3852
3853    Outputs:
3854        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
3855
3856    Raises:
3857        TypeError: If neither `x` nor `y` is a Tensor.
3858
3859    Supported Platforms:
3860        ``Ascend`` ``GPU`` ``CPU``
3861
3862    Examples:
3863        >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
3864        >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
3865        >>> logical_and = ops.LogicalAnd()
3866        >>> output = logical_and(x, y)
3867        >>> print(output)
3868        [ True False False]
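        >>> # Illustrative NumPy cross-check (np.logical_and, the same function this operator's
        >>> # infer_value uses):
        >>> print(np.array_equal(output.asnumpy(), np.logical_and(x.asnumpy(), y.asnumpy())))
        True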
3869    """
3870
3871    def infer_dtype(self, x_dtype, y_dtype):
3872        return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.name)
3873
3874    def infer_value(self, x, y):
3875        if x is not None and y is not None:
3876            x = x.asnumpy()
3877            y = y.asnumpy()
3878            out = np.array(np.logical_and(x, y))
3879            return Tensor(out)
3880        return None
3881
3882
3883class LogicalOr(_LogicBinaryOp):
3884    """
3885    Computes the "logical OR" of two tensors element-wise.
3886
3887    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
3888    The inputs must be two tensors or one tensor and one bool.
3889    When the inputs are two tensors, the shapes of them could be broadcast,
3890    and the data types of them must be bool.
3891    When the inputs are one tensor and one bool, the bool object could only be a constant,
3892    and the data type of the tensor must be bool.
3893
3894    .. math::
3895
3896        out_{i} = x_{i} \\vee y_{i}
3897
3898    Note:
3899        LogicalOr supports broadcasting.
3900
3901    Inputs:
3902        - **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
3903        - **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
3904          a tensor whose data type is bool.
3905
3906    Outputs:
3907        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
3908
3909    Raises:
3910        TypeError: If neither `x` nor `y` is a Tensor.
3911
3912    Supported Platforms:
3913        ``Ascend`` ``GPU`` ``CPU``
3914
3915    Examples:
3916        >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
3917        >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
3918        >>> logical_or = ops.LogicalOr()
3919        >>> output = logical_or(x, y)
3920        >>> print(output)
3921        [ True  True  True]
3922    """
3923
3924    def infer_dtype(self, x_dtype, y_dtype):
3925        return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.name)
3926
3927    def infer_value(self, x, y):
3928        if x is not None and y is not None:
3929            x = x.asnumpy()
3930            y = y.asnumpy()
3931            out = np.array(np.logical_or(x, y))
3932            return Tensor(out)
3933        return None
3934
3935
3936class IsNan(PrimitiveWithInfer):
3937    r"""
3938    Determines which elements are NaN for each position.
3939
3940    .. math::
3941
3942        out_i = \begin{cases}
3943          & \text{ if } x_{i} = \text{Nan},\ \ True \\
3944          & \text{ if } x_{i} \ne  \text{Nan},\ \ False
3945        \end{cases}
3946
3947    where :math:`Nan` means not a number.
3948
3949    Inputs:
3950        - **x** (Tensor) - The input tensor.
3951          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
3952
3953    Outputs:
3954        Tensor, has the same shape as the input, and the dtype is bool.
3955
3956    Raises:
3957        TypeError: If `x` is not a Tensor.
3958
3959    Supported Platforms:
3960        ``GPU`` ``CPU``
3961
3962    Examples:
3963        >>> is_nan = ops.IsNan()
3964        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
3965        >>> output = is_nan(x)
3966        >>> print(output)
3967        [True False False]
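        >>> # Illustrative NumPy cross-check (np.isnan assumed as a reference, for illustration only):
        >>> print(np.array_equal(output.asnumpy(), np.isnan(x.asnumpy())))
        True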
3968    """
3969
3970    @prim_attr_register
3971    def __init__(self):
3972        """Initialize IsNan"""
3973        self.init_prim_io_names(inputs=['x'], outputs=['output'])
3974
3975    def infer_shape(self, x_shape):
3976        return x_shape
3977
3978    def infer_dtype(self, x_dtype):
3979        return mstype.tensor_type(mstype.bool_)
3980
3981
3982class IsInf(PrimitiveWithInfer):
3983    r"""
3984    Determines which elements are inf or -inf for each position.
3985
3986    .. math::
3987
3988        out_i = \begin{cases}
3989        & \text{ if } x_{i} = \text{Inf},\ \ True \\
3990        & \text{ if } x_{i} \ne \text{Inf},\ \ False
3991        \end{cases}
3992
3993    where :math:`Inf` means infinity or negative infinity.
3994
3995    Inputs:
3996        - **x** (Tensor) - The input tensor.
3997          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
3998
3999    Outputs:
4000        Tensor, has the same shape as the input, and the dtype is bool.
4001
4002    Raises:
4003        TypeError: If `x` is not a Tensor.
4004
4005    Supported Platforms:
4006        ``GPU``
4007
4008    Examples:
4009        >>> is_inf = ops.IsInf()
4010        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
4011        >>> output = is_inf(x)
4012        >>> print(output)
4013        [False False True]
4014    """
4015
4016    @prim_attr_register
4017    def __init__(self):
4018        """Initialize IsInf"""
4019        self.init_prim_io_names(inputs=['x'], outputs=['output'])
4020
4021    def infer_shape(self, x_shape):
4022        return x_shape
4023
4024    def infer_dtype(self, x_dtype):
4025        return mstype.tensor_type(mstype.bool_)
4026
4027
4028class IsFinite(PrimitiveWithInfer):
4029    r"""
4030    Determines which elements are finite for each position.
4031
4032    .. math::
4033
4034        out_i = \begin{cases}
4035          & \text{ if } x_{i} = \text{Finite},\ \ True\  \\
4036          & \text{ if } x_{i} \ne \text{Finite},\ \ False
4037        \end{cases}
4038
4039    Inputs:
4040        - **x** (Tensor) - The input tensor.
4041          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
4042
4043    Outputs:
4044        Tensor, has the same shape as the input, and the dtype is bool.
4045
4046    Raises:
4047        TypeError: If `x` is not a Tensor.
4048
4049    Supported Platforms:
4050        ``Ascend`` ``GPU`` ``CPU``
4051
4052    Examples:
4053        >>> is_finite = ops.IsFinite()
4054        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
4055        >>> output = is_finite(x)
4056        >>> print(output)
4057        [False  True False]
4058    """
4059
4060    @prim_attr_register
4061    def __init__(self):
4062        """Initialize IsFinite"""
4063        self.init_prim_io_names(inputs=['x'], outputs=['output'])
4064
4065    def infer_shape(self, x_shape):
4066        return x_shape
4067
4068    def infer_dtype(self, x_dtype):
4069        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type + (mstype.bool_,), self.name)
4070        return mstype.tensor_type(mstype.bool_)
4071
4072
4073class FloatStatus(PrimitiveWithInfer):
4074    """
4075    Determines if the elements contain Not a Number (NaN), infinity or negative infinity. 0 for normal, 1 for overflow.
4076
4077    Inputs:
4078        - **x** (Tensor) - The input tensor. The data type must be float16 or float32.
4079          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
4080
4081    Outputs:
4082        Tensor, has the shape of `(1,)`, and the dtype is `mindspore.dtype.float32`.
4083
4084    Raises:
4085        TypeError: If dtype of `x` is neither float16 nor float32.
4086
4087    Supported Platforms:
4088        ``GPU``
4089
4090    Examples:
4091        >>> float_status = ops.FloatStatus()
4092        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
4093        >>> result = float_status(x)
4094        >>> print(result)
4095        [1.]
4096    """
4097
4098    @prim_attr_register
4099    def __init__(self):
4100        """Initialize FloatStatus"""
4101        self.init_prim_io_names(inputs=['x'], outputs=['output'])
4102
4103    def infer_shape(self, x_shape):
4104        return [1]
4105
4106    def infer_dtype(self, x_dtype):
4107        validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float32, mstype.float16], self.name)
4108        return mstype.float32
4109
4110
4111class NPUAllocFloatStatus(PrimitiveWithInfer):
4112    """
4113    Allocates a flag to store the overflow status.
4114
4115    The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
4116
4117    Note:
4118        Examples: see `NPUGetFloatStatus`.
4119
4120    Outputs:
4121        Tensor, has the shape of `(8,)`.
4122
4123    Supported Platforms:
4124        ``Ascend``
4125
4126    Examples:
4127        >>> alloc_status = ops.NPUAllocFloatStatus()
4128        >>> output = alloc_status()
4129        >>> print(output)
4130        [0. 0. 0. 0. 0. 0. 0. 0.]
4131    """
4132
4133    @prim_attr_register
4134    def __init__(self):
4135        """Initialize NPUAllocFloatStatus"""
4136
4137    def infer_shape(self):
4138        return [8]
4139
4140    def infer_dtype(self):
4141        return mstype.float32
4142
4143
4144class NPUGetFloatStatus(PrimitiveWithInfer):
4145    """
4146    Updates the flag which is the output tensor of `NPUAllocFloatStatus` with the latest overflow status.
4147
4148    The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
4149    If the sum of the flag equals 0, no overflow has happened. If the sum of the flag is bigger than 0,
4150    overflow has happened.
4151    In addition, there are strict sequencing requirements for use, i.e., before using the NPUGetFloatStatus operator,
4152    you need to ensure that NPUClearFloatStatus and your computation have been executed.
4153    We use Depend to ensure the execution order.
4154
4155    Inputs:
4156        - **x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.
4157          The data type must be float16 or float32.
4158          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
4159
4160    Outputs:
4161        Tensor, has the same shape as `x`. All the elements in the tensor will be zero.
4162
4163    Raises:
4164        TypeError: If `x` is not a Tensor.
4165        TypeError: If dtype of `x` is neither float16 nor float32.
4166
4167    Supported Platforms:
4168        ``Ascend``
4169
4170    Examples:
4171        >>> self.alloc_status = ops.NPUAllocFloatStatus()
4172        >>> self.get_status = ops.NPUGetFloatStatus()
4173        >>> self.clear_status = ops.NPUClearFloatStatus()
4174        >>> init = self.alloc_status()
4175        >>> init = F.Depend(init, input)  # Ensure clear_status after input
4176        >>> clear_status = self.clear_status(init)
4177        >>> input = F.Depend(input, clear_status)  # Ensure your compute after clear_status
4178        >>> output = Compute(input)
4179        >>> init = F.Depend(init, output)
4180        >>> flag = self.get_status(init)  # Ensure get_status after your compute
4181        >>> self.clear_status(init)
4182        >>> print(init)
4183        [0. 0. 0. 0. 0. 0. 0. 0.]
4184    """
4185
4186    @prim_attr_register
4187    def __init__(self):
4188        """Initialize NPUGetFloatStatus"""
4189
4190    def infer_shape(self, x_shape):
4191        cls_name = self.name
4192        validator.check_equal_int(len(x_shape), 1, "len(x_shape)", cls_name)
4193        validator.check_equal_int(x_shape[0], 8, "x_shape[0]", cls_name)
4194        return [8]
4195
4196    def infer_dtype(self, x_dtype):
4197        validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float16, mstype.float32], self.name)
4198        return mstype.float32
4199
4200
4201class NPUClearFloatStatus(PrimitiveWithInfer):
4202    """
4203    Clears the flag which stores the overflow status.
4204
4205    Note:
4206        The flag is in the register on the `Ascend` device. It will be reset and cannot be reused again after
4207        `NPUClearFloatStatus` is called.
4208        In addition, there are strict sequencing requirements for use, i.e., before using the NPUGetFloatStatus
4209        operator, you need to ensure that NPUClearFloatStatus and your computation have been executed.
4210        We use Depend to ensure the execution order.
4211
4212        Examples: see `NPUGetFloatStatus`.
4213
4214    Inputs:
4215        - **x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.
4216          The data type must be float16 or float32.
4217
4218    Outputs:
4219        Tensor, has the same shape as `x`. All the elements in the tensor will be zero.
4220
4221    Supported Platforms:
4222        ``Ascend``
4223
4224    Examples:
4225        >>> self.alloc_status = ops.NPUAllocFloatStatus()
4226        >>> self.get_status = ops.NPUGetFloatStatus()
4227        >>> self.clear_status = ops.NPUClearFloatStatus()
4228        >>> init = self.alloc_status()
4229        >>> init = F.Depend(init, input)  # Ensure clear_status after input
4230        >>> clear_status = self.clear_status(init)
4231        >>> input = F.Depend(input, clear_status)  # Ensure your compute after clear_status
4232        >>> output = Compute(input)
4233        >>> init = F.Depend(init, output)
4234        >>> flag = self.get_status(init)  # Ensure get_status after your compute
4235        >>> self.clear_status(init)
4236        >>> print(init)
4237        [0. 0. 0. 0. 0. 0. 0. 0.]
4238    """
4239
4240    @prim_attr_register
4241    def __init__(self):
4242        """Initialize NPUClearFloatStatus"""
4243
4244    def infer_shape(self, x_shape):
4245        cls_name = self.name
4246        validator.check_equal_int(len(x_shape), 1, "len(x_shape)", cls_name)
4247        validator.check_equal_int(x_shape[0], 8, "x_shape[0]", cls_name)
4248        return [8]
4249
4250    def infer_dtype(self, x_dtype):
4251        validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float16, mstype.float32], self.name)
4252        return mstype.float32
4253
4254
4255class Cos(Primitive):
4256    r"""
4257    Computes cosine of input element-wise.
4258
4259    .. math::
4260        out_i = cos(x_i)
4261
4262    Inputs:
4263        - **x** (Tensor) - The shape of tensor is
4264          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
4265
4266    Outputs:
4267        Tensor, has the same shape as `x`.
4268
4269    Raises:
4270        TypeError: If `x` is not a Tensor.
4271
4272    Supported Platforms:
4273        ``Ascend`` ``GPU`` ``CPU``
4274
4275    Examples:
4276        >>> cos = ops.Cos()
4277        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
4278        >>> output = cos(x)
4279        >>> print(output)
4280        [0.971338 0.67487574 0.95233357 0.9959527 ]
4281    """
4282
4283    @prim_attr_register
4284    def __init__(self):
4285        """Initialize Cos"""
4286
4287
4288class ACos(PrimitiveWithInfer):
4289    r"""
4290    Computes arccosine of input tensors element-wise.
4291
4292    .. math::
4293
4294        out_i = cos^{-1}(x_i)
4295
4296    Inputs:
4297        - **x** (Tensor) - The shape of tensor is
4298           :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
4299
4300    Outputs:
4301        Tensor, has the same shape as `x`.
4302
4303    Raises:
4304        TypeError: If `x` is not a Tensor.
4305
4306    Supported Platforms:
4307        ``Ascend`` ``GPU`` ``CPU``
4308
4309    Examples:
4310        >>> acos = ops.ACos()
4311        >>> x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
4312        >>> output = acos(x)
4313        >>> print(output)
4314        [0.7377037 1.5307858 1.2661037 0.97641146]
4315    """
4316
4317    @prim_attr_register
4318    def __init__(self):
4319        """Initialize ACos"""
4320
4321    def infer_shape(self, x_shape):
4322        return x_shape
4323
4324    def infer_dtype(self, x_dtype):
4325        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
4326        return x_dtype


class Sin(PrimitiveWithInfer):
    r"""
    Computes sine of the input element-wise.

    .. math::

        out_i = sin(x_i)

    Inputs:
        - **x** (Tensor) - The shape of tensor is
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, has the same shape as `x`.

    Raises:
        TypeError: If `x` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> sin = ops.Sin()
        >>> x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
        >>> output = sin(x)
        >>> print(output)
        [0.5810352  0.27635565 0.41687083 0.5810352 ]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Sin."""

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
        return x_dtype


class Asin(PrimitiveWithInfer):
    r"""
    Computes arcsine of input tensors element-wise.

    .. math::

        out_i = sin^{-1}(x_i)

    Inputs:
        - **x** (Tensor) - The shape of tensor is
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, has the same shape as `x`.

    Raises:
        TypeError: If `x` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> asin = ops.Asin()
        >>> x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
        >>> output = asin(x)
        >>> print(output)
        [0.8330927  0.04001068  0.30469266  0.59438497]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Asin"""

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
        return x_dtype
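
# A quick NumPy cross-check for the trigonometric primitives above (an
# illustrative sketch, not part of the operator definitions; it assumes the
# same `ops`/`Tensor` names used in the Examples and a working backend):
#
#   import numpy as np
#   x = np.array([0.24, 0.83, 0.31, 0.09], np.float32)
#   np.allclose(ops.Cos()(Tensor(x)).asnumpy(), np.cos(x), atol=1e-5)      # True
#   np.allclose(ops.ACos()(Tensor(x)).asnumpy(), np.arccos(x), atol=1e-5)  # True
#   np.allclose(ops.Sin()(Tensor(x)).asnumpy(), np.sin(x), atol=1e-5)      # True
#   np.allclose(ops.Asin()(Tensor(x)).asnumpy(), np.arcsin(x), atol=1e-5)  # True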


class NMSWithMask(PrimitiveWithInfer):
    r"""
    Object detection algorithms in the computer vision field usually generate many candidate
    bounding boxes. This operator selects bounding boxes in descending order of score (descending
    order is not supported on the Ascend platform currently), uses the box with the highest score
    to calculate the overlap between it and the other boxes, and suppresses boxes whose overlap
    exceeds a certain threshold (IOU). The IOU is as follows,

    .. math::
        \text{IOU} = \frac{\text{Area of Overlap}}{\text{Area of Union}}

    .. warning::
        Only supports up to 2864 input boxes at one time.

    Args:
        iou_threshold (float): Specifies the threshold of overlap boxes with respect to
            IOU. Default: 0.5.

    Inputs:
        - **bboxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. Input bounding boxes.
          `N` is the number of input bounding boxes. Every bounding box
          contains 5 values: the first 4 values are the coordinates (x0, y0, x1, y1) of the
          bounding box, which represent its top-left and bottom-right points, and the last value
          is the score of this bounding box. The data type must be float16 or float32.

    Outputs:
        tuple[Tensor], tuple of three tensors, they are selected_boxes, selected_idx and selected_mask.

        - **selected_boxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. The list of bounding boxes
          after non-max suppression calculation.
        - **selected_idx** (Tensor) - The shape of tensor is :math:`(N,)`. The indexes list of
          valid input bounding boxes.
        - **selected_mask** (Tensor) - The shape of tensor is :math:`(N,)`. A mask list of
          valid output bounding boxes.

    Raises:
        ValueError: If the `iou_threshold` is not a float number, or if the first dimension
            of input Tensor is less than or equal to 0, or if the data type of the input
            Tensor is not float16 or float32.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> bbox = np.array([[100.0, 100.0, 50.0, 68.0, 0.63], [150.0, 75.0, 165.0, 115.0, 0.55],
        ...                  [12.0, 190.0, 288.0, 200.0, 0.9], [28.0, 130.0, 106.0, 172.0, 0.3]])
        >>> bbox[:, 2] += bbox[:, 0]
        >>> bbox[:, 3] += bbox[:, 1]
        >>> inputs = Tensor(bbox, mindspore.float32)
        >>> nms = ops.NMSWithMask(0.1)
        >>> output_boxes, indices, mask = nms(inputs)
        >>> indices_np = indices.asnumpy()
        >>> print(indices_np[mask.asnumpy()])
        [0 1 2]
    """

    @prim_attr_register
    def __init__(self, iou_threshold=0.5):
        """Initialize NMSWithMask"""
        validator.check_value_type("iou_threshold", iou_threshold, [float], self.name)
        self.init_prim_io_names(inputs=['bboxes'], outputs=['selected_boxes', 'selected_idx', 'selected_mask'])
        self.is_ge = context.get_context("enable_ge")

    def infer_shape(self, bboxes_shape):
        cls_name = self.name
        validator.check_equal_int(len(bboxes_shape), 2, "bboxes rank", cls_name)
        validator.check_positive_int(bboxes_shape[0], "bboxes.shape[0]", cls_name)
        validator.check_equal_int(bboxes_shape[1], 5, "bboxes.shape[1]", cls_name)
        num = bboxes_shape[0]
        return bboxes_shape, (num,), (num,)

    def infer_dtype(self, bboxes_dtype):
        validator.check_tensor_dtype_valid("bboxes", bboxes_dtype, [mstype.float16, mstype.float32], self.name)
        return bboxes_dtype, mstype.int32, mstype.bool_
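
# A worked IOU computation in plain Python (a sketch of the formula above, not
# the kernel implementation; boxes are (x0, y0, x1, y1) corner coordinates, as
# in the `bboxes` input):
#
#   def iou(a, b):
#       # Intersection width/height clamp to 0 when the boxes are disjoint.
#       w = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
#       h = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
#       inter = w * h
#       area_a = (a[2] - a[0]) * (a[3] - a[1])
#       area_b = (b[2] - b[0]) * (b[3] - b[1])
#       return inter / (area_a + area_b - inter)
#
#   iou([0, 0, 2, 2], [1, 1, 3, 3])  # 1 / 7 ~= 0.143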


class Abs(Primitive):
    r"""
    Returns absolute value of a tensor element-wise.

    .. math::

        out_i = |x_i|

    Inputs:
        - **x** (Tensor) - The input tensor. The shape of tensor is
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, has the same shape as the `x`.

    Raises:
        TypeError: If `x` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
        >>> abs = ops.Abs()
        >>> output = abs(x)
        >>> print(output)
        [1. 1. 0.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Abs"""
        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])


class Sign(PrimitiveWithInfer):
    r"""
    Performs sign on the tensor element-wise.

    .. math::
        sign(x) = \begin{cases} -1, &if\ x < 0 \cr
        0, &if\ x = 0 \cr
        1, &if\ x > 0\end{cases}

    Inputs:
        - **x** (Tensor) - The input tensor.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, has the same shape and type as the `x`.

    Raises:
        TypeError: If `x` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``CPU`` ``GPU``

    Examples:
        >>> x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
        >>> sign = ops.Sign()
        >>> output = sign(x)
        >>> print(output)
        [[ 1.  0. -1.]]
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
        return x_dtype


class Round(PrimitiveWithInfer):
    r"""
    Rounds the values of a tensor to the nearest integer element-wise, rounding halfway
    cases to the nearest even integer (round half to even).

    .. math::

        out_i \approx x_i

    Inputs:
        - **x** (Tensor) - The input tensor.

    Outputs:
        Tensor, has the same shape and type as the `x`.

    Raises:
        TypeError: If `x` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
        >>> round = ops.Round()
        >>> output = round(x)
        >>> print(output)
        [ 1.  2.  2.  2. -4.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Round"""
        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
        return x_dtype
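
# Round uses banker's rounding (ties go to the nearest even integer), which
# matches np.round (a NumPy-only sketch for illustration):
#
#   import numpy as np
#   np.round(np.array([0.8, 1.5, 2.3, 2.5, -4.5]))  # -> [ 1.  2.  2.  2. -4.]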


class Tan(PrimitiveWithInfer):
    r"""
    Computes tangent of `x` element-wise.

    .. math::

        out_i = tan(x_i)

    Inputs:
        - **x** (Tensor) - The shape of tensor is
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
          Data type must be float16, float32 or int32.

    Outputs:
        Tensor, has the same shape as `x`.

    Raises:
        TypeError: If dtype of `x` is not one of the following: float16, float32, int32.
        TypeError: If `x` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> tan = ops.Tan()
        >>> x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
        >>> output = tan(x)
        >>> print(output)
        [-1.5574081 0. 1.5574081]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Tan"""

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_type):
        valid_dtypes = [mstype.float16, mstype.float32, mstype.int32]
        validator.check_tensor_dtype_valid('x', x_type, valid_dtypes, self.name)
        return x_type


class Atan(PrimitiveWithInfer):
    r"""
    Computes the trigonometric inverse tangent of the input element-wise.

    .. math::

        out_i = tan^{-1}(x_i)

    Inputs:
        - **x** (Tensor) - The shape of tensor is
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
          The data type should be one of the following types: float16, float32.

    Outputs:
        A Tensor, has the same type as the input.

    Raises:
        TypeError: If `x` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([1.0, 0.0]), mindspore.float32)
        >>> atan = ops.Atan()
        >>> output = atan(x)
        >>> print(output)
        [0.7853982 0.       ]
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_type):
        validator.check_tensor_dtype_valid('x', x_type, mstype.number_type, self.name)
        return x_type


class Atanh(PrimitiveWithInfer):
    r"""
    Computes inverse hyperbolic tangent of the input element-wise.

    .. math::

        out_i = \tanh^{-1}(x_{i})

    Inputs:
        - **x** (Tensor) - The shape of tensor is
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        A Tensor, has the same type as the input.

    Raises:
        TypeError: If `x` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([0, -0.5]), mindspore.float32)
        >>> atanh = ops.Atanh()
        >>> output = atanh(x)
        >>> print(output)
        [0. -0.54930614]
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_type):
        validator.check_tensor_dtype_valid('x', x_type, mstype.number_type, self.name)
        return x_type
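
# NumPy equivalents for the tangent-family primitives above (an illustrative
# sketch, with the same assumptions as the trig cross-check earlier in this file):
#
#   import numpy as np
#   x = np.array([-0.5, 0.0, 0.5], np.float32)
#   np.allclose(ops.Tan()(Tensor(x)).asnumpy(), np.tan(x), atol=1e-5)        # True
#   np.allclose(ops.Atan()(Tensor(x)).asnumpy(), np.arctan(x), atol=1e-5)    # True
#   np.allclose(ops.Atanh()(Tensor(x)).asnumpy(), np.arctanh(x), atol=1e-5)  # True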


class Atan2(_MathBinaryOp):
    r"""
    Returns arctangent of x/y element-wise.

    It returns :math:`\theta\ \in\ [-\pi, \pi]`
    such that :math:`x = r*\sin(\theta), y = r*\cos(\theta)`, where :math:`r = \sqrt{x^2 + y^2}`.

    Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, the lower priority data type will be converted to
    the relatively highest priority data type.
    A RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **x** (Tensor) - The input tensor.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
          The data type will give priority to the high-precision data type.
        - **y** (Tensor) - The input tensor.
          It has the same shape with `x`. The data type will give priority to the high-precision data type.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting, and the data type is same as `x`.

    Raises:
        TypeError: If `x` or `y` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``CPU`` ``GPU``

    Examples:
        >>> x = Tensor(np.array([0, 1]), mindspore.float32)
        >>> y = Tensor(np.array([1, 1]), mindspore.float32)
        >>> atan2 = ops.Atan2()
        >>> output = atan2(x, y)
        >>> print(output)
        [0.        0.7853982]
    """
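
# Atan2 follows the np.arctan2 argument convention: the first input is the
# "numerator" x, so Atan2()(x, y) corresponds to np.arctan2(x, y) (an
# illustrative cross-check, not part of the operator definition):
#
#   import numpy as np
#   np.arctan2(np.array([0.0, 1.0]), np.array([1.0, 1.0]))  # -> [0.         0.78539816]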


class SquareSumAll(PrimitiveWithInfer):
    r"""
    Returns the sum of squares of each input tensor.

    .. math::

        \left\{\begin{matrix}out_{x} = {\textstyle \sum_{i=0}^{N-1}} (x_{i})^2
        \\out_{y} = {\textstyle \sum_{i=0}^{N-1}} (y_{i})^2
        \end{matrix}\right.

    Inputs:
        - **x** (Tensor) - The input tensor. The data type must be float16 or float32.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
        - **y** (Tensor) - The input tensor has the same type and shape as the `x`.

    Note:
        SquareSumAll only supports float16 and float32 data type.

    Outputs:
        - **output_y1** (Tensor) - A 0-D Tensor with the same type as the `x`, containing the
          sum of squares of `x`.
        - **output_y2** (Tensor) - A 0-D Tensor with the same type as the `x`, containing the
          sum of squares of `y`.

    Raises:
        TypeError: If `x` or `y` is not a Tensor.
        ValueError: If `x` and `y` are not the same shape.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> x = Tensor(np.array([0, 0, 2, 0]), mindspore.float32)
        >>> y = Tensor(np.array([0, 0, 2, 4]), mindspore.float32)
        >>> square_sum_all = ops.SquareSumAll()
        >>> output = square_sum_all(x, y)
        >>> print(output)
        (Tensor(shape=[], dtype=Float32, value= 4),
         Tensor(shape=[], dtype=Float32, value= 20))
    """

    @prim_attr_register
    def __init__(self):
        """Initialize SquareSumAll"""

    def infer_shape(self, x_shape, y_shape):
        validator.check("x1_shape", x_shape, "x2_shape", y_shape, Rel.EQ, self.name)
        return [], []

    def infer_dtype(self, x_type, y_type):
        valid_types = (mstype.float16, mstype.float32)
        args = {"x1_type": x_type, "x2_type": y_type}
        validator.check_tensors_dtypes_same_and_valid(args, valid_types, self.name)
        return x_type, y_type
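
# SquareSumAll reduces each input independently to a 0-D result; in NumPy terms
# (a sketch of the semantics, not the kernel implementation):
#
#   import numpy as np
#   x = np.array([0, 0, 2, 0], np.float32)
#   y = np.array([0, 0, 2, 4], np.float32)
#   np.sum(np.square(x)), np.sum(np.square(y))  # -> (4.0, 20.0)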


class BitwiseAnd(_BitwiseBinaryOp):
    r"""
    Returns bitwise `and` of two tensors element-wise.

    .. math::

        out_i = x_{i} \wedge y_{i}

    Inputs of `x` and `y` comply with the implicit type conversion rules to
    make the data types consistent.
    If they have different data types, the lower priority data type will be converted to
    the relatively highest priority data type.
    A RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **x** (Tensor) - The input tensor with int16, int32 or uint16 data type.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
        - **y** (Tensor) - The input tensor with same type as the `x`.

    Outputs:
        Tensor, has the same type as the `x`.

    Raises:
        TypeError: If `x` or `y` is not a Tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
        >>> y = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
        >>> bitwise_and = ops.BitwiseAnd()
        >>> output = bitwise_and(x, y)
        >>> print(output)
        [ 0  0  1 -1  1  0  1]
    """


class BitwiseOr(_BitwiseBinaryOp):
    r"""
    Returns bitwise `or` of two tensors element-wise.

    .. math::

        out_i = x_{i} \mid y_{i}

    Inputs of `x` and `y` comply with the implicit type conversion rules to
    make the data types consistent.
    If they have different data types, the lower priority data type will be converted to
    the relatively highest priority data type.
    A RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **x** (Tensor) - The input tensor with int16, int32 or uint16 data type.
        - **y** (Tensor) - The input tensor with same type as the `x`.

    Outputs:
        Tensor, has the same type as the `x`.

    Raises:
        TypeError: If `x` or `y` is not a Tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
        >>> y = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
        >>> bitwise_or = ops.BitwiseOr()
        >>> output = bitwise_or(x, y)
        >>> print(output)
        [ 0  1  1 -1 -1  3  3]
    """


class BitwiseXor(_BitwiseBinaryOp):
    r"""
    Returns bitwise `xor` of two tensors element-wise.

    .. math::

        out_i = x_{i} \oplus y_{i}

    Inputs of `x` and `y` comply with the implicit type conversion rules to
    make the data types consistent.
    If they have different data types, the lower priority data type will be converted to
    the relatively highest priority data type.
    A RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **x** (Tensor) - The input tensor with int16, int32 or uint16 data type.
        - **y** (Tensor) - The input tensor with same type as the `x`.

    Outputs:
        Tensor, has the same type as the `x`.

    Raises:
        TypeError: If `x` or `y` is not a Tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> x = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
        >>> y = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
        >>> bitwise_xor = ops.BitwiseXor()
        >>> output = bitwise_xor(x, y)
        >>> print(output)
        [ 0  1  0  0 -2  3  2]
    """
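
# The three bitwise primitives above agree with NumPy's &, | and ^ operators on
# the same integer inputs (illustrative sketch):
#
#   import numpy as np
#   x = np.array([0, 0, 1, -1, 1, 1, 1], np.int16)
#   y = np.array([0, 1, 1, -1, -1, 2, 3], np.int16)
#   x & y  # -> [ 0  0  1 -1  1  0  1]
#   x | y  # -> [ 0  1  1 -1 -1  3  3]
#   x ^ y  # -> [ 0  1  0  0 -2  3  2]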


class BesselI0e(PrimitiveWithInfer):
    """
    Computes BesselI0e (the exponentially scaled modified Bessel function of the
    first kind, order 0) of input element-wise.

    Inputs:
        - **x** (Tensor) - The shape of tensor is
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
          Data type must be float16 or float32.

    Outputs:
        Tensor, has the same shape as `x`.

    Raises:
        TypeError: If `x` is not a Tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> bessel_i0e = ops.BesselI0e()
        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
        >>> output = bessel_i0e(x)
        >>> print(output)
        [0.7979961  0.5144438  0.75117415  0.9157829 ]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize BesselI0e"""

    def infer_shape(self, x):
        return x

    def infer_dtype(self, x):
        validator.check_tensor_dtype_valid('x', x, mstype.number_type, self.name)
        return x


class BesselI1e(PrimitiveWithInfer):
    """
    Computes BesselI1e (the exponentially scaled modified Bessel function of the
    first kind, order 1) of input element-wise.

    Inputs:
        - **x** (Tensor) - The shape of tensor is
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
          Data type must be float16 or float32.

    Outputs:
        Tensor, has the same shape as `x`.

    Raises:
        TypeError: If `x` is not a Tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> bessel_i1e = ops.BesselI1e()
        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
        >>> output = bessel_i1e(x)
        >>> print(output)
        [0.09507662 0.19699717 0.11505538 0.04116856]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize BesselI1e"""

    def infer_shape(self, x):
        return x

    def infer_dtype(self, x):
        validator.check_tensor_dtype_valid('x', x, mstype.number_type, self.name)
        return x
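
# BesselI0e(x) = exp(-|x|) * I0(x) and BesselI1e(x) = exp(-|x|) * I1(x). If
# SciPy happens to be available, scipy.special.i0e / i1e can serve as a
# reference (an assumption for this sketch; SciPy is not a MindSpore dependency):
#
#   import numpy as np
#   from scipy import special
#   x = np.array([0.24, 0.83, 0.31, 0.09], np.float32)
#   special.i0e(x)  # ~ [0.7979961  0.5144438  0.75117415 0.9157829 ]
#   special.i1e(x)  # ~ [0.09507662 0.19699717 0.11505538 0.04116856]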


class Inv(PrimitiveWithInfer):
    r"""
    Computes Inv(Reciprocal) of input tensor element-wise.

    .. math::

        out_i = \frac{1}{x_{i}}

    Inputs:
        - **x** (Tensor) - The shape of tensor is
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
          Must be one of the following types: float16, float32, int32.

    Outputs:
        Tensor, has the same shape and data type as `x`.

    Raises:
        TypeError: If dtype of `x` is not one of float16, float32, int32.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> inv = ops.Inv()
        >>> x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)
        >>> output = inv(x)
        >>> print(output)
        [4.        2.5       3.2258065 1.923077 ]
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x_dtype', x_dtype, [mstype.float16, mstype.float32,
                                                                mstype.int32], self.name)
        return x_dtype


class Invert(PrimitiveWithInfer):
    r"""
    Flips all bits of input tensor element-wise.

    .. math::

        out_i = \sim x_{i}

    Inputs:
        - **x** (Tensor[int16], Tensor[uint16]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as `x`.

    Raises:
        TypeError: If dtype of `x` is neither int16 nor uint16.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> invert = ops.Invert()
        >>> x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16)
        >>> output = invert(x)
        >>> print(output)
        [-26 -5 -14 -10]
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x_dtype', x_dtype, [mstype.int16, mstype.uint16], self.name)
        return x_dtype
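
# For signed integers, flipping all bits is the same as computing -(x + 1);
# NumPy's ~ operator (np.invert) behaves identically (illustrative sketch):
#
#   import numpy as np
#   x = np.array([25, 4, 13, 9], np.int16)
#   ~x        # -> [-26  -5 -14 -10]
#   -(x + 1)  # same result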


class Eps(PrimitiveWithInfer):
    """
    Creates a tensor with the same shape as `x`, filled with a small positive value that
    depends on the dtype of `x` (2**-14 for float16 and 2**-16 for float32).

    Inputs:
        - **x** (Tensor) - Input tensor. The data type must be float16 or float32.
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, has the same type and shape as `x`, but filled with the dtype-dependent value above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor([4, 1, 2, 3], mindspore.float32)
        >>> output = ops.Eps()(x)
        >>> print(output)
        [1.5258789e-05 1.5258789e-05 1.5258789e-05 1.5258789e-05]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Eps"""
        self.init_prim_io_names(inputs=['input_x'], outputs=['y'])

    def __infer__(self, input_x):
        valid_dtypes = [mstype.float16, mstype.float32]
        validator.check_tensor_dtype_valid('input_x', input_x['dtype'], valid_dtypes, self.name)

        x_nptype = mstype.dtype_to_nptype(input_x['dtype'].element_type())
        if x_nptype == np.float16:
            min_val = 2 ** (-14)
        else:
            min_val = 2 ** (-16)

        res = np.full(input_x['shape'], min_val, x_nptype)
        out = {
            'value': Tensor(res),
            'shape': input_x['shape'],
            'dtype': input_x['dtype'],
        }
        return out


class LinSpace(PrimitiveWithInfer):
    r"""
    Returns a Tensor whose `num` values are evenly spaced over the interval from `start` to
    `stop` (both endpoints included); the length of the output Tensor is `num`.

    .. math::
        \begin{aligned}
        &step = (stop - start)/(num - 1)\\
        &output = [start, start+step, start+2*step, ... , stop]
        \end{aligned}

    Inputs:
        - **start** (Tensor[float32]) - Start value of the interval, with shape of 0-D.
        - **stop** (Tensor[float32]) - Last value of the interval, with shape of 0-D.
        - **num** (int) - Number of ticks in the interval, inclusive of start and stop.

    Outputs:
        Tensor, with shape :math:`(num,)` and the same dtype as `start`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> linspace = ops.LinSpace()
        >>> start = Tensor(1, mindspore.float32)
        >>> stop = Tensor(10, mindspore.float32)
        >>> num = 5
        >>> output = linspace(start, stop, num)
        >>> print(output)
        [ 1.    3.25  5.5   7.75 10.  ]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize LinSpace"""

    def __infer__(self, start, stop, num):
        args = {"start": start['dtype'], "stop": stop['dtype']}
        validator.check_tensors_dtypes_same_and_valid(args, (mstype.float32,), self.name)
        start_shape = start['shape']
        stop_shape = stop['shape']
        validator.check_equal_int(len(start_shape), 0, "rank of start_shape", self.name)
        validator.check_equal_int(len(stop_shape), 0, "rank of stop_shape", self.name)
        num_v = num['value']
        validator.check_value_type('num', num_v, [int], self.name)
        validator.check_positive_int(num_v, "num", self.name)
        out_shape = [num_v]
        out = {'shape': out_shape,
               'dtype': start['dtype'],
               'value': None}
        return out
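
# LinSpace mirrors np.linspace with the default endpoint=True (a NumPy-only
# sketch of the formula in the docstring):
#
#   import numpy as np
#   np.linspace(1.0, 10.0, num=5, dtype=np.float32)
#   # -> [ 1.    3.25  5.5   7.75 10.  ]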


class MatrixInverse(PrimitiveWithInfer):
    """
    Returns the inverse of the input matrix. If the matrix is not invertible, an error may be reported or an unknown
    result may be returned.

    Note:
        The parameter `adjoint` only supports False right now, because complex numbers are not
        supported at present.

    Args:
        adjoint (bool) : An optional bool. Default: False.

    Inputs:
        - **x** (Tensor) - A matrix to be calculated. The matrix must be at least two dimensions, and the last two
          dimensions must be the same size. types: float32, float64.

    Outputs:
        Tensor, has the same type and shape as input `x`.

    Raises:
        TypeError: If `adjoint` is not a bool.
        TypeError: If dtype of `x` is neither float32 nor float64.
        ValueError: If the last two dimensions of `x` are not the same size.
        ValueError: If the dimension of `x` is less than 2.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> x = Tensor(np.array([[[-0.710504  , -1.1207525],
        ...                       [-1.7651395 , -1.7576632]],
        ...                      [[ 0.52412605,  1.9070215],
        ...                       [ 1.3384849 ,  1.4274558]]]), mindspore.float32)
        >>> matrix_inverse = ops.MatrixInverse(adjoint=False)
        >>> output = matrix_inverse(x)
        >>> print(output)
        [[[ 2.4095483  -1.536419  ]
          [-2.4197974   0.97401696]]
         [[-0.79111797  1.0569006 ]
          [ 0.74180895 -0.2904787 ]]]
    """

    @prim_attr_register
    def __init__(self, adjoint=False):
        """Initialize MatrixInverse"""
        validator.check_type_name("adjoint", adjoint, False, self.name)
        self.adjoint = adjoint

    def infer_dtype(self, x_dtype):
        valid_type = [mstype.float32, mstype.double]
        validator.check_tensor_dtype_valid("x_dtype", x_dtype, valid_type, self.name)
        return x_dtype

    def infer_shape(self, x_shape):
        validator.check_int(len(x_shape), 2, Rel.GE, self.name, None)
        validator.check_equal_int(x_shape[-1], x_shape[-2], self.name, None)
        return x_shape
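
# MatrixInverse inverts over the last two axes of a batched input; np.linalg.inv
# broadcasts the same way and can serve as a reference (illustrative sketch):
#
#   import numpy as np
#   x = np.random.rand(2, 3, 3).astype(np.float32) + np.eye(3, dtype=np.float32)
#   inv = np.linalg.inv(x)                      # shape (2, 3, 3)
#   np.allclose(x @ inv, np.eye(3), atol=1e-4)  # True when x is well-conditioned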


class IndexAdd(Primitive):
    """
    Adds tensor `y` to the specified axis and indices of tensor `x`. The axis should be in the range
    from 0 to len(x.shape) - 1, and the values of `indices` should be in the range from 0 to the size
    of `x` at the `axis` dimension minus 1.

    Args:
        axis (int): The dimension along which to index.

    Inputs:
        - **x** (Parameter) - The input tensor to add to.
        - **indices** (Tensor) - The index of `x` on the `axis` th dimension to add to, with data type int32.
          The `indices` must be 1D with the same size as the size of the `axis` th dimension of `y`. The values
          of `indices` should be in the range of 0 to the size of the `axis` th dimension of `x`.
        - **y** (Tensor) - The input tensor with the value to add. Must have same data type as `x`.
          The shape must be the same as `x` except the `axis` th dimension.

    Outputs:
        Tensor, has the same shape and dtype as `x`.

    Raises:
        TypeError: If `x` is not a Tensor.
        TypeError: If `indices` or `y` is not a Tensor.
        ValueError: If `axis` is out of `x` rank's range.
        ValueError: If `x` rank is not the same as `y` rank.
        ValueError: If size of `indices` is not equal to dimension of y[axis].
        ValueError: If `y`'s shape is not the same as `x` except the `axis` th dimension.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> class Net(nn.Cell):
        ...     def __init__(self):
        ...         super(Net, self).__init__()
        ...         self.index_add = ops.IndexAdd(axis=1)
        ...         self.x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32))
        ...         self.indices = Tensor(np.array([0, 2]), mindspore.int32)
        ...
        ...     def construct(self, y):
        ...         return self.index_add(self.x, self.indices, y)
        ...
        >>> y = Tensor(np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]]), mindspore.float32)
        >>> net = Net()
        >>> output = net(y)
        >>> print(output)
        [[ 1.5  2.   4. ]
         [ 5.   5.   7.5]
         [ 9.   8.  11.5]]
    """
    __mindspore_signature__ = (
        sig.make_sig('input_x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
        sig.make_sig('input_y', dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self, axis, use_lock=True, check_index_bound=True):
        """Initialize IndexAdd"""
        self.init_prim_io_names(inputs=['input_x', 'indices', 'input_y'], outputs=['output'])
        self.axis = axis
        validator.check_value_type('axis', axis, [int], self.name)
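
# In NumPy terms, IndexAdd with axis=1 is an in-place scatter-add into the
# selected columns (a sketch of the semantics; np.add.at also handles duplicate
# indices correctly):
#
#   import numpy as np
#   x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], np.float32)
#   y = np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]], np.float32)
#   np.add.at(x, (slice(None), np.array([0, 2])), y)
#   # x is now [[1.5, 2., 4.], [5., 5., 7.5], [9., 8., 11.5]]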


class Erfinv(Primitive):
    r"""
    Computes the inverse error function of input. The inverse error function is defined in the
    range (-1, 1) as:

    .. math::

        erfinv(erf(x)) = x

    Inputs:
        - **input_x** (Tensor) - The input tensor, with data type float32 or float16.

    Outputs:
        Tensor, has the same shape and dtype as `input_x`.

    Raises:
        TypeError: If dtype of `input_x` is not one of: float32, float16.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> x = Tensor(np.array([0, 0.5, -0.9]), mindspore.float32)
        >>> erfinv = ops.Erfinv()
        >>> output = erfinv(x)
        >>> print(output)
        [ 0.          0.47695306 -1.1630805 ]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Erfinv"""
        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
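
# If SciPy happens to be available, scipy.special.erfinv provides a reference
# for this primitive (an assumption for this sketch; SciPy is not a MindSpore
# dependency):
#
#   import numpy as np
#   from scipy import special
#   special.erfinv(np.array([0.0, 0.5, -0.9]))  # ~ [ 0.      0.4769 -1.1631]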


class Conj(PrimitiveWithInfer):
    """
    Returns a Tensor that is the element-wise complex conjugate of the input.

    Inputs:
        - **input** (Tensor, complex) - The input tensor. types: complex64, complex128.

    Outputs:
        Tensor, has the same dtype as the input.

    Raises:
        TypeError: If the dtype of input is not one of: complex64, complex128.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> x = Tensor(np.asarray(complex(1.3, 0.4)), mindspore.complex64)
        >>> conj = ops.Conj()
        >>> output = conj(x)
        >>> print(output)
        1.3-0.4j
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(
            inputs=['input_tensor'],
            outputs=['output_tensor'])

    def infer_shape(self, input_shape):
        return input_shape

    def infer_dtype(self, input_dtype):
        validator.check_tensor_dtype_valid('input_tensor', input_dtype,
                                           [mstype.complex64, mstype.complex128], self.name)
        return input_dtype


class Real(PrimitiveWithInfer):
    """
    Returns a Tensor that is the real part of the input.

    Inputs:
        - **input** (Tensor, complex) - The input tensor. types: complex64, complex128.

    Outputs:
        Tensor, has the float type (float32 for a complex64 input, float64 for complex128).

    Raises:
        TypeError: If the dtype of input is not one of: complex64, complex128.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> x = Tensor(np.asarray(complex(1.3, 0.4)), mindspore.complex64)
        >>> real = ops.Real()
        >>> output = real(x)
        >>> print(output)
        1.3
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(
            inputs=['input_tensor'],
            outputs=['output_tensor'])

    def infer_shape(self, input_shape):
        return input_shape

    def infer_dtype(self, input_dtype):
        validator.check_tensor_dtype_valid('input_tensor', input_dtype,
                                           [mstype.complex64, mstype.complex128], self.name)
        if input_dtype == mstype.tensor_type(mstype.complex64):
            output_dtype = mstype.float32
        elif input_dtype == mstype.tensor_type(mstype.complex128):
            output_dtype = mstype.float64
        return output_dtype


class Imag(PrimitiveWithInfer):
    """
    Returns a new tensor containing the imaginary part of the input.

    Inputs:
        - **input** (Tensor, complex) - The input tensor. types: complex64, complex128.

    Outputs:
        Tensor, has the float type (float32 for a complex64 input, float64 for complex128).

    Raises:
        TypeError: If the dtype of input is not one of: complex64, complex128.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> x = Tensor(np.asarray(complex(1.3, 0.4)), mindspore.complex64)
        >>> imag = ops.Imag()
        >>> output = imag(x)
        >>> print(output)
        0.4
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(
            inputs=['input_tensor'],
            outputs=['output_tensor'])

    def infer_shape(self, input_shape):
        return input_shape

    def infer_dtype(self, input_dtype):
        validator.check_tensor_dtype_valid('input_tensor', input_dtype,
                                           [mstype.complex64, mstype.complex128], self.name)
        if input_dtype == mstype.tensor_type(mstype.complex64):
            output_dtype = mstype.float32
        elif input_dtype == mstype.tensor_type(mstype.complex128):
            output_dtype = mstype.float64
        return output_dtype
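
# Conj, Real and Imag correspond directly to np.conj, np.real and np.imag
# (illustrative sketch):
#
#   import numpy as np
#   z = np.asarray(complex(1.3, 0.4), np.complex64)
#   np.conj(z), np.real(z), np.imag(z)  # -> ((1.3-0.4j), 1.3, 0.4)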