• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2020-2021 Huawei Technologies Co., Ltd
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ============================================================================
15"""math operations, the function docs are adapted from Numpy API."""
16import operator
17import functools
18import itertools
19import sys
20from numpy import dtype as nptype
21
22from ..ops import operations as P
23from ..ops import functional as F
24from ..ops import composite as C
25from ..ops.primitive import constexpr
26from ..common import dtype as mstype
27from ..common import Tensor
28from .._c_expression import typing
29
30from .dtypes import nan, pi, dtype_map, inf
31
32from .array_creations import asarray_const, ones, zeros, empty, full, full_like, diag, \
33    arange, histogram_bin_edges, eye
34from .array_ops import where as where_
35from .array_ops import ravel, expand_dims, moveaxis, concatenate, flip, stack, atleast_1d, \
36    split
37
38from .utils_const import _infer_out_shape, _check_axis_valid, _get_device, \
39    _check_shape_aligned, _raise_type_error, _check_same_type, _check_is_float, \
40    _raise_value_error, _promote, _check_axis_type, _canonicalize_axis, \
41    _is_shape_empty, _check_is_int, _expanded_shape, _check_axis_in_range, \
42    _check_dtype, _list_comprehensions, _tuple_setitem, _add_unit_axes, _seq_prod, \
43    _make_tensor, _promote_for_trigonometric, _raise_runtime_error, _max, _type_convert, \
44    _raise_unimplemented_error, _abs, _in, _tuple_slice, _check_is_inf
45from .utils import _expand, _broadcast_to, _broadcast_to_shape, _check_input_tensor, \
46    _to_tensor, _to_tensor_origin_dtype, _isnan
47
48
# Scalar zero tensor, reused as the return value for empty inputs
# (see count_nonzero below).
ZERO_TENSOR = asarray_const(0)


# Primitive operators are instantiated once at module load and shared by the
# functions below, instead of being rebuilt on every call.
_mean_keepdims = P.ReduceMean(True)         # ReduceMean keeping reduced dims
_matmul = P.MatMul(False, False)            # plain matrix multiply
_matmul_t = P.MatMul(False, True)           # matmul with second operand transposed
_reduce_sum_default = P.ReduceSum()
_reduce_sum_keepdims = P.ReduceSum(True)
_reduce_min_default = P.ReduceMin()
_reduce_min_keepdims = P.ReduceMin(True)
_reduce_max_default = P.ReduceMax()
_reduce_max_keepdims = P.ReduceMax(True)
_cumsum_default = P.CumSum()
_concat = P.Concat(-1)                      # concatenate along the last axis
_cumprod_default = P.CumProd()
_round = P.Round()
_rint = P.Rint()
66
67
68
def absolute(x, dtype=None):
    """
    Calculates the absolute value element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        The backend kernel only supports float calculation; a non-float input is
        temporarily cast to :class:`mstype.float32` and the result cast back.

    Args:
        x (Tensor): Tensor to be used for calculation.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor.

    Raises:
        TypeError: If input arguments have types not specified above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, 2, 3, -4, -5], np.float32)
        >>> print(np.absolute(x))
        [1. 2. 3. 4. 5.]
    """
    dtype_in = x.dtype
    # Non-float inputs without an explicit dtype take a round trip through
    # float32 because the kernel is float-only.
    if dtype is None and not _check_is_float(dtype_in):
        res = _apply_tensor_op(F.absolute, x.astype(mstype.float32), dtype=dtype)
        return res.astype(dtype_in)
    return _apply_tensor_op(F.absolute, x, dtype=dtype)
105
106
def count_nonzero(x, axis=None, keepdims=False):
    """
    Counts the number of non-zero values in the tensor `x`.

    Args:
        x (Tensor): The tensor for which to count non-zeros.
        axis (Union[int,tuple], optional): Axis or tuple of axes along which to
            count non-zeros. Default is None, meaning that non-zeros will be counted
            along a flattened version of `x`.
        keepdims (bool, optional): If this is set to True, the axes that are counted
            are left in the result as dimensions with size one. With this option,
            the result will broadcast correctly against `x`.

    Returns:
        Tensor, indicating number of non-zero values in the `x` along a given axis.
        Otherwise, the total number of non-zero values in `x` is returned.

    Raises:
        TypeError: If axis is not int or tuple.
        ValueError: If axis is not in range [-x.ndim, x.ndim)

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, 2, 3, -4, 0, 3, 2, 0])
        >>> print(np.count_nonzero(x))
        6
    """
    # An empty tensor trivially contains zero non-zero elements.
    if _is_shape_empty(x.shape):
        return ZERO_TENSOR
    # C.count_nonzero treats an empty axis tuple as "reduce over everything".
    axes = () if axis is None else axis
    return C.count_nonzero(x=x, axis=axes, keep_dims=keepdims)
143
144
def clip(x, xmin, xmax, dtype=None):
    """
    Clips (limits) the values in an array.

    Given an interval, values outside the interval are clipped to the interval edges.
    For example, if an interval of :math:`[0, 1]` is specified, values smaller than 0 become 0,
    and values larger than 1 become 1.

    Args:
        x (Tensor): Tensor containing elements to clip.
        xmin (Tensor, scalar, None): Minimum value. If None, clipping is not performed
            on lower interval edge. Not more than one of `xmin` and `xmax` may be None.
        xmax (Tensor, scalar, None): Maximum value. If None, clipping is not performed
            on upper interval edge. Not more than one of `xmin` and `xmax` may be None.
            If `xmin` or `xmax` are tensors, then the three tensors will be broadcasted
            to match their shapes.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor, a tensor with the elements of `x`, but where values
        < `xmin` are replaced with `xmin`, and those > `xmax` with `xmax`.

    Raises:
        TypeError: If inputs have types not specified above.
        ValueError: If the shapes of `x`, `xmin` and `xmax` cannot broadcast,
            or both `xmin` and `xmax` are `None`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, 2, 3, -4, 0, 3, 2, 0])
        >>> print(np.clip(x, 0, 2))
        [1 2 2 0 0 2 2 0]
    """
    if xmin is None and xmax is None:
        _raise_value_error("One of max or min must be given.")
    # Apply the lower bound first, then the upper bound, matching numpy's
    # behavior when xmin > xmax (the upper bound wins).
    for bound, op in ((xmin, maximum), (xmax, minimum)):
        if bound is not None:
            x = op(x, bound, dtype=dtype)
    return x
189
190
def deg2rad(x, dtype=None):
    """
    Converts angles from degrees to radians.

    Args:
        x (Tensor): Angles in degrees.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor, the corresponding angle in radians. This is a tensor scalar if `x`
        is a tensor scalar.

    Raises:
        TypeError: if `x` is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, 2, 3, -4, -5])
        >>> print(np.deg2rad(x))
        [ 0.01745329  0.03490658  0.05235988 -0.06981317 -0.08726647]
    """
    _check_input_tensor(x)

    def to_radians(angle):
        # degrees -> radians: multiply by pi/180
        return angle * pi / 180.0

    return _apply_tensor_op(to_radians, x, dtype=dtype)
222
223
def rad2deg(x, dtype=None):
    """
    Converts angles from radians to degrees.

    Args:
        x (Tensor): Angles in radians.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor, the corresponding angle in degrees. This is a tensor scalar if `x`
        is a tensor scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, 2, 3, -4, -5])
        >>> print(np.rad2deg(x))
        [  57.295776  114.59155   171.88733  -229.1831   -286.47888 ]
    """
    _check_input_tensor(x)

    def to_degrees(angle):
        # radians -> degrees: multiply by 180/pi
        return angle * 180.0 / pi

    return _apply_tensor_op(to_degrees, x, dtype=dtype)
252
253
def add(x1, x2, dtype=None):
    """
    Adds arguments element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): input to be added.
        x2 (Tensor): input to be added.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the sum of `x1` and `x2`, element-wise. This is a scalar
        if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.full((3, 2), [1, 2])
        >>> x2 = np.full((3, 2), [3, 4])
        >>> print(np.add(x1, x2))
        [[4 6]
        [4 6]
        [4 6]]
    """
    if _get_device() != 'CPU':
        return _apply_tensor_op(F.tensor_add, x1, x2, dtype=dtype)
    # tensor_add does not fully support broadcasting on CPU, so compute
    # x1 + x2 as x1 - (-x2) via subtract instead.
    return subtract(x1, F.neg_tensor(_to_tensor(x2)), dtype=dtype)
290
291
def subtract(x1, x2, dtype=None):
    """
    Subtracts arguments, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): the input to be subtracted from.
        x2 (Tensor): the input to be subtracted by.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the difference of `x1` and `x2`, element-wise. This is a
        scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.full((3, 2), [1, 2])
        >>> x2 = np.full((3, 2), [3, 4])
        >>> print(np.subtract(x1, x2))
        [[-2 -2]
        [-2 -2]
        [-2 -2]]
    """
    # Delegate broadcasting, type conversion and dtype override to the
    # shared element-wise dispatcher.
    sub_op = F.tensor_sub
    return _apply_tensor_op(sub_op, x1, x2, dtype=dtype)
324
325
def multiply(x1, x2, dtype=None):
    """
    Multiplies arguments element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): input tensor to be multiplied.
        x2 (Tensor): input tensor to be multiplied.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the product of `x1` and `x2`, element-wise. This is a scalar
        if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.full((3, 2), [1, 2])
        >>> x2 = np.full((3, 2), [3, 4])
        >>> print(np.multiply(x1, x2))
        [[3 8]
        [3 8]
        [3 8]]
    """
    if _get_device() == 'CPU':
        _check_input_tensor(x1, x2)
        # The CPU backend lacks full implicit broadcast support, so both
        # operands are explicitly broadcast to the common output shape.
        out_shape = _infer_out_shape(F.shape(x1), F.shape(x2))
        x1 = _broadcast_to_shape(x1, out_shape)
        x2 = _broadcast_to_shape(x2, out_shape)
    return _apply_tensor_op(F.tensor_mul, x1, x2, dtype=dtype)
365
366
def divide(x1, x2, dtype=None):
    """
    Returns a true division of the inputs, element-wise.

    Instead of the Python traditional 'floor division', this returns a true
    division.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): the dividend.
        x2 (Tensor): the divisor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, this is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.full((3, 2), [1, 2])
        >>> x2 = np.full((3, 2), [3, 4])
        >>> print(np.divide(x1, x2))
        [[0.33333334 0.5       ]
        [0.33333334 0.5       ]
        [0.33333334 0.5       ]]
    """
    x1, x2 = _to_tensor(x1, x2)
    # True division must yield floats: when neither operand is floating
    # point, promote both to float32 before dividing.
    neither_float = not _check_is_float(F.dtype(x1)) and not _check_is_float(F.dtype(x2))
    if neither_float:
        x1 = F.cast(x1, mstype.float32)
        x2 = F.cast(x2, mstype.float32)
    return _apply_tensor_op(F.tensor_div, x1, x2, dtype=dtype)
405
406
def true_divide(x1, x2, dtype=None):
    """
    Returns a true division of the inputs, element-wise.

    Instead of the Python traditional 'floor division', this returns a true
    division. This function is an alias of :func:`divide`.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): the dividend.
        x2 (Tensor): the divisor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, this is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.full((3, 2), [1, 2])
        >>> x2 = np.full((3, 2), [3, 4])
        >>> print(np.true_divide(x1, x2))
        [[0.33333334 0.5       ]
        [0.33333334 0.5       ]
        [0.33333334 0.5       ]]
    """
    # Identical semantics to divide; kept as a separate name for numpy parity.
    return divide(x1, x2, dtype=dtype)
441
442
def power(x1, x2, dtype=None):
    """
    First array elements raised to powers from second array, element-wise.

    Raises each base in `x1` to the positionally-corresponding power in `x2`.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        x1 (Tensor): the bases.
        x2 (Tensor): the exponents.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the bases in `x1` raised to the exponents in `x2`. This
        is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.full((3, 2), [1, 2]).astype('float32')
        >>> x2 = np.full((3, 2), [3, 4]).astype('float32')
        >>> print(np.power(x1, x2))
        [[ 1. 16.]
        [ 1. 16.]
        [ 1. 16.]]
    """
    # Broadcasting and dtype handling are delegated to the shared dispatcher.
    pow_op = F.tensor_pow
    return _apply_tensor_op(pow_op, x1, x2, dtype=dtype)
478
479
def float_power(x1, x2, dtype=None):
    """
    First array elements raised to powers from second array, element-wise.

    Raise each base in `x1` to the positionally-corresponding power in `x2`. `x1` and
    `x2` must be broadcastable to the same shape. This differs from the power
    function in that integers, float16, and float64 are promoted to floats with
    a minimum precision of float32 so that the result is always inexact. The
    intent is that the function will return a usable result for negative powers
    and seldom overflow for positive powers.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        Integers and floats are promoted to float32 instead of float64.

    Args:
        x1 (Tensor): the bases.
        x2 (Tensor): the exponents.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the bases in `x1` raised to the exponents in `x2`. This
        is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.arange(6)
        >>> x2 = np.array(3)
        >>> output = np.float_power(x1, x2)
        >>> print(output)
        [  0.   1.   8.  27.  64. 125.]
    """
    # Convert array_like inputs to tensors first (consistent with divide);
    # otherwise F.dtype below fails on plain Python scalars/sequences.
    x1, x2 = _to_tensor(x1, x2)
    # Promote both operands to float32 so the result is always inexact.
    if not _check_same_type(F.dtype(x1), mstype.float32):
        x1 = F.cast(x1, mstype.float32)
    if not _check_same_type(F.dtype(x2), mstype.float32):
        x2 = F.cast(x2, mstype.float32)

    return _apply_tensor_op(F.tensor_pow, x1, x2, dtype=dtype)
523
524
def minimum(x1, x2, dtype=None):
    """
    Element-wise minimum of tensor elements.

    Compares two tensors and returns a new tensor containing the element-wise minima.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On Ascend, input arrays containing inf or NaN are not supported.

    Args:
        x1 (Tensor): first input tensor to be compared.
        x2 (Tensor): second input tensor to be compared.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
       Tensor, element-wise minimum of `x1` and `x2`.

    Raises:
        TypeError: If inputs have types not specified above.
        ValueError: If the shapes of `x1` and `x2` cannot be broadcast.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.asarray([1, 2])
        >>> b = np.asarray([[1, 3],[1, 4]])
        >>> print(np.minimum(a, b))
        [[1 2]
        [1 2]]
    """
    # Accept array_like inputs: convert Python scalars/sequences to tensors,
    # and reject anything that is neither array_like nor already a Tensor.
    if isinstance(x1, (int, float, bool, list, tuple)):
        x1 = asarray_const(x1)
    elif not isinstance(x1, Tensor):
        _raise_type_error("Input x1 is expected to be array_like")

    if isinstance(x2, (int, float, bool, list, tuple)):
        x2 = asarray_const(x2)
    elif not isinstance(x2, Tensor):
        _raise_type_error("Input x2 is expected to be array_like")

    # if both are scalars, expand x1 to 1d tensor, since cpu kernel doesn't support
    # comparisons with 2 scalars; squeeze afterwards to restore a 0-d result.
    if x1.ndim == 0 and x2.ndim == 0:
        x1 = expand_dims(x1, 0)
        return _apply_tensor_op(functools.partial(_prop_nan, F.minimum), x1, x2, dtype=dtype).squeeze()
    # NOTE(review): when exactly one operand is 0-d, the caller-supplied
    # `dtype` is silently overwritten with the other operand's dtype —
    # confirm this override is intended rather than a precedence bug.
    if x1.ndim == 0:
        dtype = x2.dtype
    elif x2.ndim == 0:
        dtype = x1.dtype
    # _prop_nan wraps F.minimum so NaNs propagate through the comparison.
    return _apply_tensor_op(functools.partial(_prop_nan, F.minimum), x1, x2, dtype=dtype)
580
581
def mean(a, axis=None, keepdims=False, dtype=None):
    """
    Computes the arithmetic mean along the specified axis.

    Returns the average of the array elements. The average is taken
    over the flattened array by default, otherwise over the specified
    axis.

    Note:
        Numpy arguments `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        a (Tensor): input tensor containing numbers whose mean is desired.
                    If a is not an array, a conversion is attempted.
        axis (None or int or tuple of integers, optional): Axis or axes along
                    which the means are computed. The default is to compute
                    the mean of the flattened array. If this is a tuple of
                    ints, a mean is performed over multiple axes.
        keepdims (bool, optional): If this is set to True, the axes which
                    are reduced are left in the result as dimensions with
                    size one. With this option, the result will broadcast
                    correctly against the input tensor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, an array containing the mean values.

    Raises:
        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(6, dtype='float32')
        >>> print(np.mean(a, 0))
        2.5
    """
    # Build a ReduceMean primitive matching the keepdims request and hand
    # axis normalization and dtype handling off to the shared reducer.
    reducer = P.ReduceMean(keepdims)
    return _reduce(a, reducer, axis=axis, keepdims=keepdims, dtype=dtype)
626
627
def inner(a, b):
    """
    Returns the inner product of two tensors.

    Ordinary inner product of vectors for 1-D tensors (without complex
    conjugation), in higher dimensions a sum product over the last
    axes.

    Note:
        Numpy argument `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and
        np.float64.

    Args:
        a (Tensor): input tensor. If `a` and `b` are nonscalar, their last
                    dimensions must match.
        b (Tensor): input tensor. If `a` and `b` are nonscalar, their last
                    dimensions must match.

    Returns:
        Tensor or scalar.

    Raises:
        ValueError: if ``x1.shape[-1] != x2.shape[-1]``.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((5, 3))
        >>> b = np.ones((2, 7, 3))
        >>> output = np.inner(a, b)
        >>> print(output.shape)
        (5, 2, 7)
    """
    # A scalar operand degenerates to plain element-wise multiplication.
    if F.rank(a) == 0 or F.rank(b) == 0:
        return F.tensor_mul(a, b)

    shape_a = F.shape(a)
    shape_b = F.shape(b)
    _check_shape_aligned(shape_a, shape_b)
    # After the alignment check both last dims are equal; flatten all
    # leading axes so the sum-product reduces to a single (transposed) matmul.
    last_dim = shape_a[-1]
    flat_a = F.reshape(a, (F.shape_mul(shape_a[:-1]), last_dim))
    flat_b = F.reshape(b, (F.shape_mul(shape_b[:-1]), last_dim))

    prod = _matmul_t(flat_a, flat_b)
    # Restore the non-contracted axes of both operands.
    return F.reshape(prod, shape_a[:-1] + shape_b[:-1])
686
687
def dot(a, b):
    """
    Returns the dot product of two arrays.

    Specifically,
    If both `a` and `b` are 1-D arrays, it is inner product of vectors
    (without complex conjugation).
    If both `a` and `b` are 2-D arrays, it is matrix multiplication.
    If either `a` or `b` is 0-D (scalar), it is equivalent to multiply.
    If `a` is an `N-D` array and `b` is a 1-D array, it is a sum product
    over the last axis of `a` and `b`.
    If `a` is an `N-D` array and `b` is an `M-D` array (where ``M>=2``), it is a
    sum product over the last axis of `a` and the second-to-last axis of `b`:
    ``dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])``

    Note:
        Numpy argument `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and
        np.float64.

    Args:
        a (Tensor): input tensor
        b (Tensor): input tensor

    Returns:
        Tensor or scalar, the dot product of `a` and `b`. If `a` and `b` are
        both scalars or both 1-D arrays then a scalar is returned;
        otherwise an array is returned

    Raises:
        ValueError: If the last dimension of `a` is not the same size
            as the second-to-last dimension of `b`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.full((1, 3), 7).astype('float32')
        >>> b = np.full((2, 3, 4), 5).astype('float32')
        >>> output = np.dot(a, b)
        >>> print(output)
        [[[105. 105. 105. 105.]
        [105. 105. 105. 105.]]]
    """
    ndim_a, ndim_b = F.rank(a), F.rank(b)
    # Scalar operand: numpy dot degenerates to element-wise multiplication.
    if ndim_a == 0 or ndim_b == 0:
        return F.tensor_mul(a, b)
    if ndim_a > 0 and ndim_b >= 2:
        # Swap b's last two axes so the contraction axis (second-to-last in
        # numpy's dot semantics) becomes b's last axis; the aligned matmul
        # below then contracts over the last axis of both operands.
        perm = F.make_range(ndim_b)
        perm = perm[:-2] + (perm[-1],) + (perm[-2],)
        b = F.transpose(b, perm)

    # Shapes must agree on the contraction axis (after the transpose above).
    if F.shape(a)[-1] != F.shape(b)[-1]:
        _raise_value_error('shapes are not aligned')
    # Flatten all leading axes so the sum product becomes one matmul with
    # the second operand transposed.
    a_aligned = F.reshape(a, (-1, F.shape(a)[-1]))
    b_aligned = F.reshape(b, (-1, F.shape(b)[-1]))

    res = _matmul_t(a_aligned, b_aligned)
    # Note: F.shape(b) here is the transposed shape, so b.shape[:-1] already
    # equals b_orig.shape[:-2] + b_orig.shape[-1:], matching numpy's result.
    res = F.reshape(res, F.shape(a)[:-1] + F.shape(b)[:-1])
    return res
750
751
def outer(a, b):
    """
    Computes the outer product of two vectors.

    Given two vectors, ``a = [a0, a1, ..., aM]`` and ``b = [b0, b1, ..., bN]``,
    the outer product is:

    ``[[a0*b0  a0*b1 ... a0*bN ]``

    ``[a1*b0    .              ]``

    ``[ ...          .         ]``

    ``[aM*b0            aM*bN ]]``

    Note:
        Numpy argument ``out`` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and
        np.float64.

    Args:
        a (Tensor): first input vector. Input is flattened if not
                    already 1-dimensional.
        b (Tensor): second input vector. Input is flattened if not
                    already 1-dimensional.

    Returns:
        Tensor or scalar, ``out[i, j] = a[i] * b[j]``.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.full(7, 2).astype('float32')
        >>> b = np.full(4, 3).astype('float32')
        >>> output = np.outer(a, b)
        >>> print(output.shape)
        (7, 4)
    """
    _check_input_tensor(a, b)
    # Flatten any multi-dimensional input down to a vector first.
    if F.rank(a) != 1:
        a = ravel(a)
    if F.rank(b) != 1:
        b = ravel(b)
    # Shape a as a column (M, 1) and b as a row (1, N); their matmul is the
    # (M, N) outer product.
    col = F.reshape(a, (F.shape(a)[0], 1))
    row = _expand(b, 2)
    return _matmul(col, row)
810
811
def tensordot(a, b, axes=2):
    """
    Computes tensor dot product along specified axes.

    Given two tensors, `a` and `b`, and an array_like object containing two array_like
    objects, `(a_axes, b_axes)`, sum the products of `a`'s and `b`'s elements (components)
    over the axes specified by `a_axes` and `b_axes`. The third argument can be a single
    non-negative integer_like scalar, `N`; if it is such, then the last `N` dimensions of
    `a` and the first `N` dimensions of `b` are summed over.
    Three common use cases are:

    - ``axes = 0`` : tensor product

    - ``axes = 1`` : tensor dot product

    - ``axes = 2`` : (default) tensor double contraction

    When axes is integer_like, the sequence for evaluation will be: first the `-Nth`
    axis in `a` and 0th axis in `b`, and the -1th axis in `a` and `Nth` axis in `b` last.
    When there is more than one axis to sum over - and they are not the last (first)
    axes of `a` `(b)` - the argument axes should consist of two sequences of the same
    length, with the first axis to sum over given first in both sequences, the second
    axis second, and so forth.
    The shape of the result consists of the non-contracted axes of the first tensor,
    followed by the non-contracted axes of the second.

    Note:
        On CPU, the supported dypes are np.float16 and np.float32.
        On GPU, the supported dypes are np.float16 and np.float32.

    Args:
        a (Tensor): Tensor to "dot".
        b (Tensor): Tensor to "dot".
        axes (int or sequence of ints):

            integer_like: If an int `N`, sum over the last `N` axes of `a` and the first `N`
            axes of `b` in order. The sizes of the corresponding axes must match.

            sequence of ints: Or, a list of axes to be summed over, first sequence
            applying to `a`, second to `b`. Both elements `array_like` must be of the same
            length.

    Returns:
        Tensor, or list of tensors, the tensor dot product of the input.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((3, 4, 5))
        >>> b = np.ones((4, 3, 2))
        >>> output = np.tensordot(a, b, axes=([1,0],[0,1]))
        >>> print(output.shape)
        (5, 2)
    """
    # With axes=0 and a scalar operand, the contraction is just an
    # element-wise product; C.tensor_dot handles every other case.
    scalar_operand = F.rank(a) * F.rank(b) == 0
    if scalar_operand and axes == 0:
        return F.tensor_mul(a, b)
    return C.tensor_dot(a, b, axes)
871
872
def std(x, axis=None, ddof=0, keepdims=False):
    """
    Computes the standard deviation along the specified axis.
    The standard deviation is the square root of the average of the squared deviations
    from the mean, i.e., :math:`std = sqrt(mean(abs(x - x.mean())**2))`.

    Returns the standard deviation, which is computed for the flattened array by default,
    otherwise over the specified axis.

    Note:
        Numpy arguments `dtype`, `out` and `where` are not supported.

    Args:
        x (Tensor): A Tensor to be calculated.
        axis (Union[None, int, tuple(int)]): Axis or axes along which the standard
            deviation is computed. Default: `None`.

            If `None`, compute the standard deviation of the flattened array.
        ddof (int): Means Delta Degrees of Freedom. The divisor used in calculations is :math:`N - ddof`,
            where :math:`N` represents the number of elements. Default: 0.
        keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
            dimensions with size one. With this option, the result will broadcast correctly against
            the input tensor. Default: `False`.

    Returns:
        Standard deviation tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.array([1., 2., 3., 4.])
        >>> print(np.std(input_x))
        1.118034
    """
    # Convert array_like input, then defer to the Tensor method which
    # implements the actual reduction.
    tensor_x = _to_tensor(x)
    return tensor_x.std(axis, ddof, keepdims)
914
915
def var(x, axis=None, ddof=0, keepdims=False):
    """
    Computes the variance along the specified axis.
    The variance is the average of the squared deviations from the mean, i.e.,
    :math:`var = mean(abs(x - x.mean())**2)`.

    Returns the variance, which is computed for the flattened array by default,
    otherwise over the specified axis.

    Note:
        Numpy arguments `dtype`, `out` and `where` are not supported.

    Args:
        x (Tensor): A Tensor to be calculated.
        axis (Union[None, int, tuple(int)]): Axis or axes along which the variance is computed.
            The default is to compute the variance of the flattened array. Default: `None`.
        ddof (int): Means Delta Degrees of Freedom. Default: 0.
            The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the number of elements.
        keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
            dimensions with size one. With this option, the result will broadcast correctly against the input tensor.
            If the default value is passed, then keepdims will not be passed through to the var method of
            sub-classes of tensor, however any non-default value will be. If the sub-class' method does not
            implement keepdims any exceptions will be raised. Default: `False`.

    Returns:
        Variance tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.array([1., 2., 3., 4.])
        >>> output = np.var(input_x)
        >>> print(output)
        1.25
    """
    # Accept array_likes by converting to Tensor, then defer to Tensor.var.
    x = _to_tensor(x)
    return x.var(axis, ddof, keepdims)
955
956
def ptp(x, axis=None, keepdims=False):
    """
    Range of values (maximum - minimum) along an axis.
    The name of the function comes from the acronym for 'peak to peak'.

    Note:
        Numpy arguments `dtype` and `out` are not supported.

    Args:
        x (Tensor): Input tensor.
        axis (Union[None, int, tuple(int)]): Axis or axes along which the range is computed.
            The default is to compute the range of the flattened array. Default: None.
        keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
            dimensions with size one. With this option, the result will broadcast correctly against the input tensor.
            If the default value is passed, then keepdims will not be passed through to the ptp method of
            sub-classes of tensor, however any non-default value will be. Default is False.

    Returns:
        Tensor.

    Raises:
        TypeError: if inputs have types not specified above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([[4.0, 9.0, 2.0, 10.0], [6.0, 9.0, 7.0, 12.0]])
        >>> print(np.ptp(x, axis=1))
        [8. 6.]
        >>> print(np.ptp(x, axis=0))
        [2. 0. 5. 2.]
    """
    # Unlike std/var above, a Tensor input is required here (no array_like conversion).
    _check_input_tensor(x)
    return x.ptp(axis, keepdims)
993
994
def average(x, axis=None, weights=None, returned=False):
    """
    Computes the weighted average along the specified axis.

    Args:
        x (Tensor): A Tensor to be averaged.
        axis (Union[None, int, tuple(int)]): Axis along which to average `x`. Default: `None`.
            If the axis is `None`, it will average over all of the elements of the tensor `x`.
            If the axis is negative, it counts from the last to the first axis.
        weights (Union[None, Tensor]): Weights associated with the values in `x`. Default: `None`.
            If `weights` is `None`, all the data in `x` are assumed to have a weight equal to one.
            If `weights` is 1-D tensor, the length must be the same as the given axis.
            Otherwise, `weights` should have the same shape as `x`.
        returned (bool): Default: `False`.
            If `True`, the tuple (average, sum_of_weights) is returned.
            If `False`, only the average is returned.

    Returns:
        Averaged Tensor. If returned is `True`, return tuple.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.array([[1., 2.], [3., 4.]])
        >>> output = np.average(input_x, axis=0, weights=input_x, returned=True)
        >>> print(output)
        (Tensor(shape=[2], dtype=Float32, value= [ 2.50000000e+00,  3.33333325e+00]),
         Tensor(shape=[2], dtype=Float32, value= [ 4.00000000e+00,  6.00000000e+00]))
    """
    _check_input_tensor(x)
    if axis is not None:
        _check_axis_type(axis, True, True, False)
        axis = _canonicalize_axis(axis, x.ndim)

    # Placeholder result; overwritten by every reachable branch below.
    avg = full((), nan, F.dtype(x))
    weights_sum = None

    if weights is None:
        # Unweighted case: plain mean, with an implicit weight of one per element.
        avg = mean(x, axis)
        weights_sum = compute_weights_for_mean(x, avg, axis)
    else:
        _check_input_tensor(weights)
        if x.shape == weights.shape:
            avg, weights_sum = comput_avg(x, axis, weights)
        elif F.rank(weights) == 1:
            # 1-D weights are broadcast along the reduction axis, which must be a single int.
            if not isinstance(axis, int):
                _raise_type_error("Axis must be specified when shapes of x and weights differ.")
            expanded = _expanded_shape(x.ndim, weights.shape[0], axis)
            avg, weights_sum = comput_avg(x, axis, weights.reshape(expanded))
        else:
            _raise_type_error("Weights should be None, 1-D or the same shape as input x.")

    if not returned:
        return avg
    if avg.shape != weights_sum.shape:
        weights_sum = _broadcast_to(weights_sum, weights_sum.shape, avg.shape, avg.ndim)
    return (avg, weights_sum)
1055
1056
def compute_weights_for_mean(x, x_avg, axis):
    """Computes weights for np.average.

    Returns the sum of (implicit, all-ones) weights for the unweighted case:
    a 0-d tensor holding ``x.size`` when `axis` is None, otherwise a tensor
    shaped like `x_avg` filled with the product of the reduced axis lengths.
    """
    if axis is None:
        sum_of_weights = full((), x.size, F.dtype(x))
    else:
        # Note: the original code had an unreachable `elif axis is None` branch
        # here; it was dead code since this `else` already implies axis is not None.
        fill_value = 1
        if isinstance(axis, int) or (isinstance(axis, tuple) and F.tuple_len(axis) == 1):
            fill_value = x.shape[axis] if isinstance(axis, int) else x.shape[axis[0]]
        else:
            for ax in axis:
                fill_value *= x.shape[ax]
        sum_of_weights = full_like(x_avg, fill_value, F.dtype(x))
    return sum_of_weights
1073
1074
def comput_avg(x, axis, weights):
    """Returns the weighted mean of `x` over `axis` together with the sum of weights."""
    if axis is None:
        axis = ()
    weighted = F.tensor_mul(x, weights)
    weights_sum = _reduce_sum_default(weights, axis)
    weighted_sum = _reduce_sum_default(weighted, axis)
    return F.tensor_div(weighted_sum, weights_sum), weights_sum
1083
1084
def matmul(x1, x2, dtype=None):
    """
    Returns the matrix product of two arrays.

    Note:
        Numpy arguments `out`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16 and np.float32.
        On CPU, the supported dtypes are np.float16 and np.float32.

    Args:
        x1 (Tensor): Input tensor, scalar not allowed.
        x2 (Tensor): Input tensor, scalar not allowed.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the matrix product of the inputs. This is a scalar only
        when both `x1`, `x2` are 1-d vectors.

    Raises:
        ValueError: If the last dimension of `x1` is not the same size as the
            second-to-last dimension of `x2`, or if a scalar value is passed in.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.arange(2*3*4).reshape(2, 3, 4).astype('float32')
        >>> x2 = np.arange(4*5).reshape(4, 5).astype('float32')
        >>> output = np.matmul(x1, x2)
        >>> print(output)
        [[[  70.   76.   82.   88.   94.]
        [ 190.  212.  234.  256.  278.]
        [ 310.  348.  386.  424.  462.]]
        [[ 430.  484.  538.  592.  646.]
        [ 550.  620.  690.  760.  830.]
        [ 670.  756.  842.  928. 1014.]]]
    """
    # All broadcasting and shape validation is delegated to the composite op.
    product = C.matmul(x1, x2, dtype=dtype)
    return product
1126
1127
def square(x, dtype=None):
    """
    Returns the element-wise square of the input.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16 and np.float32.

    Args:
        x (Tensor): Input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, element-wise ``x*x``, of the same shape and dtype as `x`.
        This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.square(np.arange(6).reshape(2, 3).astype('float32'))
        >>> print(x)
        [[ 0.  1.  4.]
        [ 9. 16. 25.]]
    """
    return _apply_tensor_op(F.square, x, dtype=dtype)
1157
1158
def sqrt(x, dtype=None):
    """
    Returns the non-negative square-root of an array, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16 and np.float32.

    Args:
        x (Tensor): The values whose square-roots are required.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, an array of the same shape as `x`, containing the positive
        square-root of each element in `x`. For negative elements, nan is returned.
        This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(6).reshape(2, 3).astype('float32')
        >>> x_squared = np.square(x)
        >>> output = np.sqrt(x_squared)
        >>> print(output)
        [[ 0. 1. 2.]
        [ 3. 4. 5.]]
    """
    # Element-wise dispatch (with optional dtype override) handled by the shared helper.
    return _apply_tensor_op(F.sqrt, x, dtype=dtype)
1191
1192
def reciprocal(x, dtype=None):
    """
    Returns the reciprocal of the argument, element-wise.

    Calculates ``1/x``.

    Note:
        Numpy arguments `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        When `where` is provided, `out` must have a tensor value. `out` is not supported
        for storing the result, however it can be used in combination with `where` to set
        the value at indices for which `where` is set to False.

    Args:
        x (Tensor): Input array. For integer arguments with absolute value larger
            than 1 the result is always zero because of the way Python handles
            integer division. For integer zero the result is an overflow.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, this is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(1, 7).reshape(2, 3).astype('float32')
        >>> output = np.reciprocal(x)
        >>> print(output)
        [[1.         0.5        0.33333334]
        [0.25       0.2        0.16666667]]
    """
    def _reciprocal(x):
        """Divides one by x element-wise."""
        return F.tensor_div(1, x)

    return _apply_tensor_op(_reciprocal, x, dtype=dtype)
1228
1229
def log(x, dtype=None):
    """
    Returns the natural logarithm, element-wise.

    The natural logarithm log is the inverse of the exponential function, so that
    ``log(exp(x)) = x``. The natural logarithm is logarithm in base e.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and np.float64.

    Args:
        x (Tensor): Input array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the natural logarithm of `x`, element-wise. This is a
        scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([2, 3, 4]).astype('float32')
        >>> output = np.log(x)
        >>> print(output)
        [0.69314575 1.09861    1.3862929 ]
    """
    # Element-wise dispatch (with optional dtype override) handled by the shared helper.
    return _apply_tensor_op(F.log, x, dtype=dtype)
1263
1264
def _prop_nan(fn, x1, x2):
    """Selects NaN if either element is NaN"""
    nan_mask = F.logical_or(_isnan(x1), _isnan(x2))
    nan_fill = F.fill(_promote(F.dtype(x1), F.dtype(x2)), F.shape(nan_mask), nan)
    return F.select(nan_mask, nan_fill, fn(x1, x2))
1271
1272
def maximum(x1, x2, dtype=None):
    """
    Returns the element-wise maximum of array elements.

    Compares two arrays and returns a new array containing the element-wise maxima.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On Ascend, input arrays containing inf or NaN are not supported.

    Args:
        x1 (Tensor): Input array
        x2 (Tensor): The array holding the elements to be compared. If
            ``x1.shape != x2.shape``, they must be broadcastable to a common shape
            (which becomes the shape of the output).
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the maximum of `x1` and `x2`, element-wise. This is a scalar
        if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.maximum(np.array([2, 3, 4]), np.array([1, 5, 2]))
        >>> print(output)
        [2 5 4]
    """
    # Coerce scalar/sequence inputs to tensors; reject everything else.
    if not isinstance(x1, Tensor):
        if isinstance(x1, (int, float, bool, list, tuple)):
            x1 = asarray_const(x1)
        else:
            _raise_type_error("Input x1 is expected to be array_like")

    if not isinstance(x2, Tensor):
        if isinstance(x2, (int, float, bool, list, tuple)):
            x2 = asarray_const(x2)
        else:
            _raise_type_error("Input x2 is expected to be array_like")

    prop_max = functools.partial(_prop_nan, F.maximum)
    # F.maximum does not support when both operands are scalar
    if x1.ndim == 0 and x2.ndim == 0:
        x1 = expand_dims(x1, 0)
        return _apply_tensor_op(prop_max, x1, x2, dtype=dtype).squeeze()
    # NOTE(review): when exactly one operand is 0-d, `dtype` is replaced by the
    # other operand's dtype, silently overriding a caller-supplied dtype —
    # preserved as-is, confirm whether intentional.
    if x1.ndim == 0:
        dtype = x2.dtype
    elif x2.ndim == 0:
        dtype = x1.dtype
    return _apply_tensor_op(prop_max, x1, x2, dtype=dtype)
1324
1325
def heaviside(x1, x2, dtype=None):
    """
    Computes the Heaviside step function.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): Input values.
        x2 (Tensor): The value of the function when `x1` is 0. If
            ``x1.shape != x2.shape``, they must be broadcastable to a common shape
            (which becomes the shape of the output).
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the output array, element-wise Heaviside step function
        of `x1`. This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.heaviside(np.array([-1.5, 0, 2.0]), np.array(0.5))
        >>> print(output)
        [0.  0.5 1. ]
        >>> output = np.heaviside(np.array([-1.5, 0, 2.0]), np.array(1))
        >>> print(output)
        [0. 1. 1.]
    """

    def _heaviside(inputs, zero_value):
        """Computes heaviside without passing keyword arguments"""
        # Promote both operands to a common dtype.
        dtype_out = _promote(F.dtype(inputs), F.dtype(zero_value))
        if not _check_same_type(F.dtype(inputs), dtype_out):
            inputs = F.cast(inputs, dtype_out)
        if not _check_same_type(F.dtype(zero_value), dtype_out):
            zero_value = F.cast(zero_value, dtype_out)

        # Broadcast both operands to the common output shape.
        shape_out = _infer_out_shape(F.shape(inputs), F.shape(zero_value))
        inputs = _broadcast_to_shape(inputs, shape_out)
        zero_value = _broadcast_to_shape(zero_value, shape_out)

        # 0 where inputs < 0, 1 where inputs > 0, and zero_value where inputs == 0.
        result = F.select(inputs < 0, zeros(shape_out, dtype_out), zero_value)
        result = F.select(inputs > 0, ones(shape_out, dtype_out), result)
        return result

    return _apply_tensor_op(_heaviside, x1, x2, dtype=dtype)
1380
1381
def amax(a, axis=None, keepdims=False, initial=None, where=True):
    """
    Returns the maximum of an array or maximum along an axis.

    Note:
        Numpy argument `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        a (Tensor): Input data.
        axis (None or int or tuple of integers, optional): defaults to None. Axis or
            axes along which to operate. By default, flattened input is used. If
            this is a tuple of integers, the maximum is selected over multiple axes,
            instead of a single axis or all the axes as before.
        keepdims (boolean, optional): defaults to False.
            If this is set to True, the axes which are reduced are left in the
            result as dimensions with size one. With this option, the result will
            broadcast correctly against the input array.
        initial (scalar, optional): defaults to None.
            The minimum value of an output element. Must be present to allow
            computation on empty slice.
        where (boolean Tensor, optional): defaults to True.
            A boolean array which is broadcasted to match the dimensions of array,
            and selects elements to include in the reduction. If non-default value
            is passed, initial must also be provided.

    Returns:
        Tensor or scalar, maximum of `a`. If `axis` is None, the result is a scalar
        value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(4).reshape((2,2)).astype('float32')
        >>> output = np.amax(a)
        >>> print(output)
        3.0
        >>> output = np.amax(a, axis=0)
        >>> print(output)
        [2. 3.]
        >>> output = np.amax(a, axis=1)
        >>> print(output)
        [1. 3.]
        >>> output = np.amax(a, where=np.array([False, True]), initial=-1, axis=0)
        >>> print(output)
        [-1.  3.]
    """
    # Thin wrapper: all reduction semantics live in Tensor.max.
    return a.max(axis, keepdims, initial, where)
1435
1436
def amin(a, axis=None, keepdims=False, initial=None, where=True):
    """
    Returns the minimum of an array or minimum along an axis.

    Note:
        Numpy argument `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        a (Tensor): Input data.
        axis (None or int or tuple of integers, optional): defaults to None. Axis or
            axes along which to operate. By default, flattened input is used. If
            this is a tuple of integers, the minimum is selected over multiple axes,
            instead of a single axis or all the axes as before.
        keepdims (bool, optional): defaults to False.
            If this is set to True, the axes which are reduced are left in the
            result as dimensions with size one. With this option, the result will
            broadcast correctly against the input array.
        initial (Number, optional): defaults to None.
            The maximum value of an output element. Must be present to allow
            computation on empty slice.
        where (bool Tensor, optional): defaults to True.
            A boolean array which is broadcasted to match the dimensions of array,
            and selects elements to include in the reduction. If non-default value
            is passed, initial must also be provided.

    Returns:
        Tensor or scalar, minimum of `a`. If axis is None, the result is a scalar
        value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(4).reshape((2,2)).astype('float32')
        >>> output = np.amin(a)
        >>> print(output)
        0.0
        >>> output = np.amin(a, axis=0)
        >>> print(output)
        [0. 1.]
        >>> output = np.amin(a, axis=1)
        >>> print(output)
        [0. 2.]
        >>> output = np.amin(a, where=np.array([False, True]), initial=10, axis=0)
        >>> print(output)
        [10.  1.]
    """
    # Thin wrapper: all reduction semantics live in Tensor.min.
    return a.min(axis, keepdims, initial, where)
1490
1491
def hypot(x1, x2, dtype=None):
    """
    Given the "legs" of a right triangle, returns its hypotenuse.

    Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or `x2` is scalar_like
    (i.e., unambiguously cast-able to a scalar type), it is broadcast for use
    with each element of the other argument. (See Examples)

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16 and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and np.float64.

    Args:
        x1 (Tensor): Leg of the triangle(s).
        x2 (Tensor): Leg of the triangle(s). If ``x1.shape != x2.shape``, they
            must be broadcastable to a common shape (which becomes the shape of
            the output).
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the hypotenuse of the triangle(s). This is a scalar if
        both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
        >>> print(output)
        [[5. 5. 5.]
        [5. 5. 5.]
        [5. 5. 5.]]
        >>> output = np.hypot(3*np.ones((3, 3)), np.array([4.0]))
        >>> print(output)
        [[5. 5. 5.]
        [5. 5. 5.]
        [5. 5. 5.]]
    """

    def _hypot(a, b):
        """Computes hypotenuse without passing keyword arguments"""
        a_squared = F.square(a)
        b_squared = F.square(b)
        if _get_device() == 'CPU':
            # broadcast is not fully supported in tensor_add on CPU,
            # so we use tensor_sub as a substitute solution
            return F.sqrt(F.tensor_sub(a_squared, F.neg_tensor(b_squared)))
        return F.sqrt(F.tensor_add(a_squared, b_squared))

    return _apply_tensor_op(_hypot, x1, x2, dtype=dtype)
1544
1545
def floor(x, dtype=None):
    """
    Returns the floor of the input, element-wise.

    The floor of the scalar `x` is the largest integer `i`, such that ``i <= x``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16 and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and np.float64.

    Args:
        x (Tensor): input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the floor of each element in `x`. This is a scalar if `x`
        is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.floor(np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]))
        >>> print(output)
        [-2. -2. -1.  0.  1.  1.  2.]
    """
    # Element-wise dispatch (with optional dtype override) handled by the shared helper.
    return _apply_tensor_op(F.floor, x, dtype=dtype)
1577
1578
def floor_divide(x1, x2, dtype=None):
    """
    Returns the largest integer smaller or equal to the division of the inputs.
    It is equivalent to the Python // operator and pairs with the
    Python % (remainder), function so that ``a = a % b + b * (a // b)`` up to roundoff.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): Input array.
        x2 (Tensor): Input array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.floor_divide(np.array([1., 2., 3., 4.]), np.array(2.5))
        >>> print(output)
        [0. 0. 1. 1.]
    """
    # Binary element-wise dispatch handled by the shared helper.
    return _apply_tensor_op(F.tensor_floordiv, x1, x2, dtype=dtype)
1608
1609
def _remainder(x1, x2, c_style=False):
    """Computes remainder without applying keyword arguments.

    With c_style=False the quotient is floored (Python % semantics); with
    c_style=True it is truncated toward zero (C fmod semantics). For integer
    inputs the computation runs in float32 and a zero divisor yields zero.
    """
    out_dtype = _promote(F.dtype(x1), F.dtype(x2))
    if not _check_is_float(out_dtype):
        # Integer remainder is computed in float space and cast back at the end.
        x1 = F.cast(x1, mstype.float32)
        x2 = F.cast(x2, mstype.float32)

    quotient = F.tensor_div(x1, x2)
    quotient = fix(quotient) if c_style else F.floor(quotient)
    remainder_val = F.tensor_sub(x1, F.tensor_mul(x2, quotient))
    if _check_is_int(out_dtype):
        # Division by zero for integer dtypes maps to 0 instead of inf/nan.
        zeros_tensor = zeros(F.shape(quotient), F.dtype(quotient))
        remainder_val = F.select(F.equal(x2, zeros_tensor), zeros_tensor, remainder_val)

    if not _check_same_type(F.dtype(remainder_val), out_dtype):
        remainder_val = F.cast(remainder_val, out_dtype)
    return remainder_val
1632
1633
def remainder(x1, x2, dtype=None):
    """
    Returns element-wise remainder of division.

    Computes the remainder complementary to the floor_divide function. It is
    equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
    as the divisor `x2`. The MATLAB function equivalent to np.remainder is mod.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): input array.
        x2 (Tensor): input array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the element-wise remainder of the quotient
        ``floor_divide(x1, x2)``. This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.remainder(np.array([4, 7]), np.array([2, 3]))
        >>> print(output)
        [0 1]
        >>> output = np.remainder(np.arange(7), np.array(5))
        >>> print(output)
        [0 1 2 3 4 0 1]
    """
    # Delegates to _remainder with floored (Python-style) quotient semantics.
    return _apply_tensor_op(_remainder, x1, x2, dtype=dtype)
1669
1670
def fix(x):
    """
    Rounds to nearest integer towards zero.

    Rounds an array of floats element-wise to nearest integer towards zero. The
    rounded values are returned as floats.

    Note:
        Numpy argument `out` is not supported.

    Args:
        x (Tensor): An array of floats to be rounded.

    Returns:
        Tensor.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.fix(np.array([2.1, 2.9, -2.1, -2.9]))
        >>> print(output)
        [ 2.  2. -2. -2.]
    """
    _check_input_tensor(x)
    if not _check_is_float(F.dtype(x)):
        x = F.cast(x, mstype.float32)
    rounded_down = F.floor(x)
    # change to F.ceil once supported on CPU.
    rounded_up = F.neg_tensor(F.floor(F.neg_tensor(x)))
    # Truncation toward zero: ceil for negatives, floor for non-negatives.
    negative = F.tensor_lt(x, zeros(F.shape(x), F.dtype(x)))
    return F.select(negative, rounded_up, rounded_down)
1707
1708
def fmod(x1, x2, dtype=None):
    """
    Returns the element-wise remainder of division.

    This is the NumPy implementation of the C library function fmod: the
    remainder carries the same sign as the dividend `x1`. It is equivalent to
    the Matlab(TM) rem function and should not be confused with the Python
    modulus operator ``x1 % x2``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): the first input arrays.
        x2 (Tensor): the second input arrays.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the remainder of the division of `x1` by `x2`. This is a
        scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.fmod(np.array([-3, -2, -1, 1, 2, 3]), np.array(2))
        >>> print(output)
        [-1  0 -1  1  0  1]
    """
    def _fmod(a, b):
        # c_style=True requests truncated (C-style) division, so the
        # remainder follows the sign of the dividend.
        return _remainder(a, b, c_style=True)

    return _apply_tensor_op(_fmod, x1, x2, dtype=dtype)
1741
1742
def trunc(x, dtype=None):
    """
    Returns the truncated value of the input, element-wise.

    The truncated value of a scalar `x` is the nearest integer `i` closer to
    zero than `x`; in other words, the fractional part of the signed number
    `x` is discarded.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the truncated value of each element in `x`. This is a scalar if `x` is
        a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.trunc(np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]))
        >>> print(output)
        [-1. -1. -0.  0.  1.  1.  2.]
    """
    # truncation towards zero is exactly what fix implements
    return _apply_tensor_op(fix, x, dtype=dtype)
1773
1774
def exp(x, dtype=None):
    """
    Computes the exponential of every element of the input array.

    Note:
        Numpy arguments `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        When `where` is provided, `out` must have a tensor value. `out` is not supported
        for storing the result, however it can be used in combination with `where` to set
        the value at indices for which `where` is set to False.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, np.float64.

    Args:
        x (Tensor): input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, element-wise exponential of `x`. This is a scalar if both
        `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.exp(np.arange(5).astype(np.float32))
        >>> print(output)
        [ 1.         2.718282   7.3890557 20.085537  54.598145 ]
    """
    return _apply_tensor_op(F.tensor_exp, x, dtype=dtype)
1807
1808
def expm1(x, dtype=None):
    """
    Computes ``exp(x) - 1`` for every element of the array.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, and np.float32.

    Args:
        x (Tensor): input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, element-wise exponential minus one, ``out = exp(x) - 1``.
        This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.expm1(np.arange(5).astype(np.float32))
        >>> print(output)
        [ 0.         1.7182819  6.389056  19.085537  53.59815  ]
    """
    return _apply_tensor_op(F.tensor_expm1, x, dtype=dtype)
1838
1839
def divmod_(x1, x2, dtype=None):
    """
    Returns the element-wise quotient and remainder simultaneously.

    Args:
        x1(Union[Tensor]): Dividend tensor.
        x2(Union[Tensor, int, float, bool]): Divisor. If ``x1.shape != x2.shape``,
            they must be broadcastable to a common shape.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Element-wise quotient and remainder from floor division, in format of (quotient, remainder)

    Raises:
        TypeError: if `x1` and `x2` are not Tensor or scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([1, 2, 3, 4, 5])
        >>> print(np.divmod(a, 1.5))
        (Tensor(shape=[5], dtype=Float32,
         value= [ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00,  2.00000000e+00,  3.00000000e+00]),
         Tensor(shape=[5], dtype=Float32,
         value= [ 1.00000000e+00,  5.00000000e-01,  0.00000000e+00,  1.00000000e+00,  5.00000000e-01]))
    """
    quotient = F.tensor_floordiv(x1, x2)
    rem = remainder(x1, x2)
    if dtype is None:
        return (quotient, rem)
    # honor explicit output dtype override on both parts
    return (quotient.astype(dtype), rem.astype(dtype))
1875
1876
def _handle_prepend_append(combined, tensor, additional_tensor, axis):
    """Broadcasts a prepend/append value to a length-1 slice of `tensor` along
    `axis` and adds it to the tuple of tensors to be concatenated."""
    if isinstance(additional_tensor, (int, float, bool)):
        additional_tensor = asarray_const(additional_tensor)
    elif not isinstance(additional_tensor, Tensor):
        _raise_type_error("prepend must be scalar or Tensor, but got ", additional_tensor)
    # target shape matches `tensor` except the concat axis, which becomes 1
    target_shape = _tuple_setitem(tensor.shape, axis, 1)
    additional_tensor = _broadcast_to_shape(additional_tensor, target_shape)
    return combined + (additional_tensor,)
1888
1889
def diff(a, n=1, axis=-1, prepend=None, append=None):
    """
    Calculates the n-th discrete difference along the given axis.

    The first difference is given by :math:`out[i] = a[i+1] - a[i]` along the given axis,
    higher differences are calculated by using `diff` iteratively.

    Note:
        Since zero-shaped Tensor is not supported in MindSpore, a value error is raised if
        an empty Tensor is encountered.

    Args:
        a (Tensor): Input tensor.
        n (int, optional): The number of times values are differenced. If zero,
            the input is returned as-is.
        axis (int, optional): The axis along which the difference is taken, default
            is the last axis.
        prepend/append (Tensor, optional): Values to prepend or append to a along
            `axis` prior to performing the difference. Scalar values are expanded to
            arrays with length 1 in the direction of `axis` and the shape of the input
            array along all other axes. Otherwise the dimension and shape must
            match `a` except along axis.

    Returns:
        The n-th differences. The shape of the output is the same as a except along
        `axis` where the dimension is smaller by `n`. The type of the output is the same
        as the type of the difference between any two elements of `a`. This is the same
        as the type of `a` in most cases.

    Raises:
        TypeError: If inputs have types not specified above.
        ValueError: If ``n < 0``.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> arr = np.array([1, 3, -1, 0, 4])
        >>> print(np.diff(arr, n=2))
        [-6  5  3]
    """
    # This implementation is inspired by jax.numpy
    _check_input_tensor(a)
    axis = _canonicalize_axis(axis, a.ndim)
    if not isinstance(n, int):
        _raise_type_error("Input n should be int, but got ", n)
    if n < 0:
        # n == 0 is valid (identity), so only negative values are rejected
        _raise_value_error("Input n must be non-negative.")
    if n == 0:
        return a

    # assemble (prepend, a, append) along `axis` before differencing
    combined = ()
    if prepend is not None:
        combined = _handle_prepend_append(combined, a, prepend, axis)

    combined += (a,)

    if append is not None:
        combined = _handle_prepend_append(combined, a, append, axis)

    if combined:
        a = concatenate(combined, axis)

    # each difference shortens `axis` by 1; n >= length would leave an empty
    # tensor, which MindSpore does not support
    if n >= a.shape[axis]:
        _raise_value_error("n is bigger than the specified dimension, this will result in an empty tensor.")

    original_dtype = a.dtype
    # will change once F.tensor_slice supports types other than float32
    if not _check_is_float(original_dtype):
        a = a.astype(mstype.float32)
    # work on the last axis so the slice offsets below are simple
    a = moveaxis(a, axis, -1)
    for _ in F.make_range(n):
        slice_start = _list_comprehensions(F.rank(a) - 1, 0, True)
        slice_size = F.shape(a)[:-1] + (F.shape(a)[-1] - 1,)
        # a[..., 1:] - a[..., :-1]
        minuend = F.tensor_slice(a, slice_start + (1,), slice_size)
        subtrahend = F.tensor_slice(a, slice_start + (0,), slice_size)
        a = F.tensor_sub(minuend, subtrahend)
    if not _check_is_float(original_dtype):
        a = a.astype(original_dtype)
    return moveaxis(a, -1, axis)
1972
1973
def ediff1d(ary, to_end=None, to_begin=None):
    """
    Computes the differences between consecutive elements of a tensor.

    Args:
        ary (Tensor): If necessary, will be flattened before the differences are taken.
        to_end (Tensor or scalar, optional): Number(s) to append at the end of the
            returned differences.
        to_begin (Tensor or scalar, optional): Number(s) to prepend at the beginning
            of the returned differences.

    Returns:
        The differences.

    Raises:
        TypeError: If inputs have types not specified above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> arr = np.array([1, 3, -1, 0, 4])
        >>> print(np.ediff1d(arr))
        [ 2 -4  1  4]
    """
    _check_input_tensor(ary)
    pieces = ()

    if to_begin is not None:
        if isinstance(to_begin, Tensor):
            head = to_begin.ravel()
        else:
            head = _to_tensor(to_begin).ravel()
        # match the input dtype so concatenation is well-defined
        pieces += (head.astype(ary.dtype),)

    pieces += (diff(ary.ravel()),)

    if to_end is not None:
        if isinstance(to_end, Tensor):
            tail = to_end.ravel()
        else:
            tail = _to_tensor(to_end).ravel()
        pieces += (tail.astype(ary.dtype),)

    return P.Concat(0)(pieces)
2022
2023
def trapz(y, x=None, dx=1.0, axis=-1):
    """
    Integrates along the given axis using the composite trapezoidal rule.

    Integrates `y` (x) along given axis.

    Args:
        y (Tensor): Input array to integrate.
        x (Union[int, float, bool, list, tuple, Tensor], optional): The sample points
            corresponding to the `y` values. If `x` is None, the sample points are
            assumed to be evenly spaced `dx` apart. The default is None.
        dx (scalar, optional): The spacing between sample points when `x` is None. The
            default is 1.0.
        axis (int, optional): The axis along which to integrate. Defaults to -1.

    Returns:
        Tensor of float, definite integral as approximated by trapezoidal rule.

    Raises:
        ValueError: If axis is out of range of ``[-y.ndim, y.ndim)``.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(6).reshape(2, 3)
        >>> output = np.trapz(a,  x=[-2, 1, 2], axis=1)
        >>> print(output)
        [ 3. 15.]
        >>> output = np.trapz(a,  dx=3, axis=0)
        >>> print(output)
        [ 4.5  7.5 10.5]
    """
    y = _to_tensor(y)
    ndim = F.rank(y)
    _check_axis_in_range(axis, ndim)
    # normalize negative axis to its non-negative equivalent
    axis = axis + ndim if axis < 0 else axis
    # zero-offsets for the dimensions before/after `axis`, used to build slice starts
    y_start_axis_left = _list_comprehensions(axis, 0, True)
    y_start_axis_right = _list_comprehensions(ndim - axis - 1, 0, True)
    shape = F.shape(y)
    # slices along `axis` are one element shorter than y itself
    y_slice_size = _tuple_setitem(shape, axis, shape[axis] - 1)
    if x is not None:
        # non-uniform spacing: interval widths are the consecutive differences of x
        x = _to_tensor(x)
        dx = diff(x)
    else:
        dx = _to_tensor(dx)
    # align dx with the integration axis, then broadcast it to the slice shape
    dx = _expand(dx, ndim - axis, axis=-1)
    dx = _broadcast_to_shape(dx, y_slice_size)
    if not _check_is_float(F.dtype(y)):
        # trapz returns float
        y = F.cast(y, mstype.float32)
    dx = F.cast(dx, F.dtype(y))

    # product of dx and y with the last column removed
    y_slice_left = F.tensor_slice(y, y_start_axis_left + (0,) + y_start_axis_right, y_slice_size)
    prod_left = F.tensor_mul(y_slice_left, dx)
    # product of dx and y with the first column removed
    y_slice_right = F.tensor_slice(y, y_start_axis_left + (1,) + y_start_axis_right, y_slice_size)
    prod_right = F.tensor_mul(y_slice_right, dx)
    # trapezoid areas: (left + right) * dx / 2, summed along the axis
    prod_sum = F.tensor_div(F.tensor_add(prod_left, prod_right), _to_tensor(2.0).astype(F.dtype(y)))
    return F.reduce_sum(prod_sum, axis)
2086
2087
def _gcd(x1, x2):
    """Calculates gcd without applying keyword arguments.

    Implements a vectorized Euclidean algorithm on the absolute values of the
    inputs; the loop runs until every element's remainder reaches zero.
    """
    dtype = _promote(F.dtype(x1), F.dtype(x2))
    if not _check_is_float(dtype):
        # F.reduce_sum only supports float
        x1 = F.cast(x1, mstype.float32)
        x2 = F.cast(x2, mstype.float32)
    x1 = F.absolute(x1)
    x2 = F.absolute(x2)
    # a holds the element-wise larger value, b the smaller
    cond_ge = F.tensor_ge(x1, x2)
    a = where_(cond_ge, x1, x2)
    b = where_(cond_ge, x2, x1)
    # avoid dividing by zero: where b == 0, gcd(a, 0) == a, so substitute a
    b = where_(F.equal(b, ZERO_TENSOR), a, b)
    r = _remainder(a, b)
    # iterate (a, b) <- (b, a mod b) element-wise until all remainders are 0;
    # finished elements are frozen via the has_terminated mask
    while F.tensor_gt(F.reduce_sum(r), ZERO_TENSOR):
        r = _remainder(a, b)
        has_terminated = F.equal(r, ZERO_TENSOR)
        a = where_(has_terminated, a, b)
        b = where_(has_terminated, b, r)
    if not _check_same_type(F.dtype(b), dtype):
        # restore the promoted input dtype after the float working precision
        b = F.cast(b, dtype)
    return b
2110
2111
def gcd(x1, x2, dtype=None):
    """
    Returns the greatest common divisor of ``|x1|`` and ``|x2|``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): input data.
        x2 (Tensor): input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the greatest common divisor of the absolute value of the inputs.
        This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.gcd(np.arange(6), np.array(20))
        >>> print(output)
        [20  1  2  1  4  5]
    """
    # broadcasting, dtype handling and promotion are delegated to _apply_tensor_op
    return _apply_tensor_op(_gcd, x1, x2, dtype=dtype)
2140
2141
def lcm(x1, x2, dtype=None):
    """
    Returns the lowest common multiple of ``|x1|`` and ``|x2|``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): input data.
        x2 (Tensor): input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the lowest common multiple of the absolute value of the inputs.
        This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.lcm(np.arange(6), np.array(20))
        >>> print(output)
        [ 0 20 20 60 20 20]
    """
    def _lcm(a, b):
        """Computes lcm via lcm(a, b) = |a / gcd * b / gcd * gcd|."""
        divisor = _gcd(a, b)
        res_dtype = _promote(F.dtype(a), F.dtype(b))
        # work in float32 so the intermediate divisions are exact enough
        a = a.astype(mstype.float32)
        b = b.astype(mstype.float32)
        quot_a = F.tensor_div(a, divisor)
        quot_b = F.tensor_div(b, divisor)
        res = F.tensor_mul(F.tensor_mul(quot_a, quot_b), divisor)
        # lcm with 0 is 0 by convention
        has_zero = F.equal(multiply(a, b), ZERO_TENSOR)
        res = where_(has_zero, ZERO_TENSOR, res)
        return F.absolute(res).astype(res_dtype)

    return _apply_tensor_op(_lcm, x1, x2, dtype=dtype)
2183
2184
def convolve(a, v, mode='full'):
    """
    Returns the discrete, linear convolution of two one-dimensional sequences.

    Note:
        If `v` is longer than `a`, the tensors are swapped before computation.

    Args:
        a (Union[list, tuple, Tensor]): First one-dimensional input tensor.
        v (Union[list, tuple, Tensor]): Second one-dimensional input tensor.

        mode (str, optional): By default, mode is `\'full\'`. This returns the
            convolution at each point of overlap, with an output shape of :math:`(N+M-1,)`.
            At the end-points of the convolution, the signals do not overlap completely,
            and boundary effects may be seen.
            If `mode` is `\'same\'`, it returns output of length :math:`max(M, N)`. Boundary
            effects are still visible.
            If `mode` is `\'valid\'`, it returns output of length :math:`max(M, N) - min(M, N) + 1`.
            The convolution product is only given for points where the signals overlap
            completely. Values outside the signal boundary have no effect.

    Returns:
        Tensor, discrete, linear convolution of a and v.

    Raises:
        TypeError: if the inputs have types not specified above.
        ValueError: if a and v are empty or have wrong dimensions

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.convolve([1., 2., 3., 4., 5.], [2., 3.], mode="valid")
        >>> print(output)
        [ 7. 12. 17. 22.]
    """
    if not isinstance(a, Tensor):
        a = asarray_const(a)
    if not isinstance(v, Tensor):
        v = asarray_const(v)
    len_a = F.shape_mul(a.shape)
    len_v = F.shape_mul(v.shape)
    if len_a == 0 or len_v == 0:
        _raise_value_error("Inputs cannot be empty.")
    a = _expand(a, 1)
    v = _expand(v, 1)
    # remember the promoted dtype; the conv kernel itself runs in float32
    final_dtype = _promote(a.dtype, v.dtype)
    a = a.astype("float32")
    v = v.astype("float32")
    if a.ndim != 1 or v.ndim != 1:
        _raise_value_error("a and v must be 1-D tensor.")
    if len_a < len_v:
        # keep the longer sequence in `a` so the sliding window fits
        a, v = v, a
        len_a, len_v = len_v, len_a
    # convolution slides the reversed kernel across the signal
    v = v[::-1]
    return _compute_1d_conv(a, v, mode).astype(final_dtype)
2242
2243
def _handle_weights(weights, num_samples):
    """Validates fweights/aweights for np.cov and returns them as a positive
    float32 1-D tensor of length `num_samples`."""
    weights = asarray_const(weights)
    if not _check_is_int(weights.dtype):
        _raise_type_error("weights must be integer")
    weights = weights.astype("float32")
    if weights.ndim > 1:
        _raise_runtime_error("cannot handle multidimensional weights")
    if weights.shape[0] != num_samples:
        _raise_runtime_error("incompatible numbers of samples and weights")
    # negative weights are converted to positive instead of raising (see cov note)
    return absolute(weights)
2255
2256
def _handle_inputs(cov_input, rowvar):
    """Validates an input array for np.cov and returns it as a 2-D float32
    tensor with variables along rows."""
    if not isinstance(cov_input, Tensor):
        cov_input = asarray_const(cov_input)
    if cov_input.ndim > 2:
        _raise_value_error("input array has dimension more than 2.")
    cov_input = cov_input.astype("float32")
    cov_input = _expand(cov_input, 2)
    if not isinstance(rowvar, bool):
        _raise_type_error("input rowvar should be boolean.")
    if not rowvar and cov_input.shape[0] != 1:
        # column-variables layout: transpose so each row is one variable
        cov_input = cov_input.T
    return cov_input
2270
2271
def _handle_facts(w, m, ddof, aweights):
    """Computes the normalization factor for np.cov."""
    if w is None:
        # unweighted: number of observations minus the degrees of freedom
        return m.shape[1] - ddof
    w_sum = _reduce_sum_default(w, -1)
    if ddof == 0:
        return w_sum
    if aweights is None:
        return w_sum - ddof
    # frequency and observation weights combined (numpy's formula)
    return w_sum - ddof * F.reduce_sum(w * aweights) / w_sum
2286
2287
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None, dtype=None):
    """
    Estimates a covariance matrix, given data and weights.

    Covariance indicates the level to which two variables vary together. If we examine
    N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, then the covariance matrix
    element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element
    :math:`C_{ii}` is the variance of :math:`x_i`.

    Note:
        `fweights` and `aweights` must be all positive, in Numpy if negative values
        are detected, a value error will be raised, in MindSpore we converts all values
        to positive instead.

    Args:
        m (Union[Tensor, list, tuple]): A 1-D or 2-D tensor containing multiple variables
            and observations. Each row of `m` represents a variable, and each column
            represents a single observation of all those variables. Also see `rowvar` below.
        y (Union[Tensor, list, tuple], optional): An additional set of variables
            and observations. `y` has the same form as that of `m`, default is ``None``.
        rowvar(bool, optional): If `rowvar` is ``True`` (default), then each row represents
            a variable, with observations in the columns. Otherwise, the relationship
            is transposed: each column represents a variable, while the rows contain
            observations.
        bias (bool, optional): Default Normalization (``False``) is by :math:`(N - 1)`, where
            :math:`N` is the number of observations given (unbiased estimate). If bias is
            ``True``, then Normalization is by `N`. These values can be overridden by
            using the keyword `ddof`.
        ddof (int, optional): If not ``None``, the default value implied by `bias` is
            overridden. Note that :math:`ddof=1` will return the unbiased estimate, even
            if both fweights and aweights are specified, and :math:`ddof=0` will return
            the simple average. See the notes for the details. The default value
            is ``None``.
        fweights (Union[Tensor, list, tuple], optional): 1-D tensor of integer
            frequency weights; the number of times each observation vector should
            be repeated. The default value is ``None``.
        aweights (Union[Tensor, list, tuple], optional): 1-D tensor of observation
            vector weights. These relative weights are typically larger for observations
            considered more important and smaller for observations considered less
            important. If :math:`ddof=0` the tensor of weights can be used to assign probabilities
            to observation vectors. The default value is ``None``.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Data-type of the
            result. By default, the return data-type will have mstype.float32 precision.

    Returns:
        Tensor, the covariance matrix of the variables.

    Raises:
        TypeError: if the inputs have types not specified above.
        ValueError: if `m` and `y` have wrong dimensions.
        RuntimeError: if `aweights` and `fweights` have dimensions > 2.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.cov([[2., 3., 4., 5.], [0., 2., 3., 4.], [7., 8., 9., 10.]])
        >>> print(output)
        [[1.6666666 2.1666667 1.6666666]
        [2.1666667 2.9166667 2.1666666]
        [1.6666666 2.1666667 1.6666666]]
    """
    # This implementation was inspired by original numpy implementation.
    # normalize m to a 2-D float32 tensor with variables along rows
    m = _handle_inputs(m, rowvar)

    if m.shape[0] == 0:
        return empty((0, 0), dtype="float32")

    if y is not None:
        # extra variables are stacked below m and treated uniformly
        y = _handle_inputs(y, rowvar)
        m = concatenate((m, y), axis=0)

    if ddof is None:
        if not bias:
            ddof = 1
        else:
            ddof = 0

    # Handle fweights and aweights
    w = _handle_weights(fweights, m.shape[1]) if fweights is not None else None

    if aweights is not None:
        aweights = _handle_weights(aweights, m.shape[1])
        # combined weight is the element-wise product when both are given
        w = aweights if w is None else w * aweights

    avg = average(m, axis=1, weights=w)

    # Determine the Normalization
    fact = _handle_facts(w, m, ddof, aweights)

    # center each variable around its (weighted) mean
    m = m - F.expand_dims(avg, -1)
    if w is None:
        m_t = m.T
    else:
        m_t = (m * w).T
    # covariance = (m @ m_w^T) / fact; squeeze drops size-1 dims for 1-D input
    res = true_divide(dot(m, m_t), fact).squeeze()
    if dtype is not None:
        return res.astype(dtype)
    return res
2388
2389
@constexpr
def _real_axes(ndim_orig, ndim_out, axes_orig):
    """Returns the real axes to be reduced after performing broadcast.

    Broadcasting prepends ``ndim_out - ndim_orig`` new leading dimensions;
    those new axes are reduced too, and the original axes are shifted right
    by the same offset.
    """
    _diff = ndim_out - ndim_orig
    axes = F.make_range(_diff)
    # shift every original axis index by the number of prepended dims
    axes_orig = map(functools.partial(operator.add, _diff), axes_orig)
    return axes + tuple(axes_orig)
2397
2398
@constexpr
def _shape_reduced_keepdims(shape, axes):
    """
    Returns `shape` with every dimension listed in `axes` collapsed to 1,
    keeping the number of dimensions unchanged.
    """
    out = []
    for i in range(F.tuple_len(shape)):
        if i in axes:
            out.append(1)
        else:
            out.append(shape[i])
    return tuple(out)
2411
2412
@constexpr
def _shape_reduced(shape, axes):
    """Returns `shape` with the dimensions listed in `axes` removed."""
    out = []
    for i in range(F.tuple_len(shape)):
        if i not in axes:
            out.append(shape[i])
    return tuple(out)
2425
2426
def _reduce(a, reduce_fn, cmp_fn=None, axis=None, keepdims=False, initial=None, where=True, dtype=None):
    """
    Applies comparison based on cmp_fn and reduction based on reduce_fn.
    If cmp_fn is None, only reduction is performed.

    Shared backend for the np.amax/amin/nanmax/... style reductions: handles
    axis validation, empty shapes, the `initial` seed value and `where` masks,
    then delegates the actual reduction to `reduce_fn(a, axes)`.
    """
    a = _to_tensor(a)

    shape = F.shape(a)
    ndim = F.rank(a)
    if dtype is None:
        # default output dtype follows the input
        dtype = F.dtype(a)
    axes = _check_axis_valid(axis, ndim)
    if initial is not None:
        # `initial` must be a scalar (python scalar or 0-d tensor)
        if ((isinstance(initial, Tensor) and F.rank(initial) > 0) or
                not isinstance(initial, (int, float, bool, Tensor))):
            _raise_type_error('initial should be scalar')

    if _is_shape_empty(shape):
        # zero-size input: the result is either empty or filled with `initial`
        if not axes:
            return a
        if keepdims:
            shape_out = _shape_reduced_keepdims(shape, axes)
        else:
            shape_out = _shape_reduced(shape, axes)
        if _is_shape_empty(shape_out):
            return empty(shape_out, dtype)
        if initial is None:
            if cmp_fn is None:
                # plain reductions over an empty axis follow numpy and yield nan
                initial = nan
            else:
                _raise_value_error('initial value must be provided for zero-size arrays')
        return full(shape_out, initial, dtype)

    if initial is not None:
        # fold the seed value in element-wise before reducing
        initial = full(shape, initial, dtype)
        a = cmp_fn(a, initial)

    if isinstance(where, Tensor):
        # masked-out elements are replaced by `initial` so they cannot win
        if initial is None:
            _raise_value_error('initial value must be provided for where masks')
        ndim_orig = F.rank(a)
        a = where_(where, a, initial)
        # broadcasting against the mask may have added leading axes; re-map them
        axes = _real_axes(ndim_orig, F.rank(a), axes)

    return reduce_fn(a, axes).astype(dtype)
2472
2473
def nanmax(a, axis=None, dtype=None, keepdims=False):
    """
    Returns the maximum of an array or maximum along an axis, ignoring any NaNs.

    Note:
        Numpy arguments `out` is not supported.
        For all NaN slices, a very small negative number is returned instead of NaN.

    Args:
        a (Union[int, float, list, tuple, Tensor]): Array containing numbers whose maximum
            is desired. If `a` is not an array, a conversion is attempted.
        axis (Union[int, tuple of int, None], optional): Axis or axes along which the maximum is
            computed. The default is to compute the maximum of the flattened array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.
        keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
            are reduced are left in the result as dimensions with size one. With this option,
            the result will broadcast correctly against the original `a`.

    Returns:
        Tensor.

    Raises:
        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([[1, 2], [3, np.nan]])
        >>> output = np.nanmax(a)
        >>> print(output)
        3.0
        >>> output = np.nanmax(a, axis=0)
        >>> print(output)
        [3. 2.]
    """
    a = _to_tensor(a)
    if not isinstance(keepdims, int):
        _raise_type_error("integer argument expected, got", keepdims)
    # replace NaNs with a very small number so they can never win the max
    nan_mask = _isnan(a)
    fill = full(F.shape(a), -sys.maxsize - 1, F.dtype(a))
    a = F.select(nan_mask, fill, a)
    if keepdims:
        reduce_fn = _reduce_max_keepdims
    else:
        reduce_fn = _reduce_max_default
    return _reduce(a, reduce_fn, axis=axis, keepdims=keepdims, dtype=dtype)
2520
2521
def nanmin(a, axis=None, dtype=None, keepdims=False):
    """
    Returns the minimum of array elements over a given axis, ignoring any NaNs.

    Note:
        Numpy arguments `out` is not supported.
        For all-NaN slices, a very large number is returned instead of NaN.
        On Ascend, since checking for NaN is currently not supported, it is not recommended to
        use np.nanmin. If the array does not contain NaN, np.min should be used instead.

    Args:
        a (Union[int, float, list, tuple, Tensor]): Array containing numbers whose minimum
            is desired. If `a` is not an array, a conversion is attempted.
        axis (Union[int, tuple of int, None], optional): Axis or axes along which the minimum is
            computed. The default is to compute the minimum of the flattened array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.
        keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
            are reduced are left in the result as dimensions with size one. With this option,
            the result will broadcast correctly against the original `a`.

    Returns:
        Tensor.

    Raises:
        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([[1, 2], [3, np.nan]])
        >>> output = np.nanmin(a)
        >>> print(output)
        1.0
        >>> output = np.nanmin(a, axis=0)
        >>> print(output)
        [1. 2.]
    """
    tensor_a = _to_tensor(a)
    if not isinstance(keepdims, int):
        _raise_type_error("integer argument expected, got", keepdims)
    # Replace every NaN with the largest platform integer so a NaN entry can
    # never win the min comparison against any finite value.
    highest = full(F.shape(tensor_a), sys.maxsize, F.dtype(tensor_a))
    tensor_a = F.select(_isnan(tensor_a), highest, tensor_a)
    if keepdims:
        reducer = _reduce_min_keepdims
    else:
        reducer = _reduce_min_default
    return _reduce(tensor_a, reducer, axis=axis, keepdims=keepdims, dtype=dtype)
2570
2571
def _reduce_nansum(x, axis, keepdims=False):
    """Sums `x` over `axis` after replacing every NaN entry with zero."""
    cleaned = F.select(_isnan(x), zeros(F.shape(x), F.dtype(x)), x)
    reducer = _reduce_sum_keepdims if keepdims else _reduce_sum_default
    return reducer(cleaned, axis)
2578
2579
def nansum(a, axis=None, dtype=None, keepdims=False):
    """
    Returns the sum of array elements over a given axis treating Not a Numbers (NaNs) as zero.

    Note:
        Numpy arguments `out` is not supported.

    Args:
        a (Union[int, float, list, tuple, Tensor]): Array containing numbers
            whose sum is desired. If `a` is not an array, a conversion is attempted.
        axis (Union[int, tuple of int, None], optional): Axis or axes along which the sum is
            computed. The default is to compute the sum of the flattened array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.
        keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
            are reduced are left in the result as dimensions with size one. With this option,
            the result will broadcast correctly against the original `a`.

    Returns:
        Tensor.

    Raises:
        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([[1, 1], [1, np.nan]])
        >>> output = np.nansum(a)
        >>> print(output)
        3.0
        >>> output = np.nansum(a, axis=0)
        >>> print(output)
        [2. 1.]
    """
    tensor_a = _to_tensor(a)
    # NaNs are zeroed both here and inside _reduce_nansum; doing it here keeps
    # the tensor handed to _reduce NaN-free as well.
    mask = _isnan(tensor_a)
    tensor_a = F.select(mask, zeros(F.shape(tensor_a), F.dtype(tensor_a)), tensor_a)
    reducer = functools.partial(_reduce_nansum, keepdims=keepdims)
    return _reduce(tensor_a, reducer, axis=axis, keepdims=keepdims, dtype=dtype)
2623
2624
def _count_nonnan(a, axis, keepdims=False):
    """Returns the per-axis count of entries in `a` that are not NaN."""
    shape = F.shape(a)
    dtype_a = F.dtype(a)
    # 1 where the entry is a number, 0 where it is NaN.
    indicator = F.select(_isnan(a), zeros(shape, dtype_a), ones(shape, dtype_a))
    reducer = _reduce_sum_keepdims if keepdims else _reduce_sum_default
    return reducer(indicator, axis)
2631
2632
def nanmean(a, axis=None, dtype=None, keepdims=False):
    """
    Computes the arithmetic mean along the specified axis, ignoring NaNs.

    Returns the average of the array elements. The average is taken over the flattened
    array by default, otherwise over the specified axis. float32 intermediate and
    return values are used for integer inputs.

    Note:
        Numpy arguments `out` is not supported.

    Args:
        a (Union[int, float, list, tuple, Tensor]): Array containing numbers
            whose mean is desired. If `a` is not an array, a conversion is attempted.
        axis (Union[int, tuple of int, None], optional): Axis or axes along which the mean is
            computed. The default is to compute the mean of the flattened array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.
        keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
            are reduced are left in the result as dimensions with size one. With this option,
            the result will broadcast correctly against the original `a`.

    Returns:
        Tensor.

    Raises:
        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([[1, np.nan], [3, 4]])
        >>> output = np.nanmean(a)
        >>> print(output)
        2.6666667
        >>> output = np.nanmean(a, axis=0)
        >>> print(output)
        [2. 4.]
        >>> output = np.nanmean(a, axis=1)
        >>> print(output)
        [1.  3.5]
    """
    if dtype is None:
        dtype = mstype.float32
    tensor_a = _to_tensor(a)
    axes = _check_axis_valid(axis, F.rank(tensor_a))
    # mean = (sum ignoring NaNs) / (count of non-NaN entries)
    total = nansum(tensor_a, axis=axes, dtype=dtype, keepdims=keepdims)
    counts = _count_nonnan(tensor_a, axes, keepdims)
    return F.tensor_div(total, counts)
2684
2685
def _nanvar(a, axis, ddof=0, keepdims=False):
    """Computes the NaN-ignoring variance without keyword-argument preprocessing."""
    # Center on the NaN-ignoring mean (kept with full rank for broadcasting).
    centered = F.tensor_sub(a, nanmean(a, axis=axis, keepdims=True))
    squared_total = _reduce_nansum(F.tensor_pow(centered, 2), axis, keepdims)
    # Divisor is N - ddof, where N counts only non-NaN entries.
    divisor = F.tensor_sub(_count_nonnan(a, axis, keepdims), ddof)
    return divide(squared_total, divisor)
2693
2694
def nanvar(a, axis=None, dtype=None, ddof=0, keepdims=False):
    """
    Computes the variance along the specified axis, while ignoring NaNs.

    Returns the variance of the array elements, a measure of the spread of a distribution. The
    variance is computed for the flattened array by default, otherwise over the specified axis.

    Note:
        Numpy arguments `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        a (Union[int, float, list, tuple, Tensor]): Array containing numbers
            whose variance is desired. If `a` is not an array, a conversion is attempted.
        axis (Union[int, tuple of int, None], optional): Axis or axes along which the variance is
            computed. The default is to compute the variance of the flattened array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.
        ddof (int, optional): "Delta Degrees of Freedom": the divisor used in the calculation is
            ``N - ddof``, where `N` represents the number of non-NaN elements. By default `ddof`
            is zero.
        keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
            are reduced are left in the result as dimensions with size one. With this option,
            the result will broadcast correctly against the original `a`.

    Returns:
        Tensor.

    Raises:
        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([[1, np.nan], [3, 4]])
        >>> output = np.nanvar(a)
        >>> print(output)
        1.5555557
        >>> output = np.nanvar(a, axis=0)
        >>> print(output)
        [1. 0.]
        >>> output = np.nanvar(a, axis=1)
        >>> print(output)
        [0.   0.25]
    """
    if dtype is None:
        dtype = mstype.float32
    reducer = functools.partial(_nanvar, ddof=ddof, keepdims=keepdims)
    return _reduce(a, reducer, axis=axis, keepdims=keepdims, dtype=dtype)
2747
2748
def nanstd(a, axis=None, dtype=None, ddof=0, keepdims=False):
    """
    Computes the standard deviation along the specified axis, while ignoring NaNs.

    Returns the standard deviation, a measure of the spread of a distribution, of the non-NaN
    array elements. The standard deviation is computed for the flattened array by default,
    otherwise over the specified axis.

    Note:
        Numpy arguments `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        a (Union[int, float, list, tuple, Tensor]): Calculates the standard deviation of the non-NaN values.
        axis (Union[int, tuple of int, None], optional): Axis or axes along which the standard
            deviation is computed. The default is to compute the standard deviation of the
            flattened array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.
        ddof (int, optional): "Delta Degrees of Freedom": the divisor used in the calculation is
            ``N - ddof``, where `N` represents the number of non-NaN elements. By default `ddof`
            is zero.
        keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
            are reduced are left in the result as dimensions with size one. With this option,
            the result will broadcast correctly against the original `a`.

    Returns:
        Tensor.

    Raises:
        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([[1, np.nan], [3, 4]])
        >>> output = np.nanstd(a)
        >>> print(output)
        1.2472192
        >>> output = np.nanstd(a, axis=0)
        >>> print(output)
        [1. 0.]
        >>> output = np.nanstd(a, axis=1)
        >>> print(output)
        [0.  0.5]
    """
    if dtype is None:
        dtype = mstype.float32

    # Standard deviation is the square root of the NaN-ignoring variance.
    def _nanstd_fn(arr, axes):
        return F.sqrt(_nanvar(arr, axes, ddof=ddof, keepdims=keepdims))

    return _reduce(a, _nanstd_fn, axis=axis, keepdims=keepdims, dtype=dtype)
2802
2803
def exp2(x, dtype=None):
    """
    Calculates ``2**p`` for all p in the input array.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        x (Tensor): input values.
        dtype (:class:`mindspore.dtype`, optional): defaults to :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, element-wise 2 to the power `x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([2, 3]).astype(np.float32)
        >>> output = np.exp2(x)
        >>> print(output)
        [4. 8.]
    """
    def _pow_of_two(arr):
        return F.tensor_pow(2, arr)

    return _apply_tensor_op(_pow_of_two, x, dtype=dtype)
2832
2833
def kron(a, b):
    """
    Kronecker product of two arrays.

    Computes the Kronecker product, a composite array made of blocks of the second
    array scaled by the first.

    Note:
        Booleans are not supported.

    Args:
        a (Union[int, float, list, tuple, Tensor]): input values.
        b (Union[int, float, list, tuple, Tensor]): input values.

    Returns:
        Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.kron([1,10,100], [5,6,7])
        >>> print(output)
        [  5   6   7  50  60  70 500 600 700]
        >>> output = np.kron([5,6,7], [1,10,100])
        >>> print(output)
        [  5  50 500   6  60 600   7  70 700]
        >>> output = np.kron(np.eye(2), np.ones((2,2)))
        >>> print(output)
        [[1. 1. 0. 0.]
        [1. 1. 0. 0.]
        [0. 0. 1. 1.]
        [0. 0. 1. 1.]]
    """
    a, b = _to_tensor(a, b)
    ndim = _max(F.rank(a), F.rank(b))
    if ndim == 0:
        # Two scalars: the Kronecker product degenerates to a plain product.
        return F.tensor_mul(a, b)
    # Bring both operands to the same rank (presumably by prepending size-1
    # axes in _expand -- confirm against its definition).
    a = _expand(a, ndim)
    b = _expand(b, ndim)
    shape_a = F.shape(a)
    shape_b = F.shape(b)

    # scales a by the shape of b
    # Output shape is the elementwise product of the two input shapes.
    kron_shape = _seq_prod(shape_a, shape_b)
    # Interleave the axes of `a` with unit axes, tile the unit axes up to the
    # shape of `b`, then reorder so every element of `a` becomes a contiguous
    # block of b's shape, and collapse back to the final Kronecker shape.
    a = F.reshape(a, _add_unit_axes(shape_a, 2*ndim, True))
    a = F.tile(a, _add_unit_axes(shape_b, 2*ndim, False))
    a = moveaxis(a, F.make_range(ndim, 2*ndim), F.make_range(1, 2*ndim, 2))
    a = F.reshape(a, kron_shape)
    # scales b by the shape of a
    b = F.tile(b, shape_a)
    return F.tensor_mul(a, b)
2887
2888
def cross(a, b, axisa=- 1, axisb=- 1, axisc=- 1, axis=None):
    """
    Returns the cross product of two (arrays of) vectors.

    The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular to both
    `a` and `b`. If `a` and `b` are arrays of vectors, the vectors are defined by the
    last axis of `a` and `b` by default, and these axes can have dimensions 2 or 3.
    Where the dimension of either `a` or `b` is 2, the third component of the input
    vector is assumed to be zero and the cross product calculated accordingly. In cases
    where both input vectors have dimension 2, the z-component of the cross product is
    returned.

    Args:
        a (Union[list, tuple, Tensor]): Components of the first vector(s).
        b (Union[list, tuple, Tensor]): Components of the second vector(s).
        axisa (int, optional): Axis of `a` that defines the vector(s). By default, the last
            axis.
        axisb (int, optional): Axis of `b` that defines the vector(s). By default, the last
            axis.
        axisc (int, optional): Axis of `c` containing the cross product vector(s). Ignored
            if both input vectors have dimension 2, as the return is scalar. By default,
            the last axis.
        axis (int, optional): If defined, the axis of `a`, `b` and `c` that defines the
            vector(s) and cross product(s). Overrides `axisa`, `axisb` and `axisc`.

    Returns:
        Tensor, vector cross product(s).

    Raises:
        ValueError: when the dimensions of the vector(s) in `a` and/or `b` does not equal 2
            or 3.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([[1,2,3], [4,5,6]])
        >>> y = np.array([[4,5,6], [1,2,3]])
        >>> output = np.cross(x, y)
        >>> print(output)
        [[-3  6 -3]
        [ 3 -6  3]]
        >>> output = np.cross(x, y, axisc=0)
        >>> print(output)
        [[-3  3]
        [ 6 -6]
        [-3  3]]
    """
    a, b = _to_tensor(a, b)
    # A single `axis` argument overrides all three per-array axis arguments.
    if axis is not None:
        axisa, axisb, axisc = axis, axis, axis

    _check_axis_in_range(axisa, F.rank(a))
    _check_axis_in_range(axisb, F.rank(b))
    # Move the vector axis of each input to the end so the component slicing
    # below is uniform regardless of axisa/axisb.
    a = moveaxis(a, axisa, -1)
    b = moveaxis(b, axisb, -1)
    shape_a = F.shape(a)
    shape_b = F.shape(b)
    if F.shape(a)[-1] not in (2, 3) or F.shape(b)[-1] not in (2, 3):
        _raise_value_error('incompatible dimensions for cross product (dimension must be 2 or 3)')
    # 2-component vectors are treated as 3-D vectors with z == 0.
    a_has_z = shape_a[-1] == 3
    b_has_z = shape_b[-1] == 3
    # Broadcast the batch dimensions (everything except the vector axis).
    shape_out = _infer_out_shape(shape_a[:-1], shape_b[:-1])
    if a_has_z or b_has_z:
        shape_out += (3,)
    _check_axis_in_range(axisc, len(shape_out))

    dtype = _promote(F.dtype(a), F.dtype(b))
    if _get_device() == 'CPU':
        # F.tensor_slice only supports float on CPU
        if not _check_is_float(F.dtype(a)):
            a = F.cast(a, mstype.float32)
        if not _check_is_float(F.dtype(b)):
            b = F.cast(b, mstype.float32)

    # Slice parameters that select a single component along the last axis.
    a_slice_start = _list_comprehensions(F.rank(a) - 1, 0, True)
    a_slice_size = shape_a[:-1] + (1,)
    b_slice_start = _list_comprehensions(F.rank(b) - 1, 0, True)
    b_slice_size = shape_b[:-1] + (1,)

    def _get_slice_product(idx_a, idx_b):
        # Extracts component idx_a of `a` and component idx_b of `b` and
        # returns their elementwise (broadcast) product.
        return multiply(F.tensor_slice(a, a_slice_start + (idx_a,), a_slice_size),
                        F.tensor_slice(b, b_slice_start + (idx_b,), b_slice_size))

    cz = F.tensor_sub(_get_slice_product(0, 1), _get_slice_product(1, 0)) # ax*by - ay*bx
    if not a_has_z and not b_has_z:
        # Both inputs are 2-D vectors: only the scalar z-component is returned.
        return F.reshape(cz, shape_out).astype(dtype)

    # The missing z-component of a 2-D operand is treated as zero, which
    # removes one term from cx and cy in the mixed cases below.
    if a_has_z and b_has_z:
        cx = F.tensor_sub(_get_slice_product(1, 2), _get_slice_product(2, 1)) # ay*bz - az*by
        cy = F.tensor_sub(_get_slice_product(2, 0), _get_slice_product(0, 2)) # az*bx - ax*bz
    elif a_has_z:
        cx = F.neg_tensor(_get_slice_product(2, 1)) # -az*by
        cy = _get_slice_product(2, 0)               # az*bx
    else: # b_has_z
        cx = _get_slice_product(1, 2)               # ay*bz
        cy = F.neg_tensor(_get_slice_product(0, 2)) # -ax*bz
    res = _concat((cx, cy, cz)).reshape(shape_out)
    # Place the component axis where the caller asked for it.
    return moveaxis(res, -1, axisc).astype(dtype)
2989
2990
def ceil(x, dtype=None):
    """
    Returns the ceiling of the input, element-wise.

    The ceil of the scalar `x` is the smallest integer `i`, such that ``i >= x``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        x (Tensor): input values.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the floor of each element in `x`. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
        >>> output = np.ceil(a)
        >>> print(output)
        [-1. -1. -0.  1.  2.  2.  2.]
    """
    def _ceil(arr):
        # Uses the identity ceil(v) == -floor(-v); the input is cast to
        # float32 before flooring (presumably because F.floor needs a float
        # input -- confirm against the floor op's dtype support).
        return F.neg_tensor(F.floor(F.neg_tensor(arr.astype(mstype.float32))))

    return _apply_tensor_op(_ceil, x, dtype=dtype)
3022
3023
3024def _infer_shape_rem(shape1, shape2, ndim1, ndim2, transpose_b):
3025    """Infers the shape of the last two dimensions after performing matmul."""
3026    shape_rem = ()
3027    if ndim1 >= 2:
3028        shape_rem += (shape1[-2],)
3029    if transpose_b:
3030        if ndim2 >= 2:
3031            shape_rem += (shape2[-2],)
3032    else:
3033        if ndim1 >= 1:
3034            shape_rem += (shape2[-1],)
3035    return shape_rem
3036
3037
def positive(a, dtype=None):
    """
    Numerical positive, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        a (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.asarray([1, -1]).astype('float32')
        >>> output = np.positive(a)
        >>> print(output)
        [1. -1.]
    """
    _check_input_tensor(a)
    # Two successive negations reproduce the input values while still routing
    # the result through _apply_tensor_op for dtype handling.
    negated = F.neg_tensor(a)
    return _apply_tensor_op(F.neg_tensor, negated, dtype=dtype)
3067
3068
def negative(a, dtype=None):
    """
    Numerical negative, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        a (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.asarray([1, -1]).astype('float32')
        >>> output = np.negative(a)
        >>> print(output)
        [-1. 1.]
    """
    # Delegate elementwise negation (and optional dtype override) to the
    # shared tensor-op helper.
    return _apply_tensor_op(lambda arr: F.neg_tensor(arr), a, dtype=dtype)
3096
3097
def cumsum(a, axis=None, dtype=None):
    """
    Returns the cumulative sum of the elements along a given axis.

    Note:
        If ``a.dtype`` is :class:`int8`, :class:`int16` or :class:`bool`, the result
        `dtype` will be elevated to :class:`int32`.

    Args:
        a (Tensor): Input tensor.
        axis (int, optional): Axis along which the cumulative sum is computed. The
            default (None) is to compute the cumsum over the flattened array.
        dtype (:class:`mindspore.dtype`, optional): If not specified, stay the same as `a`,
            unless `a` has an integer dtype with a precision less than that of the
            default platform integer. In that case, the default platform integer
            is used.

    Returns:
        Tensor.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If axis is out of range.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.cumsum(np.ones((3,3)), axis=0)
        >>> print(output)
        [[1. 1. 1.]
         [2. 2. 2.]
         [3. 3. 3.]]
    """
    # Validate the input type up front, then defer to the Tensor method.
    _check_input_tensor(a)
    result = a.cumsum(axis, dtype)
    return result
3135
3136
def nancumsum(a, axis=None, dtype=None):
    """
    Return the cumulative sum of array elements over a given axis treating Not a Numbers (NaNs)
    as zero. The cumulative sum does not change when NaNs are encountered and leading NaNs are
    replaced by zeros.

    Zeros are returned for slices that are all-NaN or empty.

    Note:
        If ``a.dtype`` is :class:`int8`, :class:`int16` or :class:`bool`, the result
        `dtype` will be elevated to :class:`int32`.

    Args:
        a (Tensor): Input tensor.
        axis (int, optional): Axis along which the cumulative sum is computed. The
            default (None) is to compute the cumsum over the flattened array.
        dtype (:class:`mindspore.dtype`, optional): If not specified, stay the same as `a`,
            unless `a` has an integer dtype with a precision less than that of the
            default platform integer. In that case, the default platform integer
            is used.

    Returns:
        Tensor.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If axis is out of range.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([[1, 2], [3, np.nan]])
        >>> output = np.nancumsum(a)
        >>> print(output)
        [1. 3. 6. 6.]
        >>> output = np.nancumsum(a, axis=0)
        >>> print(output)
        [[1. 2.]
        [4. 2.]]
        >>> output = np.nancumsum(a, axis=1)
        >>> print(output)
        [[1. 3.]
        [3. 3.]]
    """
    # Consistency fix: validate the input like the sibling `cumsum` does, so a
    # non-Tensor argument raises a clear TypeError instead of failing deep
    # inside _isnan.
    _check_input_tensor(a)
    # Replace NaNs with zeros so they contribute nothing to the running sum.
    a = F.select(_isnan(a), zeros(F.shape(a), F.dtype(a)), a)
    return a.cumsum(axis, dtype)
3185
3186
def cbrt(x, dtype=None):
    """
    Returns the cube-root of a tensor, element-wise.

    Note:
        Numpy arguments `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.asarray([1, -1, 3, -8, 64])
        >>> output = np.cbrt(a)
        >>> print(output)
        [ 1.        -1.         1.4422495 -2.         4.       ]
    """
    def _compute_cbrt(arr):
        # Work in at least float32 so fractional powers are well-defined.
        arr = arr.astype(promote_types(arr.dtype, "float32"))
        # TODO: use P.Sign() once gpu support is added
        magnitude = F.absolute(arr)
        sign = magnitude / arr
        # cbrt(v) == sign(v) * |v| ** (1/3); the power is taken on the
        # magnitude so negative bases are handled.
        return sign * F.tensor_pow(magnitude, 1. / 3.)

    return _apply_tensor_op(_compute_cbrt, x, dtype=dtype)
3221
3222
def log1p(x, dtype=None):
    """
    Returns the natural logarithm of one plus the input array, element-wise.

    Calculates ``log(1 + x)``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input array.
        dtype (:class:`mindspore.dtype`): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([1, 2, 3]).astype('float16')
        >>> output = np.log1p(x)
        >>> print(output)
        [0.6934 1.099 1.387 ]
    """
    def _log1p(arr):
        return F.log(arr + 1)

    return _apply_tensor_op(_log1p, x, dtype=dtype)
3252
3253
def logaddexp(x1, x2, dtype=None):
    """
    Logarithm of the sum of exponentiations of the inputs.

    Calculates ``log(exp(x1) + exp(x2))``. This function is useful in statistics where the
    calculated probabilities of events may be so small as to exceed the range of normal
    floating point numbers. In such cases the logarithm of the calculated probability is
    stored. This function allows adding probabilities stored in such a fashion.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): Input array.
        x2 (Tensor): Input array. If ``x1.shape != x2.shape``, they must be broadcastable to
            a common shape (which becomes the shape of the output).
        dtype (:class:`mindspore.dtype`): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([1, 2, 3]).astype('float16')
        >>> x2 = np.array(2).astype('float16')
        >>> output = np.logaddexp(x1, x2)
        >>> print(output)
        [2.312 2.693 3.312]
    """
    def _log_of_exp_sum(lhs, rhs):
        # NOTE(review): evaluated naively, so exp() may overflow for large
        # inputs; the usual max-shift rewrite is not applied here.
        summed = F.tensor_add(F.tensor_exp(lhs), F.tensor_exp(rhs))
        return F.log(summed)

    return _apply_tensor_op(_log_of_exp_sum, x1, x2, dtype=dtype)
3291
3292
def log2(x, dtype=None):
    """
    Base-2 logarithm of `x`.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([2, 4, 8]).astype('float16')
        >>> output = np.log2(x)
        >>> print(output)
        [1. 2. 3.]
    """
    # Change-of-base formula: log2(v) = ln(v) / ln(2), with ln(2) hoisted out
    # of the elementwise op.
    log_of_two = F.log(_make_tensor(2, x.dtype))

    def _base2_log(arr):
        return F.log(arr) / log_of_two

    return _apply_tensor_op(_base2_log, x, dtype=dtype)
3325
3326
def logaddexp2(x1, x2, dtype=None):
    """
    Logarithm of the sum of exponentiations of the inputs in base of 2.

    Calculates ``log2(2**x1 + 2**x2)``.
    This function is useful in machine learning when the calculated probabilities of events
    may be so small as to exceed the range of normal floating point numbers.
    In such cases the base-2 logarithm of the calculated probability can be used instead.
    This function allows adding probabilities stored in such a fashion.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): Input tensor.
        x2 (Tensor): Input tensor. If ``x1.shape != x2.shape``, they must be broadcastable to
            a common shape (which becomes the shape of the output).
        dtype (:class:`mindspore.dtype`): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([2, 4, 8]).astype('float16')
        >>> x2 = np.array(2).astype('float16')
        >>> output = np.logaddexp2(x1, x2)
        >>> print(output)
        [3. 4.32 8.02]
    """
    _check_input_tensor(x1, x2)
    # 2**x1 + 2**x2, then take the base-2 logarithm of the sum.
    pow_sum = F.tensor_add(F.tensor_pow(2, x1), F.tensor_pow(2, x2))
    return log2(pow_sum, dtype=dtype)
3365
3366
def log10(x, dtype=None):
    """
    Base-10 logarithm of `x`.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([10, 100, 1000]).astype('float16')
        >>> output = np.log10(x)
        >>> print(output)
        [1. 2. 3.]
    """
    # Change of base: log10(x) = ln(x) / ln(10).
    ten = _make_tensor(10, x.dtype)

    def _log_base10(x):
        return F.log(x) / F.log(ten)

    return _apply_tensor_op(_log_base10, x, dtype=dtype)
3399
3400
def _cast_type_for_trigonometric(x):
    """
    Promote `x` to a floating dtype suitable for the trigonometric kernels.

    Args:
        x (Tensor): Input tensor; integer/bool dtypes are promoted to float,
            float16/float32/float64 inputs are returned unchanged.

    Returns:
        Tensor, `x` cast to a floating dtype when promotion is needed.
    """
    _check_input_tensor(x)
    # Bug fix: the original condition used `or`, which is always True (a dtype
    # cannot equal all three float types at once), so every input was cast.
    # With `and`, only non-float dtypes are promoted.
    if x.dtype != mstype.float16 and x.dtype != mstype.float32 and x.dtype != mstype.float64:
        dtype = _promote_for_trigonometric(x.dtype)
        x = F.cast(x, dtype)
    return x
3407
3408
def sin(x, dtype=None):
    """
    Trigonometric sine, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([-5, -1, 0, 2, 4, 100]).astype('float32')
        >>> output = np.sin(x)
        >>> print(output)
        [ 0.9589243  -0.84147096  0.   0.9092974  -0.7568025  -0.50636566]
    """
    # Promote non-float inputs to a float dtype before applying the kernel.
    return _apply_tensor_op(F.sin, _cast_type_for_trigonometric(x), dtype=dtype)
3437
3438
def cos(x, dtype=None):
    """
    Cosine element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(5).astype('float32')
        >>> print(np.cos(x))
        [ 1.          0.5403023  -0.41614684 -0.9899925  -0.6536436 ]
    """
    # Promote non-float inputs to a float dtype before applying the kernel.
    return _apply_tensor_op(F.cos, _cast_type_for_trigonometric(x), dtype=dtype)
3466
3467
def tan(x, dtype=None):
    """
    Computes tangent element-wise.

    Equivalent to :math:`np.sin(x)/np.cos(x)` element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Raises:
        TypeError: If the input is not a tensor or is :class:`tensor.dtype` is :class:`mindspore.float64`.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([-5, -1, 0, 2, 4, 100]).astype('float32')
        >>> print(np.tan(x))
        [ 3.380515   -1.5574077   0.         -2.1850398   1.1578213  -0.58721393]
    """
    # Promote non-float inputs to a float dtype before applying the kernel.
    return _apply_tensor_op(F.tan, _cast_type_for_trigonometric(x), dtype=dtype)
3500
3501
def arcsin(x, dtype=None):
    """
    Inverse sine, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor. y-coordinate on the unit circle.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Output Tensor.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, -1], np.float32)
        >>> output = np.arcsin(x)
        >>> print(output)
        [ 1.5707964 -1.5707964]
    """
    # Promote non-float inputs to a float dtype before applying the kernel.
    return _apply_tensor_op(F.asin, _cast_type_for_trigonometric(x), dtype=dtype)
3533
3534
def arccos(x, dtype=None):
    """
    Trigonometric inverse cosine, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor. x-coordinate on the unit circle.
            For real arguments, the domain is :math:`[-1, 1]`.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, -1], np.float32)
        >>> output = np.arccos(x)
        >>> print(output)
        [0.        3.1415927]
    """
    # Promote non-float inputs to a float dtype before applying the kernel.
    return _apply_tensor_op(F.acos, _cast_type_for_trigonometric(x), dtype=dtype)
3567
3568
def arctan(x, dtype=None):
    """
    Trigonometric inverse tangent, element-wise.

    The inverse of tan, so that if :math:`y = tan(x)` then :math:`x = arctan(y)`.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(5).astype('float32')
        >>> print(np.arctan(x))
        [0.        0.7853982 1.1071488 1.2490457 1.3258177]
    """
    # Promote non-float inputs to a float dtype before applying the kernel.
    return _apply_tensor_op(F.atan, _cast_type_for_trigonometric(x), dtype=dtype)
3598
3599
def sinh(x, dtype=None):
    """
    Hyperbolic sine, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(5).astype('float32')
        >>> print(np.sinh(x))
        [ 0.         1.1752012  3.6268604 10.017875  27.289917 ]
    """
    # Promote non-float inputs to a float dtype before applying the kernel.
    return _apply_tensor_op(F.sinh, _cast_type_for_trigonometric(x), dtype=dtype)
3627
3628
def cosh(x, dtype=None):
    """
    Hyperbolic cosine, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(5).astype('float32')
        >>> print(np.cosh(x))
        [ 1.         1.5430807  3.7621956 10.067662  27.308233 ]
    """
    # Promote non-float inputs to a float dtype before applying the kernel.
    return _apply_tensor_op(F.cosh, _cast_type_for_trigonometric(x), dtype=dtype)
3656
3657
def tanh(x, dtype=None):
    """
    Computes hyperbolic tangent element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(5).astype('float32')
        >>> print(np.tanh(x))
        [0.        0.7615942 0.9640276 0.9950548 0.9993293]
    """
    # Promote non-float inputs to a float dtype before applying the kernel.
    return _apply_tensor_op(F.tanh, _cast_type_for_trigonometric(x), dtype=dtype)
3685
3686
def arcsinh(x, dtype=None):
    """
    Inverse hyperbolic sine element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(5).astype('float32')
        >>> print(np.arcsinh(x))
        [0.        0.8813736 1.4436355 1.8184465 2.0947125]
    """
    # Promote non-float inputs to a float dtype before applying the kernel.
    return _apply_tensor_op(F.asinh, _cast_type_for_trigonometric(x), dtype=dtype)
3714
3715
def arccosh(x, dtype=None):
    """
    Inverse hyperbolic cosine, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(1, 5).astype('float32')
        >>> print(np.arccosh(x))
        [0.        1.316958  1.7627472 2.063437 ]
    """
    # Promote non-float inputs to a float dtype before applying the kernel.
    return _apply_tensor_op(F.acosh, _cast_type_for_trigonometric(x), dtype=dtype)
3743
3744
def arctanh(x, dtype=None):
    """
    Inverse hyperbolic tangent element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([-0.99, -0.75, -0.5, 0, 0.5]).astype('float32')
        >>> print(np.arctanh(x))
        [-2.646653   -0.97295505 -0.54930615  0.          0.54930615]
    """
    # Promote non-float inputs to a float dtype before applying the kernel.
    return _apply_tensor_op(F.atanh, _cast_type_for_trigonometric(x), dtype=dtype)
3772
3773
def arctan2(x1, x2, dtype=None):
    """
    Element-wise arc tangent of :math:`x1/x2` choosing the quadrant correctly.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): input tensor.
        x2 (Tensor): input tensor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the element-wise arc tangent of ``x1/x2`` with the quadrant
        chosen correctly. This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``CPU`` ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([-1, +1, +1, -1])
        >>> x2 = np.array([-1, -1, +1, +1])
        >>> output = np.arctan2(x1, x2)
        >>> print(output)
        [-2.3561945   2.3561945   0.78539819 -0.78539819]
    """
    x1 = _cast_type_for_trigonometric(x1)
    x2 = _cast_type_for_trigonometric(x2)
    return _apply_tensor_op(F.atan2, x1, x2, dtype=dtype)
3806
3807
def promote_types(type1, type2):
    """
    Returns the data type with the smallest size and smallest scalar kind.

    Note:
        The promotion rule is slightly different from original Numpy, but more like
        jax, due to the preference on ``32-bit`` over ``64-bit`` data types.

    Args:
        type1 (Union[:class:`mindspore.dtype`, str]): First data type.
        type2 (Union[:class:`mindspore.dtype`, str]): Second data type.

    Returns:
        The promoted data type.

    Raises:
        TypeError: if the input are not valid :class:`mindspore.dtype` input.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.promote_types(np.float32, np.float64)
        >>> print(output)
        Float64
    """
    # Validate (and normalize string names into) mindspore dtypes first.
    return _promote(_check_dtype(type1), _check_dtype(type2))
3838
3839
def corrcoef(x, y=None, rowvar=True, dtype=None):
    r"""
    Returns Pearson product-moment correlation coefficients.

    Please refer to the documentation for cov for more detail. The relationship
    between the correlation coefficient matrix, R, and the covariance matrix, C, is
    :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }`
    The values of R are between -1 and 1, inclusive.

    Note:
        Currently, complex numbers are not supported.

    Args:
        x (Union[int, float, bool, tuple, list, Tensor]): A 1-D or 2-D array containing
            multiple variables and observations. Each row of `x` represents a variable,
            and each column a single observation of all those variables. Also see rowvar below.
        y (Union[int, float, bool, tuple, list, Tensor], optional): An additional set
            of variables and observations.
        rowvar (bool, optional): If rowvar is `True` (default), then each row represents
            a variable, with observations in the columns. Otherwise, the relationship
            is transposed: each column represents a variable, while the rows contain observations.
        dtype (:class:`mindspore.dtype`, optional): Data-type of the result. By default,
            the return data-type will have at least float32 precision.

    Returns:
        Tensor, The correlation coefficient matrix of the variables.

    Raises:
        TypeError: if the inputs have types not specified above.
        ValueError: if `x` and `y` have wrong dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.corrcoef([[2., 3., 4., 5.], [0., 2., 3., 4.], [7., 8., 9., 10.]])
        >>> print(output)
        [[1.         0.9827076  1.        ]
        [0.9827077  0.99999994 0.9827077 ]
        [1.         0.9827076  1.        ]]
    """
    # Adapted from the original Numpy implementation.
    c = cov(x, y, rowvar)
    if not c.shape:
        # 0-d covariance: the correlation of a variable with itself.
        return F.tensor_div(c, c)
    stddev = sqrt(diag(c))
    # Normalize rows then columns by the standard deviations.
    c /= F.expand_dims(stddev, -1)
    c /= F.expand_dims(stddev, 0)
    # Clip rounding artifacts so results stay within [-1, 1].
    c = clip(c, -1, 1)
    return c if dtype is None else c.astype(dtype)
3894
3895
def _slice_along_axis(f, axis, slice_start, slice_end):
    """
    Slice a tensor along a given axis, a helper function for gradient.

    Args:
        f (Tensor): Input Tensor.
        axis (int): Specified axis.
        slice_start (int): The start of the slice.
        slice_end (int): The end of the slice.

    Returns:
        Sliced tensor.
    """
    # F.tensor_slice takes (begin, size): full extent everywhere except `axis`.
    begin = _tuple_setitem((0,) * f.ndim, axis, slice_start)
    size = _tuple_setitem(f.shape, axis, slice_end - slice_start)
    return F.tensor_slice(f, begin, size)
3915
3916
def _gradient_along_axis(f, h, axis):
    """Compute the gradients of `f` along a given axis, a helper function of gradient."""
    n = f.shape[axis]
    # One-sided differences at the two boundaries.
    forward_edge = _slice_along_axis(f, axis, 1, 2) - _slice_along_axis(f, axis, 0, 1)
    backward_edge = _slice_along_axis(f, axis, n - 1, n) - _slice_along_axis(f, axis, n - 2, n - 1)
    if n <= 2:
        grad = concatenate((forward_edge, backward_edge), axis)
    else:
        # Second-order central differences in the interior.
        central = (_slice_along_axis(f, axis, 2, n) - _slice_along_axis(f, axis, 0, n - 2)) * 0.5
        grad = concatenate((forward_edge, central, backward_edge), axis)
    return grad / h
3928
3929
def check_gradient_arguments(f, axis, edge_order):
    """Validate and normalize the arguments of gradient."""
    if edge_order != 1:
        _raise_unimplemented_error("edge_order != 1 not implemented")
    # Normalize `f` into a float tensor (float64 is kept as-is).
    f = f if isinstance(f, Tensor) else asarray_const(f)
    if f.dtype != mstype.float64:
        f = f.astype(mstype.float32)
    # Normalize `axis` into a tuple of canonical (non-negative) axes.
    if axis is None:
        axis = F.make_range(f.ndim)
    else:
        _check_axis_type(axis, True, True, True)
        axis = _canonicalize_axis(axis, f.ndim)
        if isinstance(axis, int):
            axis = (axis,)
    return f, axis, edge_order
3945
3946
def gradient(f, *varargs, axis=None, edge_order=1):
    """
    Returns the gradient of a N-dimensional array.
    The gradient is computed using second order accurate central differences
    in the interior points and either first or second order accurate one-sides
    (forward or backwards) differences at the boundaries.
    The returned gradient hence has the same shape as the input array.

    Note:
        Currently we only support `edge_order`=1 and uniform spacing of `varargs`.

    Args:
        f (Union[tuple, list, Tensor]): An N-dimensional array containing samples of
            a scalar function.
        varargs (Union[tuple[number], tuple[tensor scalar]], optional)
            Spacing between f values. Default unitary spacing for all dimensions.
            Spacing can be specified using:
            1. single scalar to specify a sample distance for all dimensions.
            2. N scalars to specify a constant sample distance for each dimension.
        edge_order (int): Gradient is calculated using N-th order accurate differences
            at the boundaries. Default: 1.
        axis (Union[None, int, tuple(int), list(int)], optional): Gradient is calculated
            only along the given axis or axes. The default :class:`(axis = None)` is to calculate
            the gradient for all the axes of the input tensor. `axis` may be negative,
            in which case it counts from the last to the first `axis`.

    Returns:
        gradient, a list of tensors (or a single tensor if there is only one dimension
        to be calculated). Each derivative has the same shape as f.

    Raises:
        TypeError: if the inputs have types not specified above.
        ValueError: if `axis` values out of bounds, or shape of `f` has entries < 1.
        NotImplementedError: if `edge_order` != 1, or `varargs` contains non-scalar entries.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.gradient([[1, 2, 6], [3, 4, 5]], axis=-1)
        >>> print(output)
        [[1.  2.5 4. ]
        [1.  1.  1. ]]
    """
    # This implementation was adapted from Numpy and jax.numpy
    f, axis, edge_order = check_gradient_arguments(f, axis, edge_order)

    len_axes = len(axis)
    n = len(varargs)
    # Normalize varargs into one spacing value per differentiated axis.
    if n == 0 or varargs is None:
        dx = (1,) * len_axes           # default unit spacing everywhere
    elif n == 1:
        dx = varargs * len_axes        # one scalar shared by all axes
    elif n == len_axes:
        dx = varargs                   # one scalar per axis
    else:
        dx = None
        _raise_type_error("Invalid number of arguments")

    a_grad = []
    for i in F.make_range(len_axes):
        h = dx[i]
        ax = axis[i]
        if f.shape[ax] < 2:
            _raise_value_error("Shape of array too small to calculate a numerical gradient, "
                               "at least 2 elements are required.")
        # Only uniform (scalar) spacing is supported.
        h_is_scalar = isinstance(h, (int, float, bool)) or \
            (isinstance(h, Tensor) and h.ndim == 0)
        if not h_is_scalar:
            _raise_unimplemented_error("Non-constant spacing not implemented")

        a_grad.append(_gradient_along_axis(f, h, ax))

    if len_axes == 1:
        return a_grad[0]

    return a_grad
4028
4029
def sum_(a, axis=None, dtype=None, keepdims=False, initial=None):
    """
    Returns sum of array elements over a given axis.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and
        `extobj` are not supported.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Elements to sum.
        axis (Union[None, int, tuple(int)]): Axis or axes along which a sum is performed. Default: None.
            If None, sum all of the elements of the input array.
            If axis is negative it counts from the last to the first axis.
            If axis is a tuple of integers, a sum is performed on all of the axes specified in the tuple
            instead of a single axis or all the axes as before.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.
        keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
            dimensions with size one. With this option, the result will broadcast correctly against the input array.
            If the default value is passed, then keepdims will not be passed through to the sum method of
            sub-classes of ndarray, however any non-default value will be. If the sub-class’ method does not
            implement keepdims any exceptions will be raised. Default: `False`.
        initial (scalar): Starting value for the sum, if `None`, which refers to the first element of the reduction.
            Default: `None`.

    Returns:
        Tensor. An array with the same shape as `a`, with the specified axis removed.
        If `a` is a 0-d array, or if axis is None, a scalar is returned.
        If an output array is specified, a reference to out is returned.

    Raises:
        TypeError: If input is not array_like or `axis` is not int or tuple of integers or
            `keepdims` is not integer or `initial` is not scalar.
        ValueError: If any axis is out of range or duplicate axes exist.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.sum([0.5, 1.5]))
        2.0
        >>> x = np.arange(10).reshape(2, 5).astype('float32')
        >>> print(np.sum(x, axis=1))
        [10. 35.]
    """
    # Delegate to Tensor.sum after converting array_like input to a tensor.
    a = _to_tensor(a)
    return a.sum(axis, dtype, keepdims, initial)
4078
4079
@constexpr
def _min_cost_chain_matmul(dims):
    """
    Returns indices of splits that has the minimal cost for matmul.
    s[i, j] holds the index of the split with minimal cost for arrays[i, i + 1, ... j]
    """
    # Classic matrix-chain-order dynamic program over the dimension chain.
    dims = tuple(dims)
    n = len(dims) - 1
    cost_table = [[0] * n for _ in range(n)]
    split_table = [[0] * n for _ in range(n)]
    for chain_len in range(1, n):
        for i in range(n - chain_len):
            j = i + chain_len
            best = sys.maxsize
            for k in range(i, j):
                # Cost of the two sub-chains plus the final (i..k)x(k+1..j) matmul.
                candidate = cost_table[i][k] + cost_table[k + 1][j] + dims[i]*dims[k + 1]*dims[j + 1]
                if candidate < best:
                    best = candidate
                    split_table[i][j] = k
            cost_table[i][j] = best
    return split_table
4100
4101
@constexpr
def _get_dims(shapes):
    """
    Returns the chain of the dimensions in arrays.
    dims[i] == arrays[i - 1].shape[1] == arrays[i].shape[0]
    """
    shapes = tuple(shapes)
    for shape in shapes:
        if len(shape) != 2:
            raise ValueError('Array must be 2 dimensional')
    # Row dimension of each matrix; consecutive matrices must align.
    dims = [shape[0] for shape in shapes]
    for shape, dim in zip(shapes[:-1], dims[1:]):
        if shape[1] != dim:
            raise ValueError('shapes not aligned')
    return tuple(dims) + (shapes[-1][1],)
4115
4116
def _multi_dot(arrays, i, j, order):
    """Computes multi dot recursively using minimal cost."""
    if i == j:
        return arrays[i]
    # Split at the precomputed optimal point and combine the two halves.
    k = order[i][j]
    left = _multi_dot(arrays, i, k, order)
    right = _multi_dot(arrays, k + 1, j, order)
    return dot(left, right)
4123
4124
def multi_dot(arrays):
    """
    Computes the dot product of two or more arrays in a single function call, while automatically
    selecting the fastest evaluation order.
    multi_dot chains numpy.dot and uses optimal parenthesization of the matrices
    `[1] <en.wikipedia.org/wiki/Matrix_chain_multiplication>`. Depending on the shapes of the
    matrices, this can speed up the multiplication a lot.
    If the first argument is 1-D it is treated as a row vector. If the last argument is 1-D it
    is treated as a column vector. The other arguments must be 2-D.

    Note:
        Numpy argument `out` is not supported.

    Args:
        arrays (sequence of array_like): If the first argument is 1-D it is treated as row
            vector. If the last argument is 1-D it is treated as column vector. The other
            arguments must be 2-D.

    Returns:
        Tensor, the dot product of the supplied arrays.

    Raises:
        ValueError: arrays are not 2-D.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> A = np.ones((10000, 100))
        >>> B = np.ones((100, 1000))
        >>> C = np.ones((1000, 5))
        >>> D = np.ones((5, 333))
        >>> output = np.multi_dot([A, B, C, D])
        >>> print(output)
        [[500000. 500000. 500000. ... 500000. 500000. 500000.]
        [500000. 500000. 500000. ... 500000. 500000. 500000.]
        [500000. 500000. 500000. ... 500000. 500000. 500000.]
        ...
        [500000. 500000. 500000. ... 500000. 500000. 500000.]
        [500000. 500000. 500000. ... 500000. 500000. 500000.]
        [500000. 500000. 500000. ... 500000. 500000. 500000.]]
    """
    if len(arrays) < 2:
        _raise_value_error('Expecting at least 2 arrays')
    if isinstance(arrays, (tuple, list)):
        arrays = _to_tensor(*arrays)
    else:
        # A single stacked tensor: unstack it along the leading axis.
        arrays = _to_tensor(arrays)
        num = len(arrays)
        arrays = F.reshape(arrays, (-1,) + _tuple_slice(F.shape(arrays), 2, None))
        arrays = split(arrays, num)
    if len(arrays) == 2:
        # Two operands: no ordering decision to make.
        return dot(*arrays)

    arrs = []
    for arr in arrays:
        arrs.append(arr)

    # Promote 1-D endpoints to row/column vectors; `shape_out` records the
    # dimensions that survive into the final result.
    shape_out = ()
    if F.rank(arrs[0]) == 1:
        arrs[0] = F.reshape(arrs[0], (1, arrs[0].size))
    else:
        shape_out += (F.shape(arrs[0])[0],)
    if F.rank(arrs[-1]) == 1:
        arrs[-1] = F.reshape(arrs[-1], (arrs[-1].size, 1))
    else:
        shape_out += (F.shape(arrs[-1])[1],)

    shapes = []
    for arr in arrs:
        shapes.append(F.shape(arr))
    # Find the parenthesization with minimal scalar-multiplication cost,
    # then evaluate the chain recursively in that order.
    order = _min_cost_chain_matmul(_get_dims(shapes))
    result = _multi_dot(arrs, 0, len(arrs) - 1, order)
    return F.reshape(result, shape_out)
4201
4202
def argmax(a, axis=None):
    """
    Returns the indices of the maximum values along an axis.

    Note:
        Numpy argument `out` is not supported.
        On Ascend, in case of multiple occurrences of the maximum values, the return
        indices may not necessarily correspond to the first occurrence.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Input array.
        axis (int, optional): By default, the index is into
            the flattened array, otherwise along the specified axis.

    Returns:
        Tensor, array of indices into the array. It has the same
        shape as a.shape with the dimension along axis removed.

    Raises:
        ValueError: if axis is out of range.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(10, 16).reshape(2, 3)
        >>> print(np.argmax(a))
        5
        >>> print(np.argmax(a, axis=0))
        [1 1 1]
        >>> print(np.argmax(a, axis=1))
        [2 2]
    """
    # Delegate to the Tensor method after coercing array_like input.
    return _to_tensor(a).argmax(axis)
4239
4240
def argmin(a, axis=None):
    """
    Returns the indices of the minimum values along an axis.

    Note:
        Numpy argument `out` is not supported.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Input array.
        axis (int, optional): By default, the index is into
            the flattened array, otherwise along the specified axis.

    Returns:
        Tensor, array of indices into the array. It has the same
        shape as a.shape with the dimension along axis removed.

    Raises:
        ValueError: if axis is out of range.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(10, 16).reshape(2, 3)
        >>> print(np.argmin(a))
        0
        >>> print(np.argmin(a, axis=0))
        [0 0 0]
        >>> print(np.argmin(a, axis=1))
        [0 0]
    """
    # Delegate to the Tensor method after coercing array_like input.
    return _to_tensor(a).argmin(axis)
4275
4276
@constexpr
def _get_sort_range(size):
    """
    Returns the iteration range for a binary search over a sorted array of the
    given size, i.e. ``range(ceil(log2(size + 1)))``.

    Evaluated at compile time (constexpr). `ceil` and `log2` here are this
    module's tensor functions, so `size` is first wrapped into a float32 tensor
    and the result is cast back to an integer before building the range.
    """
    return tuple(range(ceil(log2(_to_tensor(size + 1).astype(mstype.float32))).astype(mstype.int32)))
4281
4282
def searchsorted(a, v, side='left', sorter=None):
    """
    Finds indices where elements should be inserted to maintain order.
    Finds the indices into a sorted array `a` such that, if the corresponding elements
    in `v` were inserted before the indices, the order of `a` would be preserved.

    Args:
        a (Union[list, tuple, Tensor]): 1-D input array. If `sorter` is
            None, then it must be sorted in ascending order, otherwise `sorter` must be
            an array of indices that sort it.
        v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into `a`.
        side ('left', 'right', optional): If 'left', the index of the first suitable
            location found is given. If 'right', return the last such index. If there is
            no suitable index, return either 0 or N (where N is the length of `a`).
        sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional array of
            integer indices that sort array `a` into ascending order. They are typically
            the result of argsort.

    Returns:
        Tensor, array of insertion points with the same shape as `v`.

    Raises:
        ValueError: if argument for `side` or `sorter` is invalid.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import numpy as np
        >>> print(np.searchsorted([1,2,3,4,5], 3))
        2
        >>> print(np.searchsorted([1,2,3,4,5], 3, side='right'))
        3
        >>> print(np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]))
        [0 5 1 2]
    """
    if side not in ('left', 'right'):
        _raise_value_error('invalid value for keyword "side"')
    a = _to_tensor(a).astype(mstype.float32)
    if F.rank(a) != 1:
        _raise_value_error('`a` should be 1-D array')
    v = _to_tensor(v)
    shape = F.shape(v)
    if sorter is not None:
        # Convert before validating: a list/tuple sorter has no rank/size of its
        # own, so checking the raw input would raise the wrong error type.
        sorter = _to_tensor(sorter)
        if F.rank(sorter) != 1 or sorter.size != a.size:
            _raise_value_error('sorter must be 1-D array with the same size as `a`')
        # Reorder `a` into ascending order through the sorter indices.
        sorter = F.expand_dims(sorter, -1)
        a = F.gather_nd(a, sorter)
    # 'left' keeps equal elements on the right of the insertion point ( <= ),
    # 'right' keeps them on the left ( < ).
    less_op = F.tensor_le if side == 'left' else F.tensor_lt
    i = F.fill(mstype.int32, shape, 0)
    j = F.fill(mstype.int32, shape, a.size)
    two = F.fill(mstype.int32, shape, 2)

    # Vectorized binary search: one fixed pass of log2(size) halvings per element.
    for _ in _get_sort_range(a.size):
        mid = floor_divide(add(i, j), two)
        mask = less_op(v, F.gather_nd(a, F.expand_dims(mid, -1)))
        i = F.select(mask, i, mid)
        j = F.select(mask, mid, j)
    return j
4343
4344
def interp(x, xp, fp, left=None, right=None):
    """
    One-dimensional linear interpolation for monotonically increasing sample points.
    Returns the one-dimensional piecewise linear interpolant to a function with given
    discrete data points `(xp, fp)`, evaluated at `x`.

    Note:
        Numpy argument `period` is not supported.
        Complex values are not supported.

    Args:
        x (Union[int, float, bool, list, tuple, Tensor]): The x-coordinates at which
            to evaluate the interpolated values.
        xp (Union[int, float, bool, list, tuple, Tensor]): 1-D sequence of floats, the
            x-coordinates of the data points, must be increasing.
        fp (Union[int, float, bool, list, tuple, Tensor]): 1-D sequence of floats, the
            y-coordinates of the data points, same length as `xp`.
        left (float, optional): Value to return for ``x < xp[0]``, default is ``fp[0]``
            once obtained.
        right (float, optional): Value to return for ``x > xp[-1]``, default is ``fp[-1]``
            once obtained.

    Returns:
        Tensor, the interpolated values, same shape as `x`.

    Raises:
        ValueError: if `xp` or `fp` is not one-dimensional, or if `xp` and `fp` do not have
            the same length.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> xp = [1, 2, 3]
        >>> fp = [3, 2, 0]
        >>> print(np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp))
        [3.         3.         2.5        0.55999994 0.        ]
        >>> UNDEF = -99.0
        >>> print(np.interp(3.14, xp, fp, right=UNDEF))
        -99.0
    """
    # implement period once sort is supported
    x, xp, fp = _to_tensor(x, xp, fp)
    if F.rank(xp) != 1 or F.rank(fp) != 1:
        _raise_value_error('xp and fp must be 1-d sequences')
    num_points = xp.size
    if fp.size != num_points:
        _raise_value_error('the y-coordinates must have the same length as `xp`')

    xp, fp = xp.astype(mstype.float32), fp.astype(mstype.float32)

    # Right and left data-point indices of the interval containing each query
    # point, both clipped into the valid index range.
    hi = clip(searchsorted(xp, x), 0, num_points - 1)
    lo = clip(hi - _to_tensor(1), 0, num_points - 1)
    lo, hi = F.expand_dims(lo, -1), F.expand_dims(hi, -1)
    x_lo, x_hi = F.gather_nd(xp, lo), F.gather_nd(xp, hi)
    y_lo, y_hi = F.gather_nd(fp, lo), F.gather_nd(fp, hi)
    interpolated = (y_lo*(x_hi - x) + y_hi*(x - x_lo))/(x_hi - x_lo)
    # Degenerate interval (x_lo == x_hi would divide by zero): use the left value.
    interpolated = F.select(F.equal(x_lo, x_hi), y_lo, interpolated)

    # Out-of-range queries take `left`/`right`, defaulting to the boundary values.
    first = _to_tensor([0])
    last = _to_tensor([num_points - 1])
    if left is None:
        left = F.gather_nd(fp, first)
    left = full(F.shape(x), left, mstype.float32)
    if right is None:
        right = F.gather_nd(fp, last)
    right = full(F.shape(x), right, mstype.float32)
    interpolated = F.select(F.tensor_lt(x, F.gather_nd(xp, first)), left, interpolated)
    interpolated = F.select(F.tensor_gt(x, F.gather_nd(xp, last)), right, interpolated)
    return interpolated
4419
4420
def _apply_tensor_op(fn, *args, dtype=None):
    """Converts arguments to tensors, applies `fn`, and optionally casts the result to `dtype`."""
    tensors = _to_tensor(*args)
    # A single argument converts to a bare Tensor; multiple arguments to a sequence.
    res = fn(tensors) if isinstance(tensors, Tensor) else fn(*tensors)
    if dtype is not None and not _check_same_type(F.dtype(res), dtype):
        res = F.cast(res, dtype)
    return res
4431
4432
def sign(x, dtype=None):
    """
    Returns an element-wise indication of the sign of a number.

    The sign function returns `-1 if x < 0, 0 if x == 0, 1 if x > 0`. nan is returned for nan inputs.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        Complex inputs are not supported now.
        On Ascend, integer inputs are not supported.

    Args:
        x (Union[int, float, list, tuple, Tensor]): Input values.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        The sign of x. This is a tensor or a scalar when x is a scalar.

    Raises:
        TypeError: if dtype of the input is not in the given types or
            the input can not be converted to tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.sign(np.array([-1., 0., 1., 1.2]))
        >>> print(output)
        [-1.  0.  1.  1.]
    """
    if not isinstance(x, (int, float, list, tuple, Tensor)):
        _raise_type_error('integer, float, list, tuple or Tensor are expected, but got', x)
    x = _to_tensor(x)
    if _check_same_type(F.dtype(x), mstype.bool_):
        _raise_type_error("sign does not accept dtype bool.")

    # x / |x| is +/-1 for non-zero entries; the zero positions are patched below.
    unit = x / absolute(x)
    zero_tensor = _broadcast_to_shape(_make_tensor(0, x.dtype), x.shape)
    zero_mask = F.equal(x, 0)
    out = F.select(zero_mask, zero_tensor, unit)

    if dtype is not None and not _check_same_type(F.dtype(out), dtype):
        out = F.cast(out, dtype)
    return out
4480
4481
def copysign(x1, x2, dtype=None):
    """
    Changes the sign of `x1` to that of `x2`, element-wise.

    If `x2` is a scalar, its sign will be copied to all elements of `x1`.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        Complex inputs are not supported now.

    Args:
        x1 (Union[int, float, list, tuple, Tensor]): Values to change the sign of.
        x2 (Union[int, float, list, tuple, Tensor]): The sign of x2 is copied to x1. If `x1.shape != x2.shape`,
            they must be broadcastable to a common shape (which becomes the shape of the output).
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. The values of `x1` with the sign of `x2`. This is a scalar if both `x1` and `x2` are scalars.

    Raises:
        TypeError: if dtype of the input is not in the given types or
            the input can not be converted to tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.copysign(np.array([1, -1, -1]), np.array([-1, 1, -1]))
        >>> print(output)
        [-1  1 -1]
    """
    if not isinstance(x1, (int, float, list, tuple, Tensor)):
        _raise_type_error('integer, float, list, tuple or Tensor are expected, but got', x1)
    if not isinstance(x2, (int, float, list, tuple, Tensor)):
        _raise_type_error('integer, float, list, tuple or Tensor are expected, but got', x2)
    x1, x2 = _to_tensor(x1, x2)
    shape_out = _infer_out_shape(F.shape(x1), F.shape(x2))
    x1 = _broadcast_to_shape(x1, shape_out)
    x2 = _broadcast_to_shape(x2, shape_out)
    if _check_same_type(F.dtype(x1), mstype.bool_) or _check_same_type(F.dtype(x2), mstype.bool_):
        # Fixed: the message previously said "sign", copy-pasted from sign().
        _raise_type_error("copysign does not accept dtype bool.")

    # F.absolute only supports float dtypes, so round-trip through float32
    # for integer inputs while preserving the original dtype in the result.
    original_dtype = x1.dtype
    if not _check_is_float(original_dtype):
        pos_tensor = F.absolute(x1.astype('float32')).astype(original_dtype)
    else:
        pos_tensor = F.absolute(x1)

    # Pick the negated magnitude wherever x2 is negative.
    neg_tensor = F.neg_tensor(pos_tensor)
    less_zero = F.less(x2, 0)
    res = F.select(less_zero, neg_tensor, pos_tensor)

    if dtype is not None and not _check_same_type(F.dtype(res), dtype):
        res = F.cast(res, dtype)
    return res
4540
4541
def digitize(x, bins, right=False):
    """
    Returns the indices of the bins to which each value in input array belongs.
    If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is returned
    as appropriate.

    Args:
        x (Union[int, float, bool, list, tuple, Tensor]): Input array to be binned.
        bins (Union[list, tuple, Tensor]): Array of bins. It has to
            be 1-dimensional and monotonic.
        right (boolean, optional): Indicating whether the intervals include the right
            or the left bin edge. Default behavior is ``(right==False)`` indicating
            that the interval does not include the right edge. The left bin end is
            open in this case, i.e., ``bins[i-1] <= x < bins[i]`` is the default
            behavior for monotonically increasing bins.

    Returns:
        Tensor of ints, output array of indices, of same shape as `x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
        >>> bins = np.array([0, 5, 10, 15, 20])
        >>> inds = np.digitize(x, bins)
        >>> print(inds)
        [1 3 3 4 5]
    """
    x, bins = _to_tensor(x, bins)
    if F.rank(bins) != 1:
        _raise_value_error('bins should be 1-dimensional')
    if x.size == 0:
        return x
    if bins.size == 0:
        return zeros(F.shape(x), mstype.int32)
    # numpy's `right` flag maps to the opposite `side` keyword of searchsorted.
    search_side = 'left' if right else 'right'
    # Detect monotonic direction from the end points.
    ascending = bins[0] <= bins[_type_convert(int, bins.size) - 1]
    idx_ascending = searchsorted(bins, x, search_side)
    # For decreasing bins, search the flipped array and mirror the indices.
    idx_descending = _to_tensor(bins.size) - searchsorted(flip(bins), x, search_side)
    return where_(ascending, idx_ascending, idx_descending)
4586
4587
def bincount(x, weights=None, minlength=0, length=None):
    """
    Count number of occurrences of each value in array of non-negative ints.
    The number of bins (of size 1) is one larger than the largest value in `x`.
    If `minlength` is specified, there will be at least this number of bins in the
    output array (though it will be longer if necessary, depending on the contents
    of `x`). Each bin gives the number of occurrences of its index value in `x`. If
    `weights` is specified the input array is weighted by it, i.e. if a value `n`
    is found at position `i`, ``out[n] += weight[i]`` instead of ``out[n] += 1``.

    Note:
        The additional argument `length` specifies the number of bins (overriding
        ``x.max() + 1``), which must be provided in graph mode.
        If `x` contains negative values, no error will be raised, and negative values
        are treated as zeros instead.

    Args:
        x (Union[list, tuple, Tensor]): 1-d input array.
        weights (Union[int, float, bool, list, tuple, Tensor], optional): Weights,
            array of the same shape as `x`. Defaults to None.
        minlength (int, optional): A minimum number of bins for the output array.
            Defaults to 0.
        length (int, optional): Number of bins. Defaults to None.

    Returns:
        Tensor, the result of binning the input array. The length of out is equal to
        ``np.amax(x)+1``.

    Raises:
        ValueError: if `x` is not one-dimensional, or if `x` and `weights` do not have
            the same shape.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.bincount(np.arange(5)))
        [1 1 1 1 1]
        >>> print(np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])))
        [1 3 1 1 0 0 0 1]
        >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
        >>> x = np.array([0, 1, 1, 2, 2, 2])
        >>> print(np.bincount(x,  weights=w))
        [0.3 0.7 1.1]
    """
    x = _to_tensor(x)
    if F.rank(x) != 1:
        _raise_value_error('`x` should be one-dimensional')
    if not _check_is_int(F.dtype(x)):
        _raise_type_error('`x` should be an array of ints')
    # Negative values are clamped to zero rather than raising (see Note above).
    x = clip(x, 0, None)
    if length is None:
        # The bin count can only be derived from the data when `x` is a
        # compile-time constant; otherwise the caller must supply `length`.
        if F.isconstant(x):
            length = int(maximum(F.reduce_max(x.astype(mstype.float32)), minlength - 1).asnumpy()) + 1
        else:
            _raise_value_error('argument `length` must be provided in graph mode')
    # One-hot style membership matrix: row n marks positions where x == n,
    # so the row sums are the per-bin counts.
    idx = arange(length).reshape(length, 1)
    idx_mapping = F.equal(x, idx)
    if weights is not None:
        weights = _to_tensor(weights)
        if F.shape(x) != F.shape(weights):
            _raise_value_error('`x` and `weights` must have the same length')
        # Weighted count: replace each membership flag by its sample weight.
        idx_mapping *= weights
    return F.reduce_sum(idx_mapping.astype(mstype.float32), 1).ravel()
4653
4654
def histogram(a, bins=10, range=None, weights=None, density=False): # pylint: disable=redefined-builtin
    """
    Computes the histogram of a dataset.

    Note:
        String values for `bins` is not supported.
        Deprecated numpy argument `normed` is not supported.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Input data. The histogram
            is computed over the flattened array.
        bins (Union[int, tuple, list, Tensor], optional): If `bins` is an int, it
            defines the number of equal-width bins in the given range (10, by
            default). If `bins` is a sequence, it defines the bin edges, including
            the rightmost edge, allowing for non-uniform bin widths.
        range((float, float), optional): The lower and upper range of the bins. If
            not provided, `range` is simply ``(a.min(), a.max())``. Values outside
            the range are ignored. The first element of the range must be less than
            or equal to the second.
        weights (Union[int, float, bool, list, tuple, Tensor], optional): An array
            of weights, of the same shape as `a`. If density is True, the weights
            are normalized, so that the integral of the density over the range
            remains 1.
        density (boolean, optional): If False, the result will contain the number of
            samples in each bin. If True, the result is the value of the probability
            density function at the bin, normalized such that the integral over the
            range is 1. Note that the sum of the histogram values will not be equal
            to 1 unless bins of unity width are chosen; it is not a probability mass
            function.

    Returns:
        (Tensor, Tensor), the values of the histogram and the bin edges.

    Raises:
        ValueError: if `x` and `weights` do not have the same size.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import numpy as np
        >>> print(np.histogram([1, 2, 1], bins=[0, 1, 2, 3]))
        (Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00,  2.00000000e+00,  1.00000000e+00]),
        Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 3]))
        >>> print(np.histogram(np.arange(4), bins=np.arange(5), density=True))
        (Tensor(shape=[4], dtype=Float32, value=
        [ 2.50000000e-01,  2.50000000e-01,  2.50000000e-01,  2.50000000e-01]),
        Tensor(shape=[5], dtype=Int32, value= [0, 1, 2, 3, 4]))
        >>> print(np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]))
        (Tensor(shape=[3], dtype=Float32, value= [ 1.00000000e+00,  4.00000000e+00,  1.00000000e+00]),
        Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 3]))
    """
    a = _to_tensor(a)
    if weights is not None:
        weights = _to_tensor(weights)
        if F.shape(a) != F.shape(weights):
            _raise_value_error('weights should have the same shape as a')
        weights = weights.ravel()
    a = a.ravel()
    edges = histogram_bin_edges(a, bins, range, weights)
    num_edges = _type_convert(int, edges.size)
    # Bin index per sample; samples equal to the rightmost edge are folded
    # into the last (closed) bin.
    sample_bins = searchsorted(edges, a, 'right')
    sample_bins = where_(a == edges[-1], _to_tensor(num_edges - 1), sample_bins)
    # Slot 0 collects out-of-range samples below the first edge; drop it.
    hist = bincount(sample_bins, weights, length=num_edges)[1:]
    if hist.size == 0:
        return hist, edges
    if density:
        hist = F.cast(hist, mstype.float32)
        hist = hist/diff(edges)/F.reduce_sum(hist)
    return hist, edges
4725
4726
@constexpr
def _factor_flattened_hist(nbin):
    """Returns the per-dimension strides used to flatten a multi-dimensional histogram."""
    # Suffix products of the trailing bin counts, i.e. row-major strides,
    # with the innermost dimension getting stride 1.
    suffix_products = itertools.accumulate(reversed(nbin[1:]), operator.mul)
    factor = list(suffix_products)
    factor.reverse()
    factor.append(1)
    return factor
4733
4734
def _get_histogramdd_count(ndim, bin_edges, sample, weights):
    """
    Returns the D-dimensional count grid for histogramdd.

    Bins each coordinate column of `sample` against the matching entry of
    `bin_edges`, flattens the per-dimension bin indices into linear indices,
    counts them with `bincount`, and reshapes the counts back to the bin grid.
    """
    data_indices = []
    nbin = ()
    flattened_bin_size = 1
    for i in F.make_range(ndim):
        # Per-dimension bin index for every sample; values equal to the last
        # edge are folded back into the last (closed) bin.
        data_to_bins = searchsorted(bin_edges[i], sample[:, i], 'right')
        bin_size = _type_convert(int, bin_edges[i].size)
        data_to_bins = where_(sample[:, i] == bin_edges[i][-1], _to_tensor(bin_size - 1), data_to_bins)
        data_indices.append(data_to_bins)
        # bin_size + 1 slots per dimension: searchsorted yields 0..bin_size,
        # so out-of-range samples occupy the outermost slots on each side.
        nbin += (bin_size + 1,)
        flattened_bin_size *= (bin_size + 1)

    # Combine the per-dimension indices into a single linear index using
    # row-major strides over the padded grid.
    factor = F.reshape(_to_tensor(_factor_flattened_hist(nbin)), (ndim, 1))
    stacked_indices = stack(data_indices) * factor
    if _get_device() == 'Ascend':
        # NOTE(review): float32 cast looks like a workaround for an Ascend op
        # dtype limitation — confirm before changing.
        stacked_indices = F.cast(stacked_indices, mstype.float32)
    flattened_hist = F.reduce_sum(stacked_indices.astype(mstype.float32), 0)
    count = bincount(flattened_hist.astype(mstype.int32), weights, length=flattened_bin_size)
    count = F.reshape(count, nbin)
    # Drop the first and last slot along every dimension (out-of-range samples).
    slices = _list_comprehensions(ndim, F.make_slice(1, -1, 1), True)
    count = count[slices]
    return count
4758
4759
def histogramdd(sample, bins=10, range=None, weights=None, density=False): # pylint: disable=redefined-builtin
    """
    Computes the multidimensional histogram of some data.

    Note:
        Deprecated numpy argument `normed` is not supported.

    Args:
        sample (Union[list, tuple, Tensor]): The data to be histogrammed, either `(N, D)`
            array, or `(D, N)` array_like. Note the unusual interpretation of sample
            when an array_like:

            When an array, each row is a coordinate in a `D-dimensional` space, such as
            ``histogramdd(np.array([p1, p2, p3]))``.

            When an array_like, each element is the list of values for single coordinate,
            such as ``histogramdd((X, Y, Z))``.

            The first form should be preferred.
        bins (Union[int, tuple, list], optional): The bin specification:

            A sequence of arrays describing the monotonically increasing bin edges along
            each dimension.

            The number of bins for each dimension ``(nx, ny, … =bins)``

            The number of bins for all dimensions ``(nx=ny=…=bins)``.
        range(Union[list, tuple], optional): A sequence of length `D`, each an optional
            ``(lower, upper)`` tuple giving the outer bin edges to be used if the edges
            are not given explicitly in bins. An entry of None in the sequence results in
            the minimum and maximum values being used for the corresponding dimension.
            The default, None, is equivalent to passing a tuple of `D` None values.
        weights (Union[list, tuple, Tensor], optional): An array with shape `(N,)` of values
            `w_i` weighing each sample ``(x_i, y_i, z_i, …)``.
        density (boolean, optional): If False, the default, returns the number of samples
            in each bin. If True, returns the probability density function at the bin,
            ``bin_count / sample_count / bin_volume``.

    Returns:
        (Tensor, list of Tensor), the values of the histogram and the bin edges.

    Raises:
        ValueError: if `range` does not have the same size as the number of samples.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import numpy as np
        >>> sample = np.arange(15).reshape(5, 3)
        >>> print(sample)
        [[ 0  1  2]
        [ 3  4  5]
        [ 6  7  8]
        [ 9 10 11]
        [12 13 14]]
        >>> print(np.histogramdd(sample, bins=(2, 3, 4)))
        (Tensor(shape=[2, 3, 4], dtype=Float32, value=
        [[[ 1.00000000e+00,  1.00000000e+00,  0.00000000e+00,  0.00000000e+00],
        [ 0.00000000e+00,  0.00000000e+00,  0.00000000e+00,  0.00000000e+00],
        [ 0.00000000e+00,  0.00000000e+00,  0.00000000e+00,  0.00000000e+00]],
        [[ 0.00000000e+00,  0.00000000e+00,  0.00000000e+00,  0.00000000e+00],
        [ 0.00000000e+00,  0.00000000e+00,  1.00000000e+00,  0.00000000e+00],
        [ 0.00000000e+00,  0.00000000e+00,  0.00000000e+00,  2.00000000e+00]]]),
        [Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00,  6.00000000e+00,  1.20000000e+01]),
        Tensor(shape=[4], dtype=Float32, value=
        [ 1.00000000e+00,  5.00000000e+00,  9.00000000e+00,  1.30000000e+01]),
        Tensor(shape=[5], dtype=Float32, value=
        [ 2.00000000e+00,  5.00000000e+00,  8.00000000e+00,  1.10000000e+01,  1.40000000e+01])])
    """
    # A sequence of per-coordinate arrays (D, N) is stacked into an (N, D) tensor.
    if isinstance(sample, (tuple, list)):
        sample = _to_tensor(*sample)
        sample = stack(sample, -1)
    elif not isinstance(sample, Tensor):
        _raise_type_error('sample should be (N, D) array, or (D, N) array_like')
    if F.rank(sample) != 2:
        _raise_value_error('when an array, sample should be 2-dimensional')
    ndim = F.shape(sample)[1]

    # Normalize `bins` to one entry per dimension.
    if isinstance(bins, int):
        bins = _list_comprehensions(ndim, bins)
    if isinstance(bins, (tuple, list, Tensor)):
        if len(bins) != ndim:
            _raise_value_error('The dimension of bins must be equal to the dimension of the sample')
    else:
        _raise_type_error('bins should be int or sequence')

    # Normalize `range` to one (possibly None) entry per dimension.
    if range is None:
        range = _list_comprehensions(ndim, None, False, True)
    else:
        if len(range) != ndim:
            _raise_value_error('range argument must have one entry per dimension')

    # Per-dimension bin edges and bin widths.
    bin_edges = []
    dedges = []
    for i in F.make_range(ndim):
        edges = histogram_bin_edges(sample[:, i], bins[i], range[i], weights)
        bin_edges.append(edges)
        dedges.append(diff(edges))

    count = _get_histogramdd_count(ndim, bin_edges, sample, weights)

    if density:
        # Normalize: bin_count / sample_count / bin_volume, dividing by the
        # bin widths along each dimension in turn.
        s = F.reduce_sum(count.astype(mstype.float32))
        for i in F.make_range(ndim):
            shape = _expanded_shape(ndim, dedges[i].size, i)
            count /= _to_tensor(dedges[i]).reshape(shape)
        count /= s
    return count, bin_edges
4869
4870
def histogram2d(x, y, bins=10, range=None, weights=None, density=False): # pylint: disable=redefined-builtin
    """
    Computes the multidimensional histogram of some data.

    Note:
        Deprecated numpy argument `normed` is not supported.

    Args:
        x (Union[list, tuple, Tensor]): An array with shape `(N,)` containing the x
            coordinates of the points to be histogrammed.
        y (Union[list, tuple, Tensor]): An array with shape `(N,)` containing the y
            coordinates of the points to be histogrammed.
        bins (Union[int, tuple, list], optional): The bin specification:

            If int, the number of bins for the two dimensions ``(nx=ny=bins)``.

            If array_like, the bin edges for the two dimensions ``(x_edges=y_edges=bins)``.

            If [int, int], the number of bins in each dimension ``(nx, ny = bins)``.

            If [array, array], the bin edges in each dimension ``(x_edges, y_edges = bins)``.

            A combination [int, array] or [array, int], where int is the number of bins and
            array is the bin edges.
        range(Union[list, tuple], optional): has shape (2, 2), the leftmost and rightmost
            edges of the bins along each dimension (if not specified explicitly in the bins
            parameters): ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
            will be considered outliers and not tallied in the histogram.
        weights (Union[list, tuple, Tensor], optional): An array with shape `(N,)` of values
            `w_i` weighing each sample `(x_i, y_i)`.
        density (boolean, optional): If False, the default, returns the number of samples
            in each bin. If True, returns the probability density function at the bin,
            ``bin_count / sample_count / bin_volume``.

    Returns:
        (Tensor, Tensor, Tensor), the values of the bi-directional histogram and the bin edges
        along the first and second dimensions.

    Raises:
        ValueError: if `range` does not have the same size as the number of samples.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import numpy as np
        >>> x = np.arange(5)
        >>> y = np.arange(2, 7)
        >>> print(np.histogram2d(x, y, bins=(2, 3)))
        (Tensor(shape=[2, 3], dtype=Float32, value=
        [[ 2.00000000e+00,  0.00000000e+00,  0.00000000e+00],
        [ 0.00000000e+00,  1.00000000e+00,  2.00000000e+00]]),
        Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00,  2.00000000e+00,  4.00000000e+00]),
        Tensor(shape=[4], dtype=Float32, value=
        [ 2.00000000e+00,  3.33333349e+00,  4.66666698e+00,  6.00000000e+00]))
    """
    # Special case of histogramdd with the two coordinate arrays as dimensions.
    hist, edges = histogramdd((x, y), bins=bins, range=range, weights=weights, density=density)
    x_edges, y_edges = edges[0], edges[1]
    return hist, x_edges, y_edges
4929
4930
def matrix_power(a, n):
    """
    Raises a square matrix to the (integer) power `n`.

    For positive integers `n`, the power is computed by repeated matrix squarings and
    matrix multiplications.
    If :math:`n == 0`, the identity matrix of the same shape as `M` is returned.

    Note:
        Stacks of object matrices are not currently supported and
        :math:`n < 0` is not supported.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Input matrix.
        n (int): The exponent can be any integer or long integer, positive or zero.

    Returns:
        Tensor.

    Raises:
        TypeError: if the input can not be converted to a tensor or
            the exponent is not integer.
        ValueError: if the input includes less than 2 dimensions or
            the last 2 dimensions are not square.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import numpy as np
        >>> a = np.arange(16).reshape(4, 4).astype('float32')
        >>> print(np.matrix_power(a, 2))
        [[ 56.  62.  68.  74.]
         [152. 174. 196. 218.]
         [248. 286. 324. 362.]
         [344. 398. 452. 506.]]
    """
    a = _to_tensor(a)
    if not isinstance(n, int):
        _raise_type_error("exponent must be an integer")
    if a.ndim < 2:
        _raise_value_error("Array must be at least two-dimensional")
    if a.shape[-2] != a.shape[-1]:
        _raise_value_error("Last 2 dimensions of the array must be square")

    if n < 0:
        _raise_value_error("n < 0 is not supported now.")
    if n == 0:
        return _broadcast_to_shape(eye(a.shape[-1], a.shape[-1], dtype=a.dtype), a.shape)
    if n == 1:
        return a
    # Exponentiation by squaring: O(log n) matmuls instead of O(n) repeated
    # multiplications. Start from `a` so no identity matrix is needed; the
    # remaining exponent n - 1 is consumed bit by bit. All factors are powers
    # of the same matrix, so the multiplication order does not matter.
    res = a
    base = a
    n = n - 1
    while n > 0:
        if n & 1:
            res = C.matmul(res, base)
        n >>= 1
        if n > 0:
            base = C.matmul(base, base)
    return res
4987
4988
def around(a, decimals=0):
    """
    Evenly round to the given number of decimals.

    Note:
        Numpy argument `out` is not supported.
        Complex numbers are not supported.

    Args:
        a (Union[int, float, list, tuple, Tensor]): Input data.
        decimals (int): Number of decimal places to round to. Default: 0.

    Returns:
        Tensor. A tensor of the same type as a, containing the rounded values.
        The result of rounding a float is a float.

    Raises:
        TypeError: if the input can not be converted to a tensor or
            the `decimals` argument is not integer.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([-1.3, 0.0, 0.5, 1.5, 2.5])
        >>> print(np.around(a))
        [-1. 0. 0. 2. 2.]
    """
    a = _to_tensor_origin_dtype(a)
    if not isinstance(decimals, int):
        _raise_type_error("decimals must be an integer")
    if decimals < 0:
        _raise_value_error("decimals < 0 is not supported now.")
    if decimals == 0:
        return _round(a)
    # Scale up, round at integer precision, then scale back down.
    factor = 10**decimals
    return F.tensor_div(_round(a * factor), factor)
5026
5027
def _to_poly1d(x):
    """Converts input to a 1-D tensor of polynomial coefficients."""
    coeffs = atleast_1d(_to_tensor(x))
    if F.rank(coeffs) > 1:
        _raise_value_error('input array must be scalar or 1-d sequence')
    return coeffs
5033
5034
def polyadd(a1, a2):
    """
    Finds the sum of two polynomials.
    Returns the polynomial resulting from the sum of two input polynomials.

    Note:
        Numpy object poly1d is currently not supported.

    Args:
        a1 (Union[int, float, list, tuple, Tensor): Input polynomial.
        a2 (Union[int, float, list, tuple, Tensor): Input polynomial.

    Returns:
        Tensor, the sum of the inputs.

    Raises:
        ValueError: if the input array has more than 1 dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.polyadd([1, 2], [9, 5, 4]))
        [9 6 6]
    """
    a1 = _to_poly1d(a1)
    a2 = _to_poly1d(a2)
    size_diff = a1.size - a2.size
    if size_diff == 0:
        return add(a1, a2)
    # Make `a1` the longer polynomial; elementwise addition is commutative,
    # so swapping the operands does not change the result.
    if size_diff < 0:
        a1, a2 = a2, a1
        size_diff = -size_diff
    # Leading coefficients of the longer polynomial pass through unchanged;
    # the trailing (low-degree) coefficients are summed pairwise.
    return concatenate((a1[:size_diff], add(a1[size_diff:], a2)))
5069
5070
def polysub(a1, a2):
    """
    Difference (subtraction) of two polynomials.
    Given two polynomials `a1` and `a2`, returns ``a1 - a2``.

    Note:
        Numpy object poly1d is currently not supported.

    Args:
        a1 (Union[int, float, list, tuple, Tensor): Minuend polynomial.
        a2 (Union[int, float, list, tuple, Tensor): Subtrahend polynomial.

    Returns:
        Tensor, the difference of the inputs.

    Raises:
        ValueError: if the input array has more than 1 dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.polysub([2, 10, -2], [3, 10, -4]))
        [-1  0  2]
    """
    # a1 - a2 == a1 + (-a2); reuse polyadd for the coefficient alignment.
    negated = F.neg_tensor(_to_tensor(a2))
    return polyadd(a1, negated)
5098
5099
def polyval(p, x):
    """
    Evaluates a polynomial at specific values.
    If `p` is of length `N`, this function returns the value:
    ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
    If `x` is a sequence, then ``p(x)`` is returned for each element of `x`. If `x`
    is another polynomial then the composite polynomial ``p(x(t))`` is returned.

    Note:
        Numpy object poly1d is currently not supported.

    Args:
        p (Union[int, float, bool, list, tuple, Tensor): 1D array of polynomial
            coefficients (including coefficients equal to zero) from highest
            degree to the constant term.
        x (Union[int, float, bool, list, tuple, Tensor): A number, an array of
            numbers, at which to evaluate `p`.

    Returns:
        Tensor.
    Raises:
        ValueError: if `p` has more than 1 dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.polyval([3.,0.,1.], 5.))
        76.0
    """
    p = _to_poly1d(p)
    x = _to_tensor(x)
    num_coeffs = _type_convert(int, p.size)
    # Exponents N-1, N-2, ..., 1, 0 matching the coefficient order of `p`.
    exponents = arange(num_coeffs - 1, -1, -1).astype(mstype.float32)
    # Append a trailing axis to `x` so each point broadcasts against all powers.
    x_expanded = x.reshape(F.shape(x) + (1,))
    terms = p * x_expanded**exponents
    # Sum the weighted powers over the trailing axis to get p(x).
    return F.reduce_sum(terms, -1)
5137
5138
def polyder(p, m=1):
    """
    Returns the derivative of the specified order of a polynomial.

    Note:
        Numpy object poly1d is currently not supported.

    Args:
        p (Union[int, float, bool, list, tuple, Tensor): Polynomial to differentiate.
            A sequence is interpreted as polynomial coefficients.
        m (int, optional): Defaults to 1, order of differentiation.

    Returns:
        Tensor, a new polynomial representing the derivative.

    Raises:
        ValueError: if `p` has more than 1 dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.polyder([1, 1, 1, 1]))
        [3 2 1]
    """
    p = _to_poly1d(p)
    if m < 0:
        _raise_value_error('Order of derivative must be positive')
    if m >= p.size:
        # Differentiating more times than there are coefficients yields zero.
        return _to_tensor([])
    # Apply the power rule once per requested order: drop the constant term
    # and multiply each remaining coefficient by its (old) exponent.
    for _ in range(m):
        degree = _type_convert(int, p.size)
        exponents = _to_tensor(F.make_range(degree - 1, 0, -1))
        p = p[:-1]*exponents
    return p
5174
5175
def polymul(a1, a2):
    """
    Finds the product of two polynomials.

    Note:
        Numpy object poly1d is currently not supported.

    Args:
        a1 (Union[int, float, bool, list, tuple, Tensor): Input polynomial.
        a2 (Union[int, float, bool, list, tuple, Tensor): Input polynomial.

    Returns:
        Tensor, a new polynomial representing the derivative.

    Raises:
        ValueError: if the input array has more than 1 dimensions.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.polymul([3, 1, 2], [2, 5]))
        [ 6 17  9 10]
    """
    # Polynomial multiplication is the convolution of coefficient sequences.
    poly1 = _to_poly1d(a1)
    poly2 = _to_poly1d(a2)
    return convolve(poly1, poly2)
5204
5205
def polyint(p, m=1, k=None):
    """
    Returns an antiderivative (indefinite integral) of a polynomial.

    Note:
        Numpy object poly1d is currently not supported.

    Args:
        p (Union[int, float, bool, list, tuple, Tensor): Polynomial to integrate. A
            sequence is interpreted as polynomial coefficients.
        m (int, optional): Defaults to 1, Order of the antiderivative.
        k (Union[int, list of int], optional): Integration constants. They are given
            in the order of integration: those corresponding to highest-order terms
            come first. If None (default), all constants are assumed to be zero. If
            ``m = 1``, a single scalar can be given instead of a list.

    Returns:
        Tensor, a new polynomial representing the antiderivative.

    Raises:
        ValueError: if `p` has more than 1 dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.polyint([1, 1, 1]))
        [0.33333334 0.5        1.         0.        ]
    """
    p = _to_poly1d(p)
    if m < 0:
        # Bug fix: the message previously said "derivative", copied from
        # polyder; this function computes the integral.
        _raise_value_error('Order of integral must be positive')
    if m == 0:
        return p
    if k is None:
        # Default integration constants are all zero.
        k = zeros(m, F.dtype(p))
    k = atleast_1d(_to_tensor(k))
    if k.size == 1:
        # A single scalar constant is reused for every integration step.
        k = F.tile(k, (m,))
    k = F.expand_dims(k, -1)
    # Integrate once per order: divide each coefficient by its new exponent
    # and append the corresponding integration constant as the new low term.
    for i in range(m):
        coeff = _to_tensor(F.make_range(_type_convert(int, p.size), 0, -1))
        p = concatenate((true_divide(p, coeff), k[i]))
    return p
5251
5252
@constexpr
def _get_dtype(x):
    """Returns the dtype of x."""
    # bool must be tested before int: in Python, bool is a subclass of int.
    if isinstance(x, bool):
        return mstype.bool_
    if isinstance(x, int):
        return mstype.int32
    if isinstance(x, float):
        return mstype.float32
    if isinstance(x, typing.Number):
        # Already a mindspore dtype object; pass it through untouched.
        return x
    if isinstance(x, str):
        # Try the mindspore dtype-name map first, then fall back to
        # canonicalizing the string through numpy's dtype parser.
        res = dtype_map.get(x)
        if res is None:
            res = dtype_map.get(str(nptype(x)))
        return res
    raise TypeError('data type not understood')
5270
5271
def result_type(*arrays_and_dtypes):
    """
    Returns the type that results from applying the type promotion rules to the arguments.

    Note:
        The promotion rule is slightly different from original Numpy, but more like
        jax, due to the preference on ``32-bit`` over ``64-bit`` data types.
        Complex dtypes are not supported.

    Args:
        *arrays_and_dtypes (Union[int, float, bool, list, tuple, Tensor, :class:`mindspore.dtype`, str]):
            The operands of some operation whose result type is needed.

    Returns:
        :class:`mindspore.dtype`, the result type.

    Raises:
        TypeError: if the input is not a valid data type.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.result_type('i2', np.float32, True))
        Float32
    """
    def get_dtype(x):
        if isinstance(x, Tensor):
            return F.dtype(_to_tensor(x))
        return _get_dtype(x)

    # Fold the pairwise promotion rule over all operand dtypes, left to right.
    return functools.reduce(_promote, map(get_dtype, arrays_and_dtypes))
5308
5309
def unwrap(p, discont=3.141592653589793, axis=-1):
    """
    Unwraps by changing deltas between values to ``2*pi`` complement.
    Unwraps radian phase `p` by changing absolute jumps greater than `discont` to their
    `2*pi` complement along the given axis.

    Note:
        For absolute jumps that are within a very close range to pi, unwrapping may be done
        differently than numpy due to differences in round-off.

    Args:
        p (Union[int, float, bool, list, tuple, Tensor): Input array.
        discont (float, optional): Maximum discontinuity between values, default is pi.
        axis (int, optional): Axis along which unwrap will operate, default is -1.

    Returns:
        Tensor.

    Raises:
        ValueError: if the axis is out of range.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> phase = np.add(np.linspace(0, np.pi, num=5), [0, 0, 0, np.pi, np.pi])
        >>> print(phase)
        [0.        0.7853982 1.5707964 5.4977875 6.2831855]
        >>> print(np.unwrap(phase))
        [ 0.0000000e+00  7.8539819e-01  1.5707964e+00 -7.8539848e-01 -4.7683716e-07]
    """
    if not isinstance(discont, (int, float)):
        _raise_type_error('discont should be a float')
    p = _to_tensor(p)
    ndim = F.rank(p)
    axis = _check_axis_in_range(axis, ndim)
    # Differences between consecutive elements along `axis`.
    dd = diff(p, axis=axis)
    # Wrap each difference into [-pi, pi).
    ddmod = remainder(add(dd, pi), 2*pi) - pi
    # Boundary case: a positive jump of exactly pi wraps to -pi above;
    # map it back to +pi so the correction has the right sign.
    ddmod = where_(F.logical_and(ddmod == -pi, dd > 0), pi, ddmod)
    # Correction per step; zero it out where the jump is within tolerance.
    ph_correct = ddmod - dd
    ph_correct = where_(absolute(dd) < discont, 0, ph_correct)
    # Build index tuples selecting the first element (kept as-is) and the
    # remaining elements (corrected) along `axis`.
    slice_all = _list_comprehensions(F.rank(p), F.make_slice(None, None, None), True)
    slice0 = _tuple_setitem(slice_all, axis, F.make_slice(0, 1, None))
    slice1 = _tuple_setitem(slice_all, axis, F.make_slice(1, None, None))
    head = p[slice0]
    # Cumulative sum makes each element absorb all earlier corrections.
    tail = add(p[slice1], cumsum(ph_correct, axis))
    return concatenate((head, tail), axis=axis)
5358
5359
def cumprod(a, axis=None, dtype=None):
    """
    Returns the cumulative product of elements along a given axis.

    Note:
        Numpy argument `out` is not supported.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Input tensor.
        axis (int, optional): Axis along which the cumulative product is computed.
            By default the input is flattened.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor.

    Raises:
        TypeError: If the input can not be converted to tensor or `axis` is not integer.
        ValueError: If axis is out of range.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([1, 2, 3])
        >>> print(np.cumprod(x))
        [1 2 6]
    """
    a = _to_tensor_origin_dtype(a)
    original_dtype = F.dtype(a)

    if axis is not None and not isinstance(axis, int):
        _raise_type_error("integer axis is expected, but got", axis)
    if axis is None:
        # Like numpy, a None axis means operate on the flattened array.
        a = a.ravel()
        axis = 0
    _check_axis_in_range(axis, a.ndim)

    # Compute in float32 unless already float64 — presumably a kernel dtype
    # restriction; the result is cast back to a suitable dtype below.
    a = a.astype('float32') if original_dtype != mstype.float64 else a
    if dtype is None:
        # Promote small integer/bool inputs the way numpy does for cumprod.
        if original_dtype in [mstype.int8, mstype.int16, mstype.bool_]:
            dtype = mstype.int32
        elif original_dtype in [mstype.uint8, mstype.uint16]:
            dtype = mstype.uint32
        else:
            dtype = original_dtype
    return _cumprod_default(a, axis).astype(dtype, copy=False)
5409
5410
def _process_index(index, dims, mode='raise'):
    """Generates index (Tensor) according to different modes."""
    if mode == "raise":
        _raise_unimplemented_error("'raise' mode is not implemented")
    if mode not in ['clip', 'wrap']:
        _raise_value_error("invalid mode. Expected 'wrap' or 'clip'")
    original_shape = index.shape
    processed = ()
    # Bring every per-dimension index into the valid range [0, dim).
    for pos, idx in enumerate(index):
        bound = dims[pos]
        if mode == "clip":
            idx = clip(idx, 0, bound - 1)
        elif mode == "wrap":
            idx = remainder(idx, bound)
        # Concat requires at least 1-D operands.
        if idx.ndim < 1:
            idx = F.expand_dims(idx, 0)
        processed += (idx,)
    return P.Concat(0)(processed).reshape(original_shape)
5428
5429
def _get_strides(dims, order='C'):
    """Generates strides (1-D tensor) according to `dims` (1-D tensor)."""
    if order not in ['C', 'F']:
        _raise_value_error("invalid order. Expected 'C' or 'F'")
    # Start from a stride of 1 and multiply outwards: for 'C' order the last
    # dimension is contiguous, for 'F' order the first one is.
    strides = (_to_tensor([1]),)
    remaining = dims[1:][::-1] if order == 'C' else dims[:-1]
    for extent in remaining:
        nxt = strides[-1] * extent
        # Concat requires at least 1-D operands.
        if nxt.ndim < 1:
            nxt = F.expand_dims(nxt, 0)
        strides += (nxt,)
    if order == 'C':
        strides = strides[::-1]
    return P.Concat(0)(strides)
5443
5444
def ravel_multi_index(multi_index, dims, mode='clip', order='C'):
    """
    Converts a tuple of index arrays into an array of flat indices,
    applying boundary modes to the multi-index.

    Note:
        `raise` mode is not supported. Default mode is `clip`.

    Args:
        multi_index (tuple of array_like):
            A tuple of integer arrays, one array for each dimension.
        dims (Union[int, tuple of integers]): The shape of array into which the indices from multi_index apply.
        mode ({`wrap`, `clip`}): Specifies how out-of-bounds indices are handled. Default: `clip`.

            - `wrap`: wrap around
            - `clip`: clip to the range

            In `clip` mode, a negative index which would normally wrap will clip to 0 instead.
        order ({`C`, `F`}): Determines whether the multi-index should be viewed as indexing in
            row-major (C-style) or column-major (Fortran-style) order.

    Returns:
        Raveled_indices array. An array of indices into the flattened version of an array of dimensions dims.

    Raises:
        TypeError: If `multi_index` or `dims` can not be converted to tensor or
            `dims` is not a sequence of integer values.
        ValueError: If the length of `multi_index` and that of `dims` are not equal.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> arr = np.array([[3, 6, 6], [4, 5, 1]])
        >>> output = np.ravel_multi_index(arr, (7, 6))
        >>> print(output)
        [22. 41. 37.]
        >>> output = np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9))
        >>> print(output)
        1621.0
    """
    if isinstance(dims, int):
        dims = (dims,)
    dims = _to_tensor(dims)
    # `dims` must be a 1-D tensor of integers (the target array's shape).
    if dims.ndim > 1 or dims.dtype in (mstype.float16, mstype.float32, mstype.float64, mstype.bool_):
        _raise_type_error("only 1-D integer arrays are accepted.")
    multi_index = _to_tensor(multi_index)
    # One index array per dimension of the target shape.
    if len(multi_index) != len(dims):
        _raise_value_error("parameter multi_index must be a sequence of length ", len(dims))
    if multi_index.dtype in (mstype.float16, mstype.float32, mstype.float64):
        _raise_type_error("only int indices permitted")

    # Clip or wrap out-of-bounds indices per `mode`, then compute the flat
    # index as the stride-weighted sum over dimensions.
    multi_index = _process_index(multi_index, dims, mode)
    strides = _get_strides(dims, order)
    # Reshape strides to (ndim, 1, ..., 1) so they broadcast against the
    # per-dimension index arrays.
    s_shape = strides.shape + _list_comprehensions(multi_index.ndim - 1, 1, True)
    strides = _broadcast_to_shape(strides.reshape(s_shape), multi_index.shape)
    return sum_((multi_index * strides).astype('float32'), axis=0)
5503
5504
def _vector_norm(x, _ord, axis, keepdims):
    """Returns norm of a vector."""
    if _in(_ord, ('fro', 'nuc')):
        # Bug fix: these orders apply to matrices, not vectors; the previous
        # message said "only defined for vectors", which inverted the meaning.
        _raise_value_error('Frobenius norm and nuclear norm are only defined for matrices')
    if _ord is None:
        # Default vector norm is the 2-norm.
        _ord = 2
    if _ord == inf:
        # inf-norm: largest absolute entry.
        res = P.ReduceMax(keepdims)(absolute(x), axis)
    elif _ord == -inf:
        # -inf-norm: smallest absolute entry.
        res = P.ReduceMin(keepdims)(absolute(x), axis)
    elif _ord == 0:
        # 0-"norm": number of nonzero entries.
        res = P.ReduceSum(keepdims)(F.not_equal(x, 0).astype(mstype.float32), axis)
    else:
        # General p-norm: (sum(|x|**p))**(1/p).
        res = power(P.ReduceSum(keepdims)(power(absolute(x), _ord), axis), 1./_ord)
    return res
5520
5521
def _matrix_norm(x, _ord, axis, keepdims):
    """Returns norm of a matrix."""
    if _ord == 0:
        _raise_value_error('for 0 axis, norm is defined only for 2-D matrices')
    if _ord == 'nuc':
        _raise_unimplemented_error('nuclear norm is not implemented')
    if _in(_ord, (2, -2)):
        _raise_unimplemented_error('2-norm is not implemented for matrices')
    if _in(_ord, (None, 'fro')):
        # Frobenius norm: sqrt of the sum of squared entries.
        return F.sqrt(P.ReduceSum(keepdims)(F.square(x), axis))
    axis0, axis1 = axis
    if not keepdims:
        # The inner reduction removes one axis; if the outer reduction axis
        # comes after it, its index must shift down by one.
        if _check_is_inf(_abs(_ord)) and axis0 > axis1:
            axis0 -= 1
        elif _abs(_ord) == 1 and axis1 > axis0:
            axis1 -= 1
    # +/-inf norm: max/min over rows of absolute row sums (reduce axis1 first).
    if _check_is_inf(_ord):
        return P.ReduceMax(keepdims)(P.ReduceSum(keepdims)(absolute(x), axis1), axis0)
    if _check_is_inf(_ord, True):
        return P.ReduceMin(keepdims)(P.ReduceSum(keepdims)(absolute(x), axis1), axis0)
    # +/-1 norm: max/min over columns of absolute column sums (reduce axis0 first).
    if _ord == 1:
        return P.ReduceMax(keepdims)(P.ReduceSum(keepdims)(absolute(x), axis0), axis1)
    if _ord == -1:
        return P.ReduceMin(keepdims)(P.ReduceSum(keepdims)(absolute(x), axis0), axis1)
    return _raise_value_error('invalid norm order for matrices')
5547
5548
def norm(x, ord=None, axis=None, keepdims=False): # pylint: disable=redefined-builtin
    """
    Matrix or vector norm.
    This function is able to return one of eight different matrix norms, or one of an
    infinite number of vector norms (described below), depending on the value of the
    ord parameter.

    Note:
        Nuclear norm and 2-norm are not supported for matrices.

    Args:
        x (Union[int, float, bool, list, tuple, Tensor]): Input array. If `axis` is None,
            `x` must be 1-D or 2-D, unless `ord` is None. If both `axis` and `ord` are None,
            the 2-norm of ``x.ravel`` will be returned.
        ord (Union[None, 'fro', 'nuc', inf, -inf, int, float], optional): Order of the norm.
            inf means numpy’s inf object. The default is None.
        axis (Union[None, int, 2-tuple of integers], optional): If `axis` is an integer, it
            specifies the axis of `x` along which to compute the vector norms. If `axis` is
            a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of
            these matrices are computed. If `axis` is None then either a vector norm (when x
            is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default is None.
        keepdims (boolean, optional): If this is set to True, the axes which are normed over
            are left in the result as dimensions with size one. With this option the result
            will broadcast correctly against the original `x`.

    Returns:
        Tensor, norm of the matrix or vector(s).

    Raises:
        ValueError: If the norm order is not defined.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.norm(np.arange(9).astype(np.float32)))
        14.282857
    """
    if not isinstance(ord, (int, float)) and not _in(ord, (None, 'fro', 'nuc', inf, -inf)):
        _raise_value_error('invalid value for `ord`')
    x = _to_tensor(x)
    ndim = F.rank(x)
    if axis is None:
        if ord is None:
            # Both None: numpy computes the 2-norm of the flattened array.
            x = x.ravel()
        # Bug fix: message previously read "array must a vector" (missing "be").
        if F.rank(x) not in (1, 2):
            _raise_value_error('for None axis, array must be a vector or a 2-D matrix')
        axis = F.make_range(F.rank(x))
    axis = _check_axis_valid(axis, F.rank(x))

    # One reduction axis -> vector norm; two -> matrix norm.
    if len(axis) == 1:
        res = _vector_norm(x, ord, axis, keepdims)
    elif len(axis) == 2:
        res = _matrix_norm(x, ord, axis, keepdims)
    else:
        return _raise_value_error('invalid number of dimensions to norm')

    # Restore reduced axes as size-1 dimensions when requested.
    if keepdims and ndim > F.rank(res):
        res = _expand(res, ndim)
    return res
5610
5611
def bitwise_and(x1, x2, dtype=None):
    """
    Computes the bit-wise AND of two arrays element-wise.
    Operates on the underlying binary representation of the integers in the
    input arrays, equivalent to the C/Python operator &.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): Input array.
        x2 (Tensor): Input array. Only integer and boolean types are handled. If
            ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes
            the shape of the output).
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, this is a scalar if both x1 and x2 are scalars.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.bitwise_and(13, 17))
        1
    """
    # Broadcasting and dtype handling are delegated to the shared helper.
    return _apply_tensor_op(F.bitwise_and, x1, x2, dtype=dtype)
5642
5643
5644def bitwise_or(x1, x2, dtype=None):
5645    r"""
5646    Computes the bit-wise OR of two arrays element-wise.
5647    Computes the bit-wise OR of the underlying binary representation of the integers in
5648    the input arrays. This ufunc implements the C/Python operator \|.
5649
5650    Note:
5651        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
5652        not supported.
5653
5654    Args:
5655        x1 (Tensor): Input array.
5656        x2 (Tensor): Input array. Only integer and boolean types are handled. If
5657            ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes
5658            the shape of the output).
5659        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
5660            output Tensor.
5661
5662    Returns:
5663        Tensor or scalar, this is a scalar if both x1 and x2 are scalars.
5664
5665    Supported Platforms:
5666        ``Ascend``
5667
5668    Examples:
5669        >>> import mindspore.numpy as np
5670        >>> print(np.bitwise_or(13, 16))
5671        29
5672    """
5673    return _apply_tensor_op(F.bitwise_or, x1, x2, dtype=dtype)
5674
5675
def bitwise_xor(x1, x2, dtype=None):
    """
    Computes the bit-wise XOR of two arrays element-wise.
    Operates on the underlying binary representation of the integers in the
    input arrays, equivalent to the C/Python operator ^.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): Input array.
        x2 (Tensor): Input array. Only integer and boolean types are handled. If
            ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes
            the shape of the output).
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, this is a scalar if both x1 and x2 are scalars.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.bitwise_xor(13, 17))
        28
    """
    # Broadcasting and dtype handling are delegated to the shared helper.
    return _apply_tensor_op(F.bitwise_xor, x1, x2, dtype=dtype)
5706
5707
def invert(x, dtype=None):
    """
    Computes bit-wise inversion, or bit-wise NOT, element-wise.
    Operates on the underlying binary representation of the integers in the
    input array, equivalent to the C/Python operator ~.
    For signed integer inputs, the two’s complement is returned. In a two’s-complement system
    negative numbers are represented by the two’s complement of the absolute value. This is
    the most common method of representing signed integers on computers
    `[1] <https://en.wikipedia.org/wiki/Two’s_complement>`_. A N-bit two’s-complement system
    can represent every integer in the range ``-2^{N-1}`` to ``+2^{N-1}-1``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        Supported dtypes on Ascend: np.int16, np.uint16.

    Args:
        x (Tensor): Only integer and boolean types are handled.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.invert(np.array(13, dtype=np.uint16)))
        65522
    """
    # Unary op: the shared helper handles conversion and the dtype override.
    return _apply_tensor_op(F.invert, x, dtype=dtype)
5741
5742
def rint(x, dtype=None):
    """
    Rounds elements of the array to the nearest integer.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        Ascend does not support dtype `float64` currently.

    Args:
        x (Union[float, list, tuple, Tensor]): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Output tensor is same shape and type as x. This is a scalar if x is a scalar.

    Raises:
        TypeError: If `x` can not be converted to tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([-1.7, -1.5, 0.2, 1.5, 1.7, 2.0])
        >>> print(np.rint(x))
        [-2. -2. 0. 2. 2. 2.]
    """
    tensor_x = _to_tensor_origin_dtype(x)
    rounded = _rint(tensor_x)
    # Fast path: no dtype override requested, or the result already has it.
    if dtype is None or _check_same_type(F.dtype(rounded), dtype):
        return rounded
    return F.cast(rounded, dtype)
5777
5778
def correlate(a, v, mode='valid'):
    """
    Cross-correlation of two 1-dimensional sequences.

    This function computes the correlation as generally defined in signal processing texts:

    :math:`c_{av}[k] = sum_n a[n+k] * conj(v[n])`

    with `a` and `v` sequences being zero-padded where necessary and conj being the conjugate.

    Note:
        Currently, complex numbers are not supported.

    Args:
        a (Union[list, tuple, Tensor]): First input sequence.
        v (Union[list, tuple, Tensor]): Second input sequence.
        mode (str, optional): By default, mode is `\'valid\'`.
            If `mode` is `\'valid\'`, it returns output of length :math:`max(M, N) - min(M, N) + 1`.
            The convolution product is only given for points where the signals overlap
            completely. Values outside the signal boundary have no effect.
            If `mode` is `\'full\'`, it returns the convolution at each point of overlap, with
            an output shape of :math:`(N + M - 1,)`.
            At the end-points of the convolution, the signals do not overlap completely,
            and boundary effects may be seen.
            If `mode` is `\'same\'`, it returns output of length :math:`max(M, N)`. Boundary
            effects are still visible.

    Returns:
        Tensor. Discrete cross-correlation of `a` and `v`.

    Raises:
        TypeError: if the inputs can not be converted to tensor.
        ValueError: if `a` and `v` are empty or have wrong dimensions

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.correlate([1, 2, 3], [0, 1, 0.5])
        >>> print(output)
        [3.5]
        >>> output = np.correlate([1, 2, 3], [0, 1, 0.5], mode="same")
        >>> print(output)
        [2.  3.5 3. ]
        >>> output = np.correlate([1, 2, 3, 4, 5], [1, 2], mode="same")
        >>> print(output)
        [ 2.  5.  8. 11. 14.]
    """
    a, v = _to_tensor(a, v)
    if a.ndim != 1 or v.ndim != 1:
        _raise_value_error("only support 1-dimensional inputs.")
    if a.size == 0 or v.size == 0:
        _raise_value_error("Inputs cannot be empty.")

    # P.Conv2D requires both operands to share a data type. If the promoted
    # type is outside the operator's supported set, fall back to float32.
    # The supported dtype list may vary in the future.
    common_dtype = _promote(a.dtype, v.dtype)
    if common_dtype not in (mstype.float32, mstype.float16):
        common_dtype = mstype.float32
    a = a.astype(common_dtype)
    v = v.astype(common_dtype)

    if a.size >= v.size:
        return _compute_1d_conv(a, v, mode)
    # The longer sequence must come first; swapping the operands reverses the
    # correlation, so flip the result back.
    return _compute_1d_conv(v, a, mode)[::-1]
5846
5847
def _compute_1d_conv(a, v, mode):
    """Returns the 1-D cross-correlation of `a` and `v` computed via a 2-D convolution."""
    kernel_len = F.shape_mul(v.shape)
    if mode not in ('same', 'full', 'valid'):
        _raise_value_error("mode must be one of ['full', 'same', 'valid']")
    # Zero-pad `a` so that the 'valid' convolution below yields the requested
    # output length; a length-1 kernel needs no padding in any mode.
    if kernel_len > 1:
        concat = P.Concat(0)
        if mode == 'same':
            left = _to_tensor(_list_comprehensions(kernel_len // 2, 0.0, True))
            right = _to_tensor(_list_comprehensions(kernel_len - kernel_len // 2 - 1, 0.0, True))
            a = concat((left, a, right))
        elif mode == 'full':
            pad = _to_tensor(_list_comprehensions(kernel_len - 1, 0.0, True))
            a = concat((pad, a, pad))
    # Lift both 1-D sequences to NCHW layout (1, 1, 1, length) for Conv2D.
    lhs = a.reshape(1, 1, 1, a.size)
    rhs = v.reshape(1, 1, 1, v.size)
    return P.Conv2D(1, (1, v.size))(lhs, rhs).reshape(-1)
5865
5866
def radians(x, dtype=None):
    """
    Converts angles from degrees to radians.

    Args:
        x (Tensor): Angles in degrees.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor, the corresponding radian values. This is a tensor scalar if `x`
        is a tensor scalar.

    Raises:
        TypeError: if `x` is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, 2, 3, -4, -5])
        >>> output = np.radians(x)
        >>> print(output)
        [ 0.01745329  0.03490658  0.05235988 -0.06981317 -0.08726647]
    """
    # `radians` is a numpy alias for `deg2rad`; delegate directly.
    return deg2rad(x, dtype=dtype)
5894