# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensor implementation."""

__all__ = ['Tensor']

import abc
import math
import numbers
import numpy as np

from mindspore.communication.management import get_group_size
from mindspore.common._utils import is_shape_unknown
from mindspore.common.seed import get_seed
from mindspore import context
from mindspore import log as logger
from mindspore.common import dtype as mstype
from mindspore.common.hook_handle import _TensorHookHandle

from mindspore.common._utils import get_slice_num
from mindspore.common._register_for_tensor import tensor_operator_registry
from mindspore._c_expression import Tensor as Tensor_
from mindspore import _checkparam as validator
from mindspore._checkparam import check_is_number, is_stub_tensor, check_hook_fn
from mindspore._check_jit_forbidden_api import jit_forbidden_register
from mindspore.common.symbol import Symbol

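# NumPy scalar types accepted as `input_data`; Tensor.__init__ converts these
# to 0-d numpy arrays before construction (see the np_types branch below).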
np_types = (np.int8, np.int16, np.int32, np.int64,
            np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
            np.float32, np.float64, np.bool_, np.complex64, np.complex128)


def _check_input_data_type(input_data):
    """Check the type of input_data for Tensor"""
    validator.check_value_type('input_data', input_data,
                               (Tensor_, Tensor, np.ndarray, np.str_, list, tuple, float, int, bool, complex),
                               'Tensor')
    valid_dtypes = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64,
                    np.float16, np.float32, np.float64, np.bool_, np.str_, np.complex64, np.complex128)
    if isinstance(input_data, np.ndarray) and input_data.dtype not in valid_dtypes and \
            input_data.dtype.kind != 'U' and input_data.dtype.kind != 'S' and \
            input_data.dtype.kind != 'T':  # Support dtype np.str_ and npy_bfloat16
        new_line = '\n'
        for index, x in np.ndenumerate(input_data):
            if np.array(x).dtype not in valid_dtypes:
                raise TypeError(f"initializing tensor by numpy array failed, because the "
                                f"element type '{type(x)}' of array is not supported.\n"
                                f"The element index in array: {index}, numpy array: {input_data}.\n"
                                f"The supported element types of ndarray are as follows: "
                                f"{new_line}{new_line.join(map(str, valid_dtypes))}")
        raise TypeError(f"initializing tensor by numpy array failed, numpy array: {input_data}, "
                        f"data type: {input_data.dtype}.\nThe supported element types of ndarray "
                        f"are as follows: {new_line}{new_line.join(map(str, valid_dtypes))}")
    if isinstance(input_data, np.ndarray) and input_data.dtype.kind == "S" and \
            input_data.shape and context.get_context("enable_ge"):
        raise TypeError("For binary string input in GE mode, the shape of the data must be ()")
    if isinstance(input_data, (tuple, list)) and np.array(input_data).dtype not in valid_dtypes:
        raise TypeError(
            f"For Tensor, the input_data {input_data} contains unsupported elements.")
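# For example, np.array([1, 2]) and np.array(['a', 'b']) pass the checks above
# (numeric dtypes and dtype kind 'U' are accepted), while an object-dtype array
# such as np.array([1, {}], dtype=object) raises TypeError, since its elements
# are not in `valid_dtypes`.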


class _TensorMeta(type(Tensor_), abc.ABCMeta):
    """
    Meta class for Tensor. Used internally.
    """


def tensor(input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False):
    """
    Create a new Tensor in Cell.construct() or in a function decorated by @jit.

    In graph mode, MindSpore creates a new Tensor object dynamically at runtime,
    based on the `dtype` argument.

    Please refer to `Creating and Using Tensor
    <https://www.mindspore.cn/docs/en/master/note/static_graph_syntax_support.html#mindspore-user-defined-data-types>`_ .

    The difference between it and the Tensor class is that `tensor` adds
    `Annotation
    <https://www.mindspore.cn/docs/en/master/design/dynamic_graph_and_static_graph.html?#annotation-type>`_,
    which can prevent the generation of AnyType.

    The arguments and return values are the same as the Tensor class. Also see: :class:`mindspore.Tensor`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore as ms
        >>> from mindspore import jit, tensor
        >>> @jit
        ... def func(x):
        ...    return tensor(x.asnumpy(), dtype=ms.float32)
        >>> x = tensor([1, 2, 3])
        >>> y = func(x)
        >>> print(y)
        [1. 2. 3.]
    """
    return Tensor(input_data, dtype, shape, init, internal, const_arg)  # @jit.typing: () -> tensor_type[{dtype}]


class Tensor(Tensor_, metaclass=_TensorMeta):
    """
    Tensor is a data structure that stores an n-dimensional array.

    Note:
        If the `init` interface is used to initialize the `Tensor`, the `Tensor.init_data` API needs to be called
        to load the actual data to the `Tensor`.

    Warning:
          To convert the dtype of a `Tensor`, it is recommended to use `Tensor.astype()` rather than
          `Tensor(sourceTensor, dtype=newDtype)`.

    Args:
        input_data (Union[Tensor, float, int, bool, tuple, list, numpy.ndarray]): The data to be stored. It can be
            another Tensor, a Python number or a NumPy ndarray. Default: ``None`` .
        dtype (:class:`mindspore.dtype`): Used to indicate the data type of the output Tensor. The argument should
            be defined in `mindspore.dtype`. If it is ``None`` , the data type of the output Tensor will be the same
            as the `input_data`. Default: ``None`` .
        shape (Union[tuple, list, int, :class:`mindspore.Symbol`]): Used to indicate the shape of the output Tensor.
            If `input_data` is available, `shape` doesn't need to be set. If ``None`` or `Symbol` exists in `shape` ,
            a tensor of dynamic shape is created and `input_data` doesn't need to be set; if only integers exist in
            `shape`, a tensor of static shape is created and `input_data` or `init` must be set. Default: ``None`` .
        init (Initializer): The information of init data.
            `init` is used for delayed initialization in parallel mode; when using `init`, `dtype` and `shape` must
            be set. Default: ``None`` .
        internal (bool): Whether the tensor is created by the framework.
            ``True`` means that the tensor is created by the framework.
            ``False`` means that the tensor is created by the user.
            Default: ``False`` .
        const_arg (bool): Whether the tensor is a constant when it is used for the argument of a network.
            Default: ``False`` .

    Outputs:
        Tensor.

    Note:
        The default value ``None`` of `input_data` works as a placeholder;
        it does not mean that we can create a NoneType Tensor.
        A Tensor whose `shape` contains 0 is not fully tested and supported.

    Examples:
        >>> import numpy as np
        >>> import mindspore as ms
        >>> from mindspore import Tensor
        >>> from mindspore.common.initializer import One
        >>> # initialize a tensor with numpy.ndarray
        >>> t1 = Tensor(np.zeros([1, 2, 3]), ms.float32)
        >>> print(t1)
        [[[0. 0. 0.]
        [0. 0. 0.]]]
        >>> print(type(t1))
        <class 'mindspore.common.tensor.Tensor'>
        >>> print(t1.shape)
        (1, 2, 3)
        >>> print(t1.dtype)
        Float32
        >>>
        >>> # initialize a tensor with a float scalar
        >>> t2 = Tensor(0.1)
        >>> print(t2)
        0.1
        >>> print(type(t2))
        <class 'mindspore.common.tensor.Tensor'>
        >>> print(t2.shape)
        ()
        >>> print(t2.dtype)
        Float32
        >>>
        >>> # initialize a tensor with a tuple
        >>> t3 = Tensor((1, 2))
        >>> print(t3)
        [1 2]
        >>> print(type(t3))
        <class 'mindspore.common.tensor.Tensor'>
        >>> print(t3.shape)
        (2,)
        >>> print(t3.dtype)
        Int64
        >>>
        >>> # initialize a tensor with init
        >>> t4 = Tensor(shape=(1, 3), dtype=ms.float32, init=One())
        >>> print(t4)
        [[1. 1. 1.]]
        >>> print(type(t4))
        <class 'mindspore.common.tensor.Tensor'>
        >>> print(t4.shape)
        (1, 3)
        >>> print(t4.dtype)
        Float32
    """
    delta_seed = 0

    def __init__(self, input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False):
        self.init_finished = False
        if isinstance(input_data, (Tensor, Tensor_)) and dtype is not None:
            logger.info("It is suggested to use 'Tensor.astype()' to convert the dtype of a Tensor.")
            _cast = tensor_operator_registry.get("cast")
            input_data = _cast(input_data, dtype)

        if is_stub_tensor(input_data):
            input_data = input_data.stub_sync()

        if internal:
            if input_data is not None:
                Tensor_.__init__(self, input_data)
        else:
            if input_data is None and shape is None and init is None and dtype is not None:
                validator.check_type_name('dtype', dtype, mstype.number_type +
                                          (mstype.bool_, mstype.string), "Tensor")
                Tensor_.__init__(self, dtype, [-2])
                logger.warning(f"For 'Tensor', if 'dtype' is not None, 'input_data', 'shape' "
                               f"or 'init' must not be None.")
            else:
                # If input data is numpy number, convert it to np array
                if isinstance(input_data, np_types):
                    input_data = np.array(input_data)

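                # `shape` normalization: a bare number becomes a 1-tuple; Symbol
                # entries are recorded in `symbolic_shape` and replaced by None,
                # which marks the corresponding dimension as dynamic.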
                if shape is not None:
                    if isinstance(shape, numbers.Number):
                        shape = (shape,)
                    elif isinstance(shape, Symbol):
                        self.symbolic_shape = [shape]
                        shape = (None,)
                    elif isinstance(shape, (list, tuple)) and any(isinstance(s, Symbol) for s in shape):
                        self.symbolic_shape = [item.to_dict() if isinstance(item, Symbol) else item for item in shape]
                        shape_without_symbol = (None if isinstance(item, Symbol) else item for item in shape)
                        shape = list(shape_without_symbol) if isinstance(shape, list) else tuple(shape_without_symbol)

                _check_tensor_input(input_data, dtype, shape, init)

                # If input_data is a tuple/list/numpy.ndarray, it is checked in the check_type method.
                if (isinstance(shape, (list, tuple)) and None in shape) or init is not None:
                    shape = _check_tensor_dynamic_shape(dtype, shape, init)
                    Tensor_.__init__(self, dtype, shape)
                else:
                    _check_input_data_type(input_data)
                    if dtype is not None:
                        validator.check_type_name('dtype', dtype, mstype.number_type +
                                                  (mstype.bool_, mstype.string), "Tensor")
                    else:
                        dtype = self._set_default_dtype(input_data, dtype)

                    if isinstance(input_data, np.ndarray) and (not input_data.flags['FORC']):
                        input_data = np.ascontiguousarray(input_data)

                    if dtype is not None:
                        Tensor_.__init__(self, input_data, dtype)
                    else:
                        Tensor_.__init__(self, input_data)
                    validator.check_value_type('const_arg', const_arg, bool, 'Tensor')

        self.const_arg = const_arg
        self.virtual_flag = False
        self.init = init
        self.init_finished = True

        # If the current Tensor is an indexed value of another Tensor,
        # parent_tensor_ is set to that Tensor and
        # index_of_parent_ is set to the index.
        self.parent_tensor_ = None
        self.index_of_parent_ = None

        self.slice_num_of_persistent_data_ = None
        self.slice_shape_of_persistent_data_ = None

        # the auto gradient information
        self._grad = None
        self._grad_fn = None
        self._requires_grad = False
        self._retain_grad = False

    @classmethod
    def __subclasshook__(cls, sub):
        """
        A subclass with a `stub_sync` attribute is treated as an instance of Tensor.
        """
        if cls is Tensor:
            if any("stub_sync" in s.__dict__ for s in sub.__mro__):
                return True
        return NotImplemented

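    # Default dtype inference, summarizing the branches below: Python floats
    # map to float32 and Python ints map to int64, e.g. Tensor(1.0).dtype is
    # mstype.float32 and Tensor(1).dtype is mstype.int64.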
    @staticmethod
    def _set_default_dtype(input_data, dtype):
        """Set tensor default dtype"""
        if isinstance(input_data, (float, list, tuple)):
            if np.array(input_data).dtype == np.float64:
                return mstype.float32
        if isinstance(input_data, (int, list, tuple)):
            if np.array(input_data).dtype in (np.int32, np.int64):
                return mstype.int64
        return dtype

    def __deepcopy__(self, memodict):
        new_obj = Tensor(self)
        new_obj.init = self.init
        new_obj.virtual_flag = self.virtual_flag
        new_obj.const_arg = self.const_arg
        return new_obj

    def __repr__(self):
        if self.init_finished:
            Tensor_.data_sync(self, True)
            return Tensor_.__repr__(self)
        return ''

    def __eq__(self, other):
        if not isinstance(other, (int, float, Tensor)):
            return False
        return tensor_operator_registry.get('__eq__')(self, other)

    def __ne__(self, other):
        if not isinstance(other, (int, float, Tensor)):
            return True
        return tensor_operator_registry.get('__ne__')(self, other)
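
    # Note: comparison with an unsupported type does not raise; following the
    # Python equality convention, __eq__ returns False and __ne__ returns True,
    # e.g. Tensor([1]) == "1" is simply False.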

    def __hash__(self):
        return hash(id(self))

    def __neg__(self):
        out = tensor_operator_registry.get('__neg__')(self)
        return out

    def __invert__(self):
        out = tensor_operator_registry.get('__logical_not__')(self)
        return out

    def __round__(self):
        out = tensor_operator_registry.get('round')(self)
        return out

    def __bool__(self):
        data = self.asnumpy()
        if data.shape == ():
            return bool(data)
        if data.shape == (1,):
            return bool(data[0])
        raise ValueError("The truth value of an array with more than one element is ambiguous.")

    @staticmethod
    def _convert_scalar_(data, func, message):
        if data.shape == ():
            return func(data)
        if data.shape == (1,):
            return func(data[0])
        raise ValueError(message)

    def __int__(self):
        data = self.asnumpy()
        return self._convert_scalar_(data, int, "Only one-element tensors can be converted to Python scalars")

    def __float__(self):
        data = self.asnumpy()
        return self._convert_scalar_(data, float, "Only one-element tensors can be converted to Python scalars")

    def __index__(self):
        data = self.asnumpy()
        if data.dtype not in ["int8", "int16", "int32", "int64", "bool"]:
            raise ValueError("Only integer tensors of a single element can be converted to an index.")
        return self._convert_scalar_(data, int,
                                     "Only integer tensors of a single element can be converted to an index.")
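
    # For example: int(Tensor(3)) == 3 and float(Tensor(1.5)) == 1.5; a
    # single-element integer tensor can also serve as a Python index, so
    # [10, 20, 30][Tensor(1)] == 20. Multi-element tensors raise ValueError.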

    def __pos__(self):
        return self

    def __abs__(self):
        return tensor_operator_registry.get('abs')(self)

    def __add__(self, other):
        return tensor_operator_registry.get('__add__')(self, other)

    def __and__(self, other):
        if isinstance(other, (int, bool, float, Tensor)):
            return tensor_operator_registry.get('bitwise_and')(self, other)
        raise TypeError("Unsupported operand type(s) for &: 'Tensor' and '{}'".format(type(other)))

    def __xor__(self, other):
        if isinstance(other, (int, bool, float, Tensor)):
            return tensor_operator_registry.get('bitwise_xor')(self, other)
        raise TypeError("Unsupported operand type(s) for ^: 'Tensor' and '{}'".format(type(other)))

    def __or__(self, other):
        if isinstance(other, (int, bool, float, Tensor)):
            return tensor_operator_registry.get('bitwise_or')(self, other)
        raise TypeError("Unsupported operand type(s) for |: 'Tensor' and '{}'".format(type(other)))

    def __radd__(self, other):
        return self.__add__(other)

    def __iadd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        return tensor_operator_registry.get('__sub__')(self, other)

    def __rsub__(self, other):
        return tensor_operator_registry.get('__sub__')(other, self)

    def __isub__(self, other):
        return self.__sub__(other)

    def __mul__(self, other):
        return tensor_operator_registry.get('__mul__')(self, other)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __imul__(self, other):
        return self.__mul__(other)

    def __matmul__(self, other):
        return tensor_operator_registry.get('__matmul__')(self, other)

    def __rmatmul__(self, other):
        return tensor_operator_registry.get('__matmul__')(other, self)

    def __imatmul__(self, other):
        return self.__matmul__(other)

    def __truediv__(self, other):
        return tensor_operator_registry.get('__truediv__')(self, other)

    def __rtruediv__(self, other):
        return tensor_operator_registry.get('__truediv__')(other, self)

    def __mod__(self, other):
        return tensor_operator_registry.get('__mod__')(self, other)

    def __rmod__(self, other):
        return tensor_operator_registry.get('__mod__')(other, self)

    def __imod__(self, other):
        return self.__mod__(other)

    def __pow__(self, other):
        return tensor_operator_registry.get('__pow__')(self, other)

    def __rpow__(self, other):
        return tensor_operator_registry.get('__rpow__')(self, other)

    def __floordiv__(self, other):
        return tensor_operator_registry.get('__floordiv__')(self, other)

    def __rfloordiv__(self, other):
        return tensor_operator_registry.get('__floordiv__')(other, self)

    def __ifloordiv__(self, other):
        return self.__floordiv__(other)

    def __lt__(self, other):
        out = tensor_operator_registry.get('__lt__')(self, other)
        return out

    def __le__(self, other):
        out = tensor_operator_registry.get('__le__')(self, other)
        return out

    def __getitem__(self, index):
        out = tensor_operator_registry.get('__getitem__')(self, index)
        if out is not self:
            out.parent_tensor_ = self
            out.index_of_parent_ = index
        return out

    def __setitem__(self, index, value):
        out = tensor_operator_registry.get('__setitem__')(self, index, value)
        if isinstance(out, tuple):
            if self.parent_tensor_ is not None and self.index_of_parent_ is not None:
                self.parent_tensor_.__setitem__(self.index_of_parent_, out[0])
                return self
            return self
        self.assign_value(out)
        if self.parent_tensor_ is not None and self.index_of_parent_ is not None:
            self.parent_tensor_.__setitem__(self.index_of_parent_, self)
        return self
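
    # Write-back chain: when this tensor was produced by indexing another
    # tensor (see __getitem__ above), assigning into it also propagates the
    # updated value into parent_tensor_ at index_of_parent_, so chained
    # indexed assignments such as t[0][1] = v update the original tensor.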

    def __gt__(self, other):
        out = tensor_operator_registry.get('__gt__')(self, other)
        return out

    def __ge__(self, other):
        out = tensor_operator_registry.get('__ge__')(self, other)
        return out

    def __len__(self):
        out = tensor_operator_registry.get('shape')(self)
        if out:
            return out[0]
        raise TypeError("len() is not supported for a 0-D tensor.")

    def __str__(self):
        if self.dtype == mstype.type_none:
            return "Unknown Tensor type!"
        return str(self.asnumpy())

    def __getstate__(self):
        state = self.__dict__.copy()
        state["value"] = Tensor_.__getstate__(self)
        return state

    def __setstate__(self, state):
        value = state.pop("value")
        Tensor_.__setstate__(self, value)
        self.__dict__.update(state)

    @property
    def shape(self):
        """
        For details, please refer to :func:`mindspore.ops.shape`.
        """
        return self._shape

    @shape.setter
    def shape(self, shape_value):
        r"""
        Set the shape value.
        """
        self._shape = shape_value

    @property
    def dtype(self):
        """Return the dtype of the tensor (:class:`mindspore.dtype`)."""
        return self._dtype

    @property
    def size(self):
        """
        For details, please refer to :func:`mindspore.ops.size`.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
            >>> output = x.size
            >>> print(output)
            4
        """
        return self._size

    @property
    def ndim(self):
        """
        Return the number of tensor dimensions.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
            >>> output = x.ndim
            >>> print(output)
            2
        """
        return len(self._shape)

    @property
    def grad(self):
        r"""
        Get the gradient value.
        """
        return self._grad

    @grad.setter
    def grad(self, grad):
        r"""
        Set the gradient value.
        """
        self._grad = grad

    @property
    def grad_fn(self):
        r"""
        The function for backward.
        """
        return self._grad_fn

    @grad_fn.setter
    def grad_fn(self, grad_fn):
        r"""
        Set the function for backward.
        """
        self._grad_fn = grad_fn

    @property
    def is_leaf(self):
        r"""
        Whether the tensor is a leaf. A tensor is a leaf if its `requires_grad`
        is False, or if it was created by the user (that is, it has no `grad_fn`).
        """
        return self._requires_grad is False or self._grad_fn is None

    @property
    def requires_grad(self):
        r"""
        Whether gradients need to be computed for this tensor.
        """
        return self._requires_grad

    @requires_grad.setter
    def requires_grad(self, requires_grad):
        r"""
        Set whether gradients need to be computed for this tensor.
        """
        self._requires_grad = requires_grad

    def retain_grad(self):
        r"""
        Enable a non-leaf tensor to have its `grad` populated during backward().
        """
        if not self._requires_grad:
            raise RuntimeError("can't retain_grad on Tensor that has requires_grad = False.")
        self._retain_grad = self._grad_fn is not None

    @property
    def retains_grad(self):
        r"""
        True if the tensor is non-leaf and its `grad` is enabled to be populated during backward().
        """
        return self._retain_grad

    def backward(self, grad=None):
        r"""
        Calculate the gradient.
        """
        if grad is None:
            grad = Tensor(np.ones(self.shape), self.dtype)
        if self._grad_fn is not None:
            self._grad_fn.apply(grad)
        elif self._requires_grad:
            self._grad = grad

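    # A minimal usage sketch of the autograd attributes above (PyNative-style;
    # hypothetical, depends on how `out` is produced from x):
    #   x = Tensor(np.ones(3), mstype.float32)
    #   x.requires_grad = True
    #   out = ...            # some computation producing a tensor with a grad_fn
    #   out.backward()       # default gradient is ones with out's shape/dtype
    #   print(x.grad)        # gradient accumulated on the leaf tensor x
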
    @property
    def H(self):
        """
        Returns a view of a matrix (2-D tensor) conjugated and transposed.
        x.H is equivalent to `mindspore.Tensor.swapaxes(0, 1).conj()` for complex matrices and
        `mindspore.Tensor.swapaxes(0, 1)` for real matrices.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
            >>> output = x.H
            >>> print(output)
            [[1 3]
            [2 4]]
        """
        if self.ndim != 2:
            raise ValueError(f"tensor.H only supports a 2-D Tensor, but got {self.ndim}-D.")
        output = self.swapaxes(0, 1)
        if self.dtype in (mstype.complex64, mstype.complex128):
            return output.conj()
        return output

    @property
    def has_init(self):
        """
        Whether the tensor is initialized.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
            >>> output = x.has_init
            >>> print(output)
            False
        """
        return self.init is not None

    @property
    def itemsize(self):
        """
        Return the length of one tensor element in bytes.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
            >>> output = x.itemsize
            >>> print(output)
            8
        """
        return self._itemsize

    @property
    def strides(self):
        """
        Return the tuple of bytes to step in each dimension when traversing a tensor.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
            >>> output = x.strides
            >>> print(output)
            (16, 8)
        """
        return self._strides

    @property
    def nbytes(self):
        """
        Return the total number of bytes taken by the tensor.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
            >>> output = x.nbytes
            >>> print(output)
            32
        """
        return self._nbytes

    @property
    def T(self):
        """
        Return the transposed tensor.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
            >>> output = x.T
            >>> print(output)
            [[1 3]
            [2 4]]
        """
        if self.ndim <= 1:
            return self
        return self.transpose()

    @staticmethod
    def from_numpy(array):
        """
        Convert a numpy array to a Tensor.
        If the data is not C contiguous, it will be copied to a C-contiguous buffer to construct the tensor.
        Otherwise, the tensor will be constructed from this numpy array without copying.

        Args:
            array (numpy.ndarray): The input array.

        Returns:
            Tensor, has the same data type as the input array.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = np.array([1, 2])
            >>> output = Tensor.from_numpy(x)
            >>> print(output)
            [1 2]
        """
        if isinstance(array, np.ndarray) and not array.flags['C_CONTIGUOUS']:
            array = np.ascontiguousarray(array)

        return Tensor(Tensor_.from_numpy(array))
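
    # Zero-copy sketch: when `array` is already C-contiguous, the tensor shares
    # memory with it, e.g.
    #   a = np.ones(3)
    #   t = Tensor.from_numpy(a)
    #   a[0] = 5  # the change is visible through t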

    def ndimension(self):
        r"""
        Alias for :func:`mindspore.Tensor.ndim`.
        """
        return len(self._shape)

    @jit_forbidden_register
    def set_const_arg(self, const_arg=True):
        """
        Specify whether the tensor is a constant when it is used for the argument of a network.

        Args:
            const_arg (bool): Whether the tensor is a constant when it is used for the argument of a network.
                Default: ``True`` .

        Returns:
            Tensor, with the constant-argument flag set as specified.

        Raises:
            TypeError: If `const_arg` is not a bool.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([[1,2,3],[4,5,6]], dtype=np.float32))
            >>> x.set_const_arg(True)
        """
        validator.check_value_type('const_arg', const_arg, bool, 'set_const_arg')
        self.const_arg = const_arg
        return self

    def arccosh(self):
        r"""
        For details, please refer to :func:`mindspore.ops.arccosh`.
        """
        return tensor_operator_registry.get('acosh')(self)

    def arcsin(self):
        r"""
        For details, please refer to :func:`mindspore.ops.arcsin`.
        """
        return tensor_operator_registry.get('asin')(self)

    def arctan(self):
        r"""
        For details, please refer to :func:`mindspore.ops.arctan`.
        """
        return tensor_operator_registry.get('atan')(self)

    def arctan2(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.arctan2`.
        """
        return tensor_operator_registry.get('atan2')(self, other)

    def cauchy(self, median=0.0, sigma=1.0):
        r"""
        Fills the tensor with numbers drawn from the Cauchy distribution. It is
        defined as follows:

        .. math::
            f(x)= \frac{1}{\pi} \frac{\sigma}{(x-\text{median})^2 +\sigma^2}

        .. warning::
            This is an experimental API that is subject to change or deletion.

        Args:
            median (float, optional): the location parameter, specifying the location
                of the peak of the distribution. Default: 0.0.
            sigma (float, optional): the scale parameter which specifies the half-width
                at half-maximum. Default: 1.0.

        Returns:
            Tensor. A Tensor with the same type and shape as the input.

        Supported Platforms:
            ``Ascend`` ``CPU``

        Examples:
            >>> import mindspore
            >>> import numpy as np
            >>> x = mindspore.Tensor(np.zeros((1, 2)), dtype=mindspore.float32)
            >>> x.cauchy()
            Tensor(shape=[1, 2], dtype=Float32, value=
            [[8.79836142e-01, 9.37541723e-01]])

        """
        out = tensor_operator_registry.get('cauchy')(list(self.shape), median, sigma)()
        return out.astype(self.dtype)

    def log_normal(self, mean=1.0, std=2.0):
        r"""
        Fills the elements of the input tensor with log normal values initialized by
        given mean and std:

        .. math::
            \text{f}(x;\mu,\delta)=\frac{1}{x\delta\sqrt{2\pi}}e^{-\frac{(\ln x-\mu)^2}{2\delta^2}}

        where :math:`\mu` and :math:`\delta` are the mean and standard deviation of the
        lognormal distribution, respectively.

        .. warning::
            This is an experimental API that is subject to change or deletion.

        Args:
            mean (float, optional): the mean of the normal distribution. With float data type.
                Default: 1.0.
            std (float, optional): the std of the normal distribution. With float data type.
                Default: 2.0.

        Returns:
            Tensor. A Tensor with the same type and shape as the input.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore
            >>> import numpy as np
            >>> x = mindspore.Tensor(np.array([[1, 2], [3, 4]]), dtype=mindspore.float32)
            >>> output = x.log_normal()
            >>> print(output)
            [[1.2788825 2.3305743]
            [14.944194 0.16303174]]
        """
        return tensor_operator_registry.get('log_normal')(mean, std)(self)

    @jit_forbidden_register
    def assign_value(self, value):
        """
        Assign another tensor value to this tensor.

        Args:
            value (Tensor): Tensor for assignment.

        Returns:
            Tensor, the tensor that has been assigned.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor([1, 2, 3, 4])
            >>> y = Tensor(np.array([[1, 2], [3, 4]]))
            >>> output = x.assign_value(y)
            >>> print(x)
            [[1 2]
            [3 4]]
        """
        if is_stub_tensor(value):
            value = value.stub_sync()
        self.assign_value_cpp(value)
        return self

    def bincount(self, weights=None, minlength=0):
        r"""
        For details, please refer to :func:`mindspore.ops.bincount`.
        """
        return tensor_operator_registry.get('bincount')(self, weights, minlength)

    def chunk(self, chunks, axis=0):
        r"""
        For details, please refer to :func:`mindspore.ops.chunk`.
        """
        return tensor_operator_registry.get('chunk')(self, chunks, axis)

    def item(self, index=None):
        """
        Get the item at the specified index of the tensor.

        Args:
            index (Union[None, int, tuple(int)]): The index in Tensor. Default: ``None``.

        Returns:
            A scalar, type is defined by the dtype of the Tensor.

        Raises:
            ValueError: If the length of the `index` is not equal to self.ndim.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore as ms
            >>> from mindspore import Tensor
            >>> x = Tensor([[1, 2, 3], [4, 5, 6]], ms.float32)
            >>> print(x.item((0, 1)))
            2.0
            >>> x = Tensor(1.2, ms.float32)
            >>> print(x.item())
            1.2
        """

        if index is not None:
            output = self.asnumpy().item(index)
        else:
            output = self.asnumpy().item()
        return output

    def itemset(self, *args):
        r"""
        Insert scalar into a tensor (scalar is cast to tensor's dtype, if possible).

        There must be at least one argument, and the last argument is defined as item.
        Then, tensor.itemset(\*args) is equivalent to :math:`Tensor[args] = item`.

        Args:
            args (Union[(numbers.Number), (int/tuple(int), numbers.Number)]): The arguments that
                specify the index and value. If `args` contain one argument (a scalar),
                it is only used in case tensor is of size 1. If `args` contain two
                arguments, the last argument is the value to be set and must be a
                scalar, the first argument specifies a single tensor element location.
                It is either an int or a tuple.

        Returns:
            A new tensor that doesn't affect the original tensor, with value set by :math:`Tensor[args] = item`.

        Raises:
            ValueError: If the length of the first argument is not equal to self.ndim.
            IndexError: If only one argument is provided, and the original Tensor is not scalar.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([[1,2,3],[4,5,6]], dtype=np.float32))
            >>> print(x.itemset((0,1), 4))
            [[1. 4. 3.]
            [4. 5. 6.]]
            >>> print(x)
            [[1. 2. 3.]
            [4. 5. 6.]]
        """
        output = tensor_operator_registry.get('itemset')(self, *args)
        return output

    def get_bytes(self):
        r"""
        Get the raw data of the tensor as bytes.

        Supported Platforms:
            ``CPU`` ``GPU`` ``Ascend``

        Returns:
            Bytes of the tensor.

        Examples:
            >>> import mindspore as ms
            >>> from mindspore import Tensor
            >>> x = ms.Tensor([1, 2, 3], ms.int16)
            >>> print(x.get_bytes())
            b'\x01\x00\x02\x00\x03\x00'
        """
        return Tensor_.get_bytes(self)

    def asnumpy(self):
        """
        Convert tensor to numpy array. Returns self tensor as a NumPy ndarray. This tensor and the returned ndarray
        share the same underlying storage. Changes to self tensor will be reflected in the ndarray.

        Returns:
            A numpy ndarray which shares the same underlying storage with the tensor.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor(np.array([1, 2], dtype=np.float32))
            >>> y = x.asnumpy()
            >>> y[0] = 11
            >>> print(x)
            [11.  2.]
            >>> print(y)
            [11.  2.]
        """
        if self.has_init:
            self.init_data()
        return Tensor_.asnumpy(self)

    def numpy(self):
        """
        Alias for :func:`mindspore.Tensor.asnumpy`.
        """
        return self.asnumpy()

    def is_persistent_data(self):
        """
        Check whether the tensor is so large that its data needs to be saved to persistent storage.
        If the tensor is larger than MS_EMBEDDING_REMOTE_CACHE_MEMORY_SIZE, persistent storage
        is used to save the tensor data, and the data is split into slices.

        Returns:
            True or False
        """
        return Tensor_.is_persistent_data(self)

    def asnumpy_of_slice_persistent_data(self, param_key, slice_index):
        """
        Convert a slice of tensor data to numpy array. A slice is part of tensor data.
        Returns it as a NumPy ndarray. This slice of tensor data and the returned ndarray
        share the same underlying storage. Changes to self tensor will be reflected in the ndarray.

        Returns:
            A numpy ndarray which shares the same underlying storage with the slice of tensor data.
        """
        return Tensor_.asnumpy_of_slice_persistent_data(self, param_key, slice_index)

    def slice_num_of_persistent_data(self):
        """
        Get the number of slices of a tensor that uses persistent storage.

        Returns:
            Number of slices.
        """
        return self.slice_num_of_persistent_data_

    def slice_scatter(self, src, axis=0, start=None, end=None, step=1):
        """
        For details, please refer to :func:`mindspore.ops.slice_scatter`.
        """
        return tensor_operator_registry.get('slice_scatter')(self, src, axis, start, end, step)

    def select_scatter(self, src, axis, index):
        """
        For details, please refer to :func:`mindspore.ops.select_scatter`.
        """
        return tensor_operator_registry.get('select_scatter')(self, src, axis, index)

    def histc(self, bins=100, min=0., max=0.):
        """
        For details, please refer to :func:`mindspore.ops.histc`.
        """
        validator.check_value_type('min', min, (int, float,), 'Tensor.histc')
        validator.check_value_type('max', max, (int, float,), 'Tensor.histc')
        return tensor_operator_registry.get('histc')(self, bins, float(min), float(max))

    def geqrf(self):
        """
        For details, please refer to :func:`mindspore.ops.geqrf`.
        """
        return tensor_operator_registry.get('geqrf')(self)

    def slice_shape_of_persistent_data(self):
        """
        Get the slice shape of the tensor after it is cut into slices.

        Returns:
            The slice shape of the tensor.
        """
        return self.slice_shape_of_persistent_data_

    def value(self):
        """
        Get the value of the tensor or the parameter.

        Returns:
            The value of the tensor or the parameter.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor(np.array([1, 2], dtype=np.float32))
            >>> x_value = x.value()
            >>> print(x_value)
            [1.  2.]
        """
        return self

    def contiguous(self):
        """
        Converts a Tensor into a contiguous-memory Tensor that contains the same data as the original Tensor.

        Returns:
            A contiguous in memory tensor containing the same data as self tensor.

        Examples:
            >>> import mindspore as ms
            >>> import numpy as np
            >>> from mindspore import Tensor, ops
            >>> x = Tensor([[1, 2, 3], [4, 5, 6]], dtype=ms.float32)
            >>> y = ops.transpose(x, (1, 0))
            >>> z = y.contiguous()
            >>> print(z.is_contiguous())
            True
        """
        return tensor_operator_registry.get('contiguous')(self)

    def is_contiguous(self):
        """
        Determines whether the memory of the tensor is contiguous.

        Returns:
            Bool, True if tensor memory is contiguous, False otherwise.

        Examples:
            >>> import mindspore as ms
            >>> import numpy as np
            >>> from mindspore import Tensor, ops
            >>> x = Tensor([[1, 2, 3], [4, 5, 6]], dtype=ms.float32)
            >>> y = ops.transpose(x, (1, 0))
            >>> print(y.is_contiguous())
            False
        """
        return Tensor_.is_contiguous(self)

    def stride(self, dim=None):
        """
        The stride needed to jump from one element to the next in the given dimension `dim`.
        When no argument is passed, a list of strides for all dimensions is returned.

        Args:
            dim (int): The dimension in which to compute the stride.

        Returns:
            Int, the stride in dimension `dim`, or a list of strides when `dim` is None.

        Raises:
            TypeError: `dim` is not an int.

        Examples:
            >>> import mindspore as ms
            >>> x = ms.Tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], dtype=ms.float32)
            >>> x.stride()
            [5, 1]
        """
        stride = Tensor_.stride(self)
        if dim is None:
            return stride
        return stride[dim]

    def storage_offset(self):
        """
        Tensor's offset in the underlying storage in terms of the number of storage elements.

        Returns:
            int, tensor's offset in the underlying storage in terms of number of storage elements.

        Examples:
            >>> import mindspore as ms
            >>> x = ms.Tensor([1, 2, 3, 4, 5], dtype=ms.float32)
            >>> ret = x.storage_offset()
            >>> print(ret)
            0
        """
        return Tensor_.storage_offset(self)

    def register_hook(self, hook_fn):
        """
        Registers a backward hook for the tensor.

        Note:
            - `register_hook(hook_fn)` does not work in graph mode or in functions decorated with 'jit'.
            - The `hook_fn` receives the gradient passed to the tensor as `grad`,
              and may modify it by returning a new output gradient.
            - The `hook_fn` should have the following signature:
              hook_fn(grad) -> new output gradient; it must not return None or omit the return value.

        Args:
            hook_fn (function): Python function. Tensor backward hook function.

        Returns:
            A handle corresponding to the `hook_fn` . The handle can be used to remove the added `hook_fn` by calling
            `handle.remove()` .

        Raises:
            TypeError: If the `hook_fn` is not a Python function.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore as ms
            >>> from mindspore import Tensor
            >>> ms.set_context(mode=ms.PYNATIVE_MODE)
            >>> def hook_fn(grad):
            ...     return grad * 2
            ...
            >>> def hook_test(x, y):
            ...     z = x * y
            ...     z.register_hook(hook_fn)
            ...     z = z * y
            ...     return z
            ...
            >>> ms_grad = ms.grad(hook_test, grad_position=(0,1))
            >>> output = ms_grad(Tensor(1, ms.float32), Tensor(2, ms.float32))
            >>> print(output)
            (Tensor(shape=[], dtype=Float32, value=8), Tensor(shape=[], dtype=Float32, value=6))
        """
        if not check_hook_fn("register_hook", hook_fn):
            return _TensorHookHandle()
        handle = _TensorHookHandle()
        handle.id = Tensor_.register_hook(self, hook_fn)
        return handle

    def flush_from_cache(self):
        """
        Flush cache data to host if the tensor is cache enabled.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor(np.array([1, 2], dtype=np.float32))
            >>> y = x.flush_from_cache()
            >>> print(y)
            None
        """
        Tensor_._flush_from_cache(self)

    def addcdiv(self, tensor1, tensor2, value=1):
        r"""
        For details, please refer to :func:`mindspore.ops.addcdiv`.
        """
        return tensor_operator_registry.get('addcdiv')(self, tensor1, tensor2, value)

    def addcmul(self, tensor1, tensor2, value=1):
        r"""
        For details, please refer to :func:`mindspore.ops.addcmul`.
        """
        return tensor_operator_registry.get('addcmul')(self, tensor1, tensor2, value)

    def add(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.add`.
        """
        return tensor_operator_registry.get('add')(self, other)

    def subtract(self, other, *, alpha=1):
        r"""
        For details, please refer to :func:`mindspore.ops.subtract`.
        """
        return tensor_operator_registry.get('sub')(self, alpha * other)

    def true_divide(self, value):
        r"""
        Alias for Tensor.div() with :math:`rounding\_mode=None`.
        For details, please refer to :func:`mindspore.ops.div`.
        """
        return tensor_operator_registry.get('div')(self, value, rounding_mode=None)

    def triu(self, diagonal=0):
        r"""
        For details, please refer to :func:`mindspore.ops.triu`.

        .. warning::
            This is an experimental API that is subject to change or deletion.

        """
        validator.check_value_type('diagonal', diagonal, [int], 'triu')
        return tensor_operator_registry.get('triu')(self, diagonal)
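
    # Sketch of triu semantics: Tensor([[1, 2], [3, 4]]).triu() zeroes entries
    # below the main diagonal, giving [[1, 2], [0, 4]]; triu(diagonal=1) also
    # zeroes the diagonal itself, giving [[0, 2], [0, 0]].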

    def addbmm(self, batch1, batch2, *, beta=1, alpha=1):
        r"""
        For details, please refer to :func:`mindspore.ops.addbmm`.
        """
        return tensor_operator_registry.get('addbmm')(self, batch1, batch2, beta=beta, alpha=alpha)

    def addmm(self, mat1, mat2, *, beta=1, alpha=1):
        r"""
        For details, please refer to :func:`mindspore.ops.addmm`.
        """
        return tensor_operator_registry.get('addmm')(self, mat1, mat2, beta=beta, alpha=alpha)

    def addr(self, vec1, vec2, beta=1, alpha=1):
        r"""
        For details, please refer to :func:`mindspore.ops.addr`.
        """
        return tensor_operator_registry.get('addr')(self, vec1, vec2, beta=beta, alpha=alpha)

    def adjoint(self):
        r"""
        For details, please refer to :func:`mindspore.ops.adjoint`.
        """
        return tensor_operator_registry.get('adjoint')(self)

    def all(self, axis=None, keep_dims=False):
        r"""
        For details, please refer to :func:`mindspore.ops.all`.
        """
        return tensor_operator_registry.get('all')(self, axis, keep_dims)

    def angle(self):
        r"""
        For details, please refer to :func:`mindspore.ops.angle`.
        """
        return tensor_operator_registry.get('angle')(self)

    def any(self, axis=None, keep_dims=False):
        r"""
        For details, please refer to :func:`mindspore.ops.any`.
        """
        if axis is None:
            axis = ()
        return tensor_operator_registry.get('any')(self, axis, keep_dims)

    def atan2(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.atan2`.
        """
        return tensor_operator_registry.get('atan2')(self, other)

    def baddbmm(self, batch1, batch2, beta=1, alpha=1):
        r"""
        For details, please refer to :func:`mindspore.ops.baddbmm`.
        """
        return tensor_operator_registry.get('baddbmm')(self, batch1, batch2, beta=beta, alpha=alpha)

    def view(self, *shape):
        """
        Reshape the tensor according to the input shape. It's the same as :func:`mindspore.Tensor.reshape`,
        implemented by the underlying reshape operator.

        Args:
            shape (Union[tuple(int), int]): Dimensions of the output tensor.

        Returns:
            Tensor, with the specified `shape`.

        Examples:
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> a = Tensor(np.array([[1, 2, 3], [2, 3, 4]], dtype=np.float32))
            >>> output = a.view((3, 2))
            >>> print(output)
            [[1. 2.]
            [3. 2.]
            [3. 4.]]
        """
        if not shape:
            raise ValueError("The shape variable should not be empty")
        if isinstance(shape[0], tuple):
            if len(shape) != 1:
                raise ValueError(f"Only one tuple is needed, but got {shape}")
            shape = shape[0]
        return tensor_operator_registry.get('reshape')(self, shape)

    def view_as(self, other):
        r"""
        View self Tensor as the same shape as `other` .

        Args:
            other(Tensor): The returned Tensor has the same shape as `other`.

        Returns:
            Tensor, has the same shape as `other`.

        Raises:
            TypeError: If `other` is not a Tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor
            >>> from mindspore import dtype as mstype
            >>> a = Tensor([[1, 2, 3], [2, 3, 4]], mstype.float32)
            >>> b = Tensor([1, 1, 1, 1, 1, 1], mstype.float32)
            >>> output = a.view_as(b)
            >>> print(output)
            [1. 2. 3. 2. 3. 4.]
        """
        if not isinstance(other, (Tensor, Tensor_)):
            raise TypeError(f"For view_as, the input other must be a Tensor, but got {type(other)}")
        return self.view(other.shape)

    def t(self):
        r"""
        For details, please refer to :func:`mindspore.ops.t`.
        """
        return tensor_operator_registry.get("t")(self)

    def bitwise_and(self, other):
        """
        For details, please refer to :func:`mindspore.ops.bitwise_and`.
        """
        return tensor_operator_registry.get('bitwise_and')(self, other)

    def bitwise_or(self, other):
        """
        For details, please refer to :func:`mindspore.ops.bitwise_or`.
        """
        return tensor_operator_registry.get('bitwise_or')(self, other)

    def bitwise_xor(self, other):
        """
        For details, please refer to :func:`mindspore.ops.bitwise_xor`.
        """
        return tensor_operator_registry.get('bitwise_xor')(self, other)

    def bitwise_left_shift(self, other):
        """
        For details, please refer to :func:`mindspore.ops.bitwise_left_shift`.
        """
        return tensor_operator_registry.get('bitwise_left_shift')(self, other)

    def bitwise_right_shift(self, other):
        """
        For details, please refer to :func:`mindspore.ops.bitwise_right_shift`.
        """
        _cast = tensor_operator_registry.get('cast')
        other = _cast(other, self.dtype)
        return tensor_operator_registry.get('bitwise_right_shift')(self, other)
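
    # Note: unlike the other bitwise methods above, bitwise_right_shift first
    # casts `other` to self.dtype, so e.g. shifting an int32 tensor by a Python
    # int uses a matching int32 operand (16 >> 2 yields 4).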

    def scatter(self, axis, index, src):
        """
        For details, please refer to :func:`mindspore.ops.scatter`.
        """
        return tensor_operator_registry.get('scatter')(self, axis, index, src)

    def scatter_mul(self, indices, updates):
        """
        For details, please refer to :func:`mindspore.ops.scatter_mul`.
        """
        return tensor_operator_registry.get('tensor_scatter_mul')(self, indices, updates)

    def scatter_div(self, indices, updates):
        """
        For details, please refer to :func:`mindspore.ops.scatter_div`.
        """
        return tensor_operator_registry.get('tensor_scatter_div')(self, indices, updates)

    def ger(self, vec2):
        """
        For details, please refer to :func:`mindspore.ops.ger`.
        """
        return tensor_operator_registry.get('ger')(self, vec2)

    def gt(self, x):
        """
        For details, please refer to :func:`mindspore.ops.gt`.
        """
        return tensor_operator_registry.get('gt')(self, x)

    def ge(self, x):
        """
        For details, please refer to :func:`mindspore.ops.ge`.
        """
        return tensor_operator_registry.get('ge')(self, x)

    def broadcast_to(self, shape):
        """
        For details, please refer to :func:`mindspore.ops.broadcast_to`.
        """
        return tensor_operator_registry.get('broadcast_to')(self, shape)

    def expand_as(self, x):
        """
        Expand the shape of this tensor to the shape of `x` by broadcasting.

        Args:
            x (Tensor): The target tensor. Its shape and the shape of this tensor
                must obey the broadcasting rule.

        Returns:
            Tensor, has the same shape as `x`.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> from mindspore import dtype as mstype
            >>> x = Tensor([1, 2, 3], dtype=mstype.float32)
            >>> y = Tensor(np.ones((2, 3)), dtype=mstype.float32)
            >>> output = x.expand_as(y)
            >>> print(output)
            [[1. 2. 3.]
             [1. 2. 3.]]
        """
        return tensor_operator_registry.get('broadcast_to')(self, x.shape)

    def exp(self):
        """
        For details, please refer to :func:`mindspore.ops.exp`.
        """
        return tensor_operator_registry.get('exp')(self)

    def real(self):
        r"""
        For details, please refer to :func:`mindspore.ops.real`.
        """
        return tensor_operator_registry.get('real')(self)

    def rsqrt(self):
        r"""
        For details, please refer to :func:`mindspore.ops.rsqrt`.
        """
        return tensor_operator_registry.get('rsqrt')(self)

    def reciprocal(self):
        r"""
        For details, please refer to :func:`mindspore.ops.reciprocal`.
        """
        return tensor_operator_registry.get('reciprocal')(self)

    def sqrt(self):
        """
        For details, please refer to :func:`mindspore.ops.sqrt`.
        """
        return tensor_operator_registry.get('sqrt')(self)

    def square(self):
        """
        For details, please refer to :func:`mindspore.ops.square`.
        """
        return tensor_operator_registry.get('square')(self)

    def sub(self, y):
        r"""
        For details, please refer to :func:`mindspore.ops.sub`.
        """
        return tensor_operator_registry.get('sub')(self, y)

    def tan(self):
        """
        For details, please refer to :func:`mindspore.ops.tan`.
        """
        return tensor_operator_registry.get('tan')(self)

    def tanh(self):
        r"""
        For details, please refer to :func:`mindspore.ops.tanh`.
        """
        return tensor_operator_registry.get('tanh')(self)

    def cosh(self):
        r"""
        For details, please refer to :func:`mindspore.ops.cosh`.
        """
        return tensor_operator_registry.get('cosh')(self)

    def acos(self):
        r"""
        For details, please refer to :func:`mindspore.ops.acos`.
        """
        return tensor_operator_registry.get('acos')(self)

    def arccos(self):
        r"""
        Alias for :func:`mindspore.Tensor.acos`.
        """
        return self.acos()

    def cos(self):
        r"""
        For details, please refer to :func:`mindspore.ops.cos`.
        """
        return tensor_operator_registry.get('cos')(self)

    def cov(self, *, correction=1, fweights=None, aweights=None):
        r"""
        For details, please refer to :func:`mindspore.ops.cov`.
        """
        return tensor_operator_registry.get('cov')(self, correction=correction, fweights=fweights, aweights=aweights)

    def acosh(self):
        """
        For details, please refer to :func:`mindspore.ops.acosh`.
        """
        return tensor_operator_registry.get('acosh')(self)

    def asin(self):
        r"""
        For details, please refer to :func:`mindspore.ops.asin`.
        """
        return tensor_operator_registry.get('asin')(self)

    def abs(self):
        """
        For details, please refer to :func:`mindspore.ops.abs`.
        """
        return tensor_operator_registry.get('abs')(self)

    def absolute(self):
        """
        Alias for :func:`mindspore.Tensor.abs`.
        """
        return self.abs()

    def ceil(self):
        """
        For details, please refer to :func:`mindspore.ops.ceil`.
        """
        return tensor_operator_registry.get('ceil')(self)

    def floor(self):
        """
        For details, please refer to :func:`mindspore.ops.floor`.
        """
        return tensor_operator_registry.get('floor')(self)

    def floor_divide(self, other):
        """
        For details, please refer to :func:`mindspore.ops.floor_divide`.

        .. warning::
            This is an experimental API that is subject to change or deletion.
        """
        return tensor_operator_registry.get('floor_divide')(self, other)

    def lerp(self, end, weight):
        """
        For details, please refer to :func:`mindspore.ops.lerp`.
        """
        return tensor_operator_registry.get('lerp')(self, end, weight)

    def negative(self):
        r"""
        For details, please refer to :func:`mindspore.ops.negative`.
        """
        return tensor_operator_registry.get("negative")(self)

    # pylint: disable=redefined-builtin
    def norm(self, ord=None, dim=None, keepdim=False, *, dtype=None):
        """
        For details, please refer to :func:`mindspore.ops.norm`.
        """
        return tensor_operator_registry.get('norm')(self, ord, dim, keepdim, dtype=dtype)

    def renorm(self, p, axis, maxnorm):
        """
        For details, please refer to :func:`mindspore.ops.renorm`.
        """
        return tensor_operator_registry.get("renorm")(self, p, axis, maxnorm)

    def approximate_equal(self, other, tolerance=1e-5):
        r"""
        For details, please refer to :func:`mindspore.ops.approximate_equal`.
        """
        validator.check_isinstance("x", self, Tensor)
        validator.check_isinstance("y", other, Tensor)
        validator.check_isinstance("tolerance", tolerance, float)
        input_x = self.copy() if self.dtype == mstype.float32 else self.astype(mstype.float16)
        input_y = other.copy() if other.dtype == mstype.float32 else other.astype(mstype.float16)
        return tensor_operator_registry.get('__lt__')(tensor_operator_registry.get('abs')(
            tensor_operator_registry.get('__sub__')(input_x, input_y)
        ), tolerance)
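
    # A hedged sketch of the comparison above: after the float16/float32 dtype
    # alignment, the result is elementwise |x - y| < tolerance.
    #
    #     >>> from mindspore import Tensor
    #     >>> from mindspore import dtype as mstype
    #     >>> x = Tensor([1.0, 2.0, 3.0], mstype.float32)
    #     >>> y = Tensor([1.0, 2.0, 3.1], mstype.float32)
    #     >>> print(x.approximate_equal(y, tolerance=0.5))
    #     [ True  True  True]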

    def log1p(self):
        r"""
        For details, please refer to :func:`mindspore.ops.log1p`.
        """
        return tensor_operator_registry.get('log1p')(self)

    def logit(self, eps=None):
        r"""
        For details, please refer to :func:`mindspore.ops.logit`.
        """
        if eps is None:
            eps = -1.0
        validator.check_value_type('eps', eps, (float,), 'Tensor.logit')
        return tensor_operator_registry.get('logit')(self, eps)
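
    # Note on the sentinel above: `eps=None` is forwarded as -1.0, which the
    # underlying op appears to treat as "no clamping" of the input probabilities
    # (an assumption, not stated here). A small sketch with assumed values:
    #
    #     >>> from mindspore import Tensor
    #     >>> x = Tensor([0.1, 0.5, 0.9])
    #     >>> out = x.logit()    # logit(p) = log(p / (1 - p))
    #     >>> print(out.shape)
    #     (3,)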

    def logaddexp(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.logaddexp`.
        """
        return tensor_operator_registry.get('logaddexp')(self, other)

    def logaddexp2(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.logaddexp2`.
        """
        return tensor_operator_registry.get('logaddexp2')(self, other)

    def logcumsumexp(self, axis):
        r"""
        For details, please refer to :func:`mindspore.ops.logcumsumexp`.

        .. warning::
            This is an experimental API that is subject to change or deletion.
        """
        return tensor_operator_registry.get('logcumsumexp')(self, axis)

    def logsumexp(self, axis, keepdims=False):
        r"""
        For details, please refer to :func:`mindspore.ops.logsumexp`.
        """
        return tensor_operator_registry.get('logsumexp')(self, axis, keepdims)

    def logdet(self):
        r"""
        For details, please refer to :func:`mindspore.ops.logdet`.
        """
        return tensor_operator_registry.get('logdet')(self)

    def i0(self):
        r"""
        For details, please refer to :func:`mindspore.ops.i0`.
        """
        return tensor_operator_registry.get('i0')(self)

    def isclose(self, x2, rtol=1e-05, atol=1e-08, equal_nan=False):
        """
        For details, please refer to :func:`mindspore.ops.isclose`.
        """
        return tensor_operator_registry.get('isclose')(self, x2, rtol, atol, equal_nan)

    def isneginf(self):
        r"""
        For details, please refer to :func:`mindspore.ops.isneginf`.
        """
        return tensor_operator_registry.get('isneginf')(self)

    def isposinf(self):
        r"""
        For details, please refer to :func:`mindspore.ops.isposinf`.
        """
        return tensor_operator_registry.get('isposinf')(self)

    def isreal(self):
        r"""
        For details, please refer to :func:`mindspore.ops.isreal`.
        """
        return tensor_operator_registry.get('isreal')(self)

    def isfinite(self):
        r"""
        For details, please refer to :func:`mindspore.ops.isfinite`.
        """
        return tensor_operator_registry.get('isfinite')(self)

    def is_complex(self):
        r"""
        For details, please refer to :func:`mindspore.ops.is_complex`.
        """
        return tensor_operator_registry.get('is_complex')(self)

    def inv(self):
        r"""
        For details, please refer to :func:`mindspore.ops.inv`.
        """
        return tensor_operator_registry.get('inv')(self)

    def inverse(self):
        r"""
        For details, please refer to :func:`mindspore.ops.inverse`.
        """
        return tensor_operator_registry.get('inverse')(self)

    def invert(self):
        r"""
        For details, please refer to :func:`mindspore.ops.invert`.
        """
        return tensor_operator_registry.get('invert')(self)

    def pow(self, exponent):
        r"""
        For details, please refer to :func:`mindspore.ops.pow`.
        """
        return tensor_operator_registry.get('pow')(self, exponent)

    def log(self):
        """
        For details, please refer to :func:`mindspore.ops.log`.
        """
        return tensor_operator_registry.get('log')(self)

    def log10(self):
        r"""
        For details, please refer to :func:`mindspore.ops.log10`.
        """
        return tensor_operator_registry.get('log10')(self)

    def log2(self):
        r"""
        For details, please refer to :func:`mindspore.ops.log2`.
        """
        return tensor_operator_registry.get('log2')(self)

    def mean(self, axis=None, keep_dims=False):
        """
        For details, please refer to :func:`mindspore.ops.mean`.
        """
        return tensor_operator_registry.get('mean')(self, axis, keep_dims)

    def amin(self, axis=None, keepdims=False, *, initial=None, where=None):
        """
        For details, please refer to :func:`mindspore.ops.amin`.
        """
        if axis is None:
            axis = ()
        return tensor_operator_registry.get('amin')(self, axis, keepdims, initial=initial, where=where)

    def reverse(self, axis):
        """
        For details, please refer to :func:`mindspore.ops.flip`.
        """
        return tensor_operator_registry.get('flip')(self, axis)

    def amax(self, axis=None, keepdims=False, *, initial=None, where=None):
        """
        For details, please refer to :func:`mindspore.ops.amax`.
        """
        if axis is None:
            axis = ()
        return tensor_operator_registry.get('amax')(self, axis, keepdims, initial=initial, where=where)

    def aminmax(self, *, axis=0, keepdims=False):
        r"""
        For details, please refer to :func:`mindspore.ops.aminmax`.
        """
        return tensor_operator_registry.get('aminmax')(self, axis=axis, keepdims=keepdims)

    def reverse_sequence(self, seq_lengths, seq_dim=0, batch_dim=0):
        """
        For details, please refer to :func:`mindspore.ops.reverse_sequence`.
        """
        return tensor_operator_registry.get("reverse_sequence")(self, seq_lengths, seq_dim, batch_dim)

    def prod(self, axis=None, keep_dims=False, dtype=None):
        """
        For details, please refer to :func:`mindspore.ops.prod`.
        """
        return tensor_operator_registry.get('prod')(self, axis, keep_dims, dtype)

    def select(self, condition, y):
        r"""
        For details, please refer to :func:`mindspore.ops.select`.
        """
        if not isinstance(condition, Tensor):
            raise TypeError(f"For 'Tensor.select', the argument 'condition' should be Tensor,"
                            f" but got {type(condition)}.")
        if not isinstance(y, (Tensor, int, float)):
            raise TypeError(f"For 'Tensor.select', the argument 'y' should be Tensor, int or float,"
                            f" but got {type(y)}.")
        if isinstance(y, int) and self.dtype != mstype.int32:
            raise TypeError(f"For 'Tensor.select', if the argument 'y' is int,"
                            f" then the tensor type should be int32 but got {self.dtype}")
        if isinstance(y, float) and self.dtype != mstype.float32:
            raise TypeError(f"For 'Tensor.select', if the argument 'y' is float,"
                            f" then the tensor type should be float32 but got {self.dtype}")
        input_y = y
        if isinstance(y, (int, float)):
            input_y = tensor_operator_registry.get('zeros_like')(self) + y
            if isinstance(y, int):
                input_y = tensor_operator_registry.get('cast')(input_y, mstype.int32)
            else:
                input_y = tensor_operator_registry.get('cast')(input_y, mstype.float32)
        return tensor_operator_registry.get('select')(condition, self, input_y)
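
    # A hedged sketch of the scalar branch above: a Python int or float `y` is
    # broadcast to the tensor's shape (via zeros_like + y) and cast before the
    # elementwise select.
    #
    #     >>> import numpy as np
    #     >>> from mindspore import Tensor
    #     >>> cond = Tensor(np.array([True, False]))
    #     >>> x = Tensor(np.array([1.0, 2.0], dtype=np.float32))
    #     >>> print(x.select(cond, 9.0))   # where cond is False, take 9.0
    #     [1. 9.]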

    def transpose(self, *axes):
        r"""
        For details, please refer to :func:`mindspore.ops.transpose`.
        """
        perm = validator.check_transpose_axis(axes, self.ndim)
        return tensor_operator_registry.get('transpose')(self, perm)

    def col2im(self, output_size, kernel_size, dilation, padding_value, stride):
        """
        For details, please refer to :func:`mindspore.ops.col2im`.
        """
        return tensor_operator_registry.get('col2im')(self, output_size, kernel_size, dilation, padding_value, stride)

    def reshape(self, *shape):
        r"""
        Rearranges the input Tensor based on the given `shape` .

        The `shape` can contain at most one -1, in which case that dimension is inferred from the
        remaining dimensions and the number of elements in the input.

        Args:
            shape (Union[int, tuple[int], list[int]]): If `shape` is a tuple or list, its elements should be
                integers, and only constant values are allowed. i.e., :math:`(y_1, y_2, ..., y_S)`.

        Returns:
            Tensor, if the given `shape` does not contain -1, the `shape` of tensor is :math:`(y_1, y_2, ..., y_S)`.
            If the k-th position in the given `shape` is -1, the `shape` of tensor is :math:`(y_1, ..., y_{k-1},
            \frac{\prod_{i=1}^{R}x_{i}}{y_1\times ...\times y_{k-1}\times y_{k+1}\times...\times y_S} , y_{k+1},
            ..., y_S)`, where the shape of the input tensor is :math:`(x_1, x_2, ..., x_R)`.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
            >>> output = input.reshape(3, 2)
            >>> print(output)
            [[-0.1  0.3]
             [ 3.6  0.4]
             [ 0.5 -3.2]]
        """
        new_shape = validator.check_reshape_shp(shape)
        return tensor_operator_registry.get('reshape')(self, new_shape)

    def reshape_as(self, other):
        """
        Change the shape of the Tensor to the shape of `other` without changing the data.

        Args:
            other (Tensor): The tensor whose shape is used as the new shape.

        Returns:
            Tensor, has the same shape as `other`.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore as ms
            >>> from mindspore import Tensor
            >>> import numpy as np
            >>> x = Tensor([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], dtype=ms.float32)
            >>> y = Tensor(np.arange(6).reshape(3,2))
            >>> output = x.reshape_as(y)
            >>> print(output)
            [[-0.1  0.3]
             [ 3.6  0.4]
             [ 0.5 -3.2]]
        """
        return tensor_operator_registry.get('reshape')(self, other.shape)

    def ravel(self):
        """
        Return a contiguous flattened tensor.

        Returns:
            Tensor, a 1-D tensor, containing the same elements of the input.

        See also:
            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
            - :func:`mindspore.Tensor.flatten`: Return a copy of the tensor collapsed into one dimension.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
            >>> output = x.ravel()
            >>> print(output.shape)
            (24,)
        """
        reshape_op = tensor_operator_registry.get('reshape')
        return reshape_op(self, (-1,))

    def round(self):
        """
        For details, please refer to :func:`mindspore.ops.round`.
        """
        return tensor_operator_registry.get('round')(self)

    def roll(self, shifts, dims):
        """
        For details, please refer to :func:`mindspore.ops.roll`.
        """
        return tensor_operator_registry.get('roll')(shifts, dims)(self)

    def rot90(self, k, dims):
        r"""
        For details, please refer to :func:`mindspore.ops.rot90`.
        """
        return tensor_operator_registry.get('rot90')(self, k, dims)

    def deg2rad(self):
        r"""
        For details, please refer to :func:`mindspore.ops.deg2rad`.
        """
        return tensor_operator_registry.get('deg2rad')(self)

    def dot(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.dot`.
        """
        return tensor_operator_registry.get('dot')(self, other)

    def outer(self, vec2):
        r"""
        For details, please refer to :func:`mindspore.ops.outer`.
        """
        return tensor_operator_registry.get('outer')(self, vec2)

    def rad2deg(self):
        r"""
        For details, please refer to :func:`mindspore.ops.rad2deg`.
        """
        return tensor_operator_registry.get('rad2deg')(self)

    def copysign(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.copysign`.
        """
        return tensor_operator_registry.get('copysign')(self, other)

    def nelement(self):
        r"""
        Alias for :func:`mindspore.Tensor.numel`.
        """
        return tensor_operator_registry.get('nelement')(self)

    def numel(self):
        r"""
        For details, please refer to :func:`mindspore.ops.numel`.
        """
        return tensor_operator_registry.get('numel')(self)

    def permute(self, *axis):
        """
        For details, please refer to :func:`mindspore.ops.permute`.
        """
        perm = validator.check_transpose_axis(axis, self.ndim)
        return tensor_operator_registry.get('permute')(self, perm)

    def positive(self):
        """
        For details, please refer to :func:`mindspore.ops.positive`.
        """
        return tensor_operator_registry.get("positive")(self)

    def remainder(self, divisor):
        r"""
        For details, please refer to :func:`mindspore.ops.remainder`.
        """
        return tensor_operator_registry.get('remainder')(self, divisor)

    def flatten(self, order='C', *, start_dim=0, end_dim=-1):
        r"""
        For details, please refer to :func:`mindspore.ops.flatten`.
        """
        return tensor_operator_registry.get('flatten')(self, order, start_dim=start_dim, end_dim=end_dim)

    def float_power(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.float_power`.
        """
        return tensor_operator_registry.get('float_power')(self, other)

    def fmax(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.fmax`.
        """
        return tensor_operator_registry.get('fmax')(self, other)

    def fmin(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.fmin`.
        """
        return tensor_operator_registry.get('fmin')(self, other)

    def fmod(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.fmod`.
        """
        return tensor_operator_registry.get('fmod')(self, other)

    def narrow(self, axis, start, length):
        """
        For details, please refer to :func:`mindspore.ops.narrow`.
        """
        return tensor_operator_registry.get('narrow')(self, axis, start, length)

    def swapaxes(self, axis0, axis1):
        """
        For details, please refer to :func:`mindspore.ops.swapaxes`.
        """
        return tensor_operator_registry.get('swapaxes')(self, axis0, axis1)

    def swapdims(self, dim0, dim1):
        """
        For details, please refer to :func:`mindspore.ops.swapdims`.
        """
        return tensor_operator_registry.get('swapdims')(self, dim0, dim1)

    def squeeze(self, axis=None):
        """
        For details, please refer to :func:`mindspore.ops.squeeze`.
        """
        return tensor_operator_registry.get('squeeze')(self, axis)

    def slogdet(self):
        """
        For details, please refer to :func:`mindspore.ops.slogdet`.
        """
        return tensor_operator_registry.get('slogdet')(self)

    def tril(self, diagonal=0):
        """
        For details, please refer to :func:`mindspore.ops.tril`.
        """
        return tensor_operator_registry.get('tril')(self, diagonal)

    def unsqueeze(self, dim):
        """
        For details, please refer to :func:`mindspore.ops.unsqueeze`.
        """
        validator.check_is_int(dim, 'dim')
        validator.check_int_range(dim, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'dim')
        return tensor_operator_registry.get('unsqueeze')(self, dim)

    def expand_dims(self, axis):
        """
        For details, please refer to :func:`mindspore.ops.expand_dims`.
        """
        validator.check_is_int(axis, 'axis')
        validator.check_int_range(axis, -self.ndim - 1, self.ndim + 1, validator.INC_LEFT, 'axis')
        return tensor_operator_registry.get('expand_dims')(self, axis)
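
    # A quick sketch of the axis-range check above (assumed values): `axis` may
    # range over [-self.ndim - 1, self.ndim], i.e. a new axis can be inserted at
    # any position, including both ends.
    #
    #     >>> import numpy as np
    #     >>> from mindspore import Tensor
    #     >>> x = Tensor(np.ones((2, 3), dtype=np.float32))
    #     >>> print(x.expand_dims(0).shape)
    #     (1, 2, 3)
    #     >>> print(x.expand_dims(-1).shape)
    #     (2, 3, 1)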

    def astype(self, dtype, copy=True):
        """
        Return a copy of the tensor, cast to a specified type.

        Args:
            dtype (Union[:class:`mindspore.dtype`, numpy.dtype, str]): Designated tensor dtype, can be in
                format of `mindspore.dtype.float32` or `numpy.float32` or `float32`.
            copy (bool, optional): By default, astype always returns a newly allocated
                tensor. If this is set to ``False`` and the specified dtype matches the current
                dtype, the input tensor itself is returned instead of a copy. Default: ``True`` .

        Returns:
            Tensor, with the designated dtype.

        Raises:
            TypeError: If the specified dtype cannot be understood.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((1,2,2,1), dtype=np.float32))
            >>> x = x.astype("int32")
            >>> print(x.dtype)
            Int32
        """
        dtype = _check_astype_and_convert(dtype)
        if not copy and dtype == self.dtype:
            return self
        return tensor_operator_registry.get('cast')(self, dtype)
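
    # A minimal sketch of the `copy=False` fast path above (assumed values):
    # when the requested dtype already matches, the tensor itself is returned.
    #
    #     >>> import numpy as np
    #     >>> from mindspore import Tensor
    #     >>> x = Tensor(np.ones((2, 2), dtype=np.float32))
    #     >>> y = x.astype("float32", copy=False)   # same dtype: returns x itself
    #     >>> z = x.astype("int32")                 # different dtype: new tensor
    #     >>> print(z.dtype)
    #     Int32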

    def argmax(self, axis=None, keepdims=False):
        """
        For details, please refer to :func:`mindspore.ops.argmax`.
        """
        out = tensor_operator_registry.get('argmax')(self, axis, keepdims)
        return out

    def argmin(self, axis=None, keepdims=False):
        """
        For details, please refer to :func:`mindspore.ops.argmin`.
        """
        out = tensor_operator_registry.get('argmin')(self, axis, keepdims)
        return out

    def argmax_with_value(self, axis=0, keep_dims=False):
        """
        Returns the maximum value with corresponding index.

        Compute the max value of input Tensor on the specified axis, and return the max value and index.

        Note:
            - In auto_parallel and semi_auto_parallel mode, the first output index cannot be used.
            - If there are multiple maximum values, the index of the first maximum value is used.
            - The value range of `axis` is [-dims, dims - 1]. `dims` is the dimension length of this tensor.

        Args:
            axis (int): The dimension to reduce. Default: ``0`` .
            keep_dims (bool): Whether to keep the reduced dimension. If ``True`` , the output keeps the
                            same dimension as the input; if ``False`` , the reduced dimension is removed.
                            Default: ``False`` .

        Returns:
            tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the maximum value of the input
            tensor.

            - **index** (Tensor) - The index for the maximum value of the input tensor.
              If `keep_dims` is ``True`` , the shape of
              output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`. Otherwise, the shape is
              :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
            - **value** (Tensor) - The maximum value of input tensor, with the same shape as index.

        Raises:
            TypeError: If `keep_dims` is not a bool.
            TypeError: If `axis` is not an int.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
            >>> output, index = x.argmax_with_value()
            >>> print(output, index)
            0.7 3
            >>> output, index = x.argmax_with_value(keep_dims=True)
            >>> print(output, index)
            [0.7] [3]
        """
        if self.shape == ():
            return (self, Tensor(0))
        return tensor_operator_registry.get('argmax_with_value')(self, axis, keep_dims)

    def argmin_with_value(self, axis=0, keep_dims=False):
        """
        Returns the minimum value with corresponding index.

        Note:
            - In auto_parallel and semi_auto_parallel mode, the first output index cannot be used.
            - If there are multiple minimum values, the index of the first minimum value is used.
            - The value range of `axis` is [-dims, dims - 1]. `dims` is the dimension length of this tensor.

        Args:
            axis (int): The dimension to reduce. Default: ``0`` .
            keep_dims (bool): Whether to keep the reduced dimension. If ``True`` , the output keeps the
                            same dimension as the input; if ``False`` , the reduced dimension is removed.
                            Default: ``False`` .

        Returns:
            tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the minimum value of the input
            tensor.

            - **index** (Tensor) - The index for the minimum value of the input tensor.
              If `keep_dims` is ``True`` , the shape of
              output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`. Otherwise, the shape is
              :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
            - **value** (Tensor) - The minimum value of input tensor, with the same shape as index.

        Raises:
            TypeError: If `keep_dims` is not a bool.
            TypeError: If `axis` is not an int.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
            >>> output, index = x.argmin_with_value()
            >>> print(output, index)
            0.0 0
            >>> output, index = x.argmin_with_value(keep_dims=True)
            >>> print(output, index)
            [0.0] [0]
        """
        if self.shape == ():
            return (self, Tensor(0))
        return tensor_operator_registry.get('argmin_with_value')(self, axis, keep_dims)

    def cumsum(self, axis=None, dtype=None):
        """
        For details, please refer to :func:`mindspore.ops.cumsum`.
        """
        x = self
        original_dtype = x.dtype
        # If the original tensor is int and has precision lower than int32, convert it to int32
        if x.dtype in (mstype.bool_, mstype.int8, mstype.int16, mstype.uint8, mstype.uint16):
            x = x.astype(mstype.int32)
        if axis is None:
            x = x.ravel()
            axis = 0
        validator.check_axis_in_range(axis, x.ndim)
        if dtype is not None and original_dtype != dtype:
            return tensor_operator_registry.get('cumsum')()(x, axis).astype(dtype, copy=False)
        return tensor_operator_registry.get('cumsum')()(x, axis)
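
    # A sketch of the promotion above (assumed values): low-precision integer
    # inputs are accumulated in int32; the result is cast to `dtype` only when
    # one is explicitly requested.
    #
    #     >>> import numpy as np
    #     >>> from mindspore import Tensor
    #     >>> x = Tensor(np.array([1, 2, 3], dtype=np.int8))
    #     >>> print(x.cumsum())    # accumulated as int32 internally
    #     [1 3 6]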

    def cummin(self, axis):
        r"""
        For details, please refer to :func:`mindspore.ops.cummin`.
        """
        return tensor_operator_registry.get('cummin')(self, axis)

    def cummax(self, axis):
        r"""
        For details, please refer to :func:`mindspore.ops.cummax`.
        """
        return tensor_operator_registry.get('cummax')(self, axis)

    def index_fill(self, axis, index, value):
        """
        For details, please refer to :func:`mindspore.ops.index_fill`.
        """
        return tensor_operator_registry.get('index_fill')(self, axis, index, value)

    def index_select(self, axis, index):
        """
        For details, please refer to :func:`mindspore.ops.index_select`.
        """
        return tensor_operator_registry.get('index_select')(self, axis, index)

    def inplace_update(self, v, indices):
        """
        For details, please refer to :func:`mindspore.ops.inplace_update`.
        """
        return tensor_operator_registry.get('inplace_update')(self, v, indices)

    def copy(self):
        """
        Return a copy of the tensor.

        Note:
            The current implementation does not support `order` argument.

        Returns:
            Copied tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.ones((3,3)).astype("float32"))
            >>> output = a.copy()
            >>> print(output)
            [[1. 1. 1.]
             [1. 1. 1.]
             [1. 1. 1.]]
        """
        if self.size == 0:
            return self
        origin_dtype = self.dtype
        x = self
        logical_not_op = tensor_operator_registry.get('logical_not')
        if origin_dtype == mstype.bool_:
            return logical_not_op(logical_not_op(x))
        if origin_dtype != mstype.float64:
            x = x.astype("float32")
        x = x / 1.0
        x = x.astype(origin_dtype)
        return x
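
    # The copy above is materialised arithmetically: boolean tensors take a
    # double logical-not, everything else a divide-by-1.0 (with a float32
    # round-trip for non-float64 dtypes). A hedged sanity check:
    #
    #     >>> import numpy as np
    #     >>> from mindspore import Tensor
    #     >>> a = Tensor(np.array([1, 2], dtype=np.int32))
    #     >>> b = a.copy()
    #     >>> print(b.dtype == a.dtype, b.shape == a.shape)
    #     True True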

    def max(self, axis=None, keepdims=False, *, initial=None, where=True, return_indices=False):
        """
        Return the maximum of a tensor or maximum along an axis.

        Note:
            When `axis` is ``None`` , `keepdims` and subsequent parameters
            have no effect, and the returned index is fixed at 0.

        Args:
            axis (Union[None, int, list, tuple of ints], optional): Axis or
                axes along which to operate. By default, flattened input is used. If
                this is a tuple of ints, the maximum is selected over multiple axes,
                instead of a single axis or all the axes as before. Default: ``None`` .
            keepdims (bool, optional):
                If this is set to ``True`` , the axes which are reduced are left in the
                result as dimensions with size one. With this option, the result will
                broadcast correctly against the input array. Default: ``False`` .

        Keyword Args:
            initial (scalar, optional):
                The minimum value of an output element. Must be present to allow
                computation on empty slice. Default: ``None`` .
            where (Tensor[bool], optional):
                A boolean tensor which is broadcasted to match the dimensions of array,
                and selects elements to include in the reduction. If non-default value
                is passed, initial must also be provided. Default: ``True`` .
            return_indices (bool, optional): Whether to return the index of the maximum value.
                Default: ``False`` . If `axis` is a list or tuple of ints, it must be ``False`` .

        Returns:
            Tensor or scalar, maximum of input tensor. If `axis` is ``None`` , the result is a scalar
            value. If `axis` is given, the result is a tensor of dimension ``self.ndim - 1``.

        Raises:
            TypeError: If arguments have types not specified above.

        See also:
            - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
            - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
            - :func:`mindspore.Tensor.min`: Return the minimum of a tensor or minimum along an axis.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.arange(4).reshape((2, 2)).astype('float32'))
            >>> output = a.max()
            >>> print(output)
            3.0
            >>> value, indices = a.max(axis=0, return_indices=True)
            >>> print(value)
            [2. 3.]
            >>> print(indices)
            [1 1]
        """
        if isinstance(axis, (list, tuple)):
            reduce_ = tensor_operator_registry.get("reduce")
            reduce_max = tensor_operator_registry.get("reduce_max")
            maximum = tensor_operator_registry.get("maximum")
            return reduce_(self, reduce_max(keepdims), cmp_fn=maximum, axis=axis, keepdims=keepdims,
                           initial=initial, where=where)
        values, indices = tensor_operator_registry.get("max")(self, axis, keepdims, initial=initial, where=where)
        if not return_indices:
            return values
        return values, indices
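
    # Hedged sketch of the two paths above: a tuple `axis` goes through the
    # generic reduce helper (so `return_indices` must stay ``False``), while an
    # int axis can also return indices.
    #
    #     >>> import numpy as np
    #     >>> from mindspore import Tensor
    #     >>> a = Tensor(np.arange(8).reshape((2, 2, 2)).astype('float32'))
    #     >>> print(a.max(axis=(0, 1)))
    #     [6. 7.]
    #     >>> value, index = a.max(axis=0, return_indices=True)
    #     >>> print(value.shape, index.shape)
    #     (2, 2) (2, 2)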

    def min(self, axis=None, keepdims=False, *, initial=None, where=True, return_indices=False):
        """
        Return the minimum of a tensor or minimum along an axis.

        Note:
            When `axis` is ``None`` , `keepdims` and subsequent parameters
            have no effect, and the returned index is fixed at 0.

        Args:
            axis (Union[None, int, list, tuple of ints], optional): An axis or
                axes along which to operate. By default, flattened input is used. If
                `axis` is a tuple of ints, the minimum is selected over multiple axes,
                instead of a single axis or all the axes as before. Default: ``None`` .
            keepdims (bool, optional):
                If ``True`` , the axes which are reduced are left in the
                result as dimensions with size one. With this option, the result will
                broadcast correctly against the input array. Default: ``False`` .

        Keyword Args:
            initial (scalar, optional):
                The maximum value of an output element. Must be present to allow
                computation on empty slice. Default: ``None`` .
            where (Tensor[bool], optional):
                A boolean tensor which is broadcasted to match the dimensions of array,
                and selects elements to include in the reduction. If non-default value
                is passed, initial must also be provided. Default: ``True`` .
            return_indices (bool, optional): Whether to return the index of the minimum value. Default: ``False`` .
                If `axis` is a list or tuple of ints, it must be ``False`` .

        Returns:
            Tensor or scalar, minimum of input tensor. If `axis` is ``None`` , the result is a scalar
            value. If `axis` is given, the result is a tensor of dimension ``self.ndim - 1``.

        Raises:
            TypeError: If arguments have types not specified above.

        See also:
            - :func:`mindspore.Tensor.argmin`: Return the indices of the minimum values along an axis.
            - :func:`mindspore.Tensor.argmax`: Return the indices of the maximum values along an axis.
            - :func:`mindspore.Tensor.max`: Return the maximum of a tensor or maximum along an axis.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.arange(4).reshape((2, 2)).astype('float32'))
            >>> output = a.min()
            >>> print(output)
            0.0
            >>> output = a.min(axis=0)
            >>> print(output)
            [0. 1.]
            >>> output = a.min(axis=0, initial=9, where=Tensor([False]))
            >>> print(output)
            [9. 9.]
            >>> output = a.min(axis=0, initial=9, where=Tensor([False, True]))
            >>> print(output)
            [9. 1.]
            >>> value, indices = a.min(axis=0, return_indices=True)
            >>> print(value)
            [0. 1.]
            >>> print(indices)
            [0 0]
        """
        if isinstance(axis, (list, tuple)):
            reduce_ = tensor_operator_registry.get("reduce")
            reduce_min = tensor_operator_registry.get("reduce_min")
            minimum = tensor_operator_registry.get("minimum")
            return reduce_(self, reduce_min(keepdims), cmp_fn=minimum, axis=axis, keepdims=keepdims,
                           initial=initial, where=where)
        values, indices = tensor_operator_registry.get("min")(self, axis, keepdims, initial=initial, where=where)
        if not return_indices:
            return values
        return values, indices

    def scatter_add(self, indices, updates):
        """
        For details, please refer to :func:`mindspore.ops.tensor_scatter_add`.
        """
        return tensor_operator_registry.get("tensor_scatter_add")(self, indices, updates)

    def scatter_sub(self, indices, updates):
        """
        Creates a new tensor by subtracting the values in `updates` from the positions in the self tensor
        indicated by `indices`. When multiple values are provided for the same index, the updates are
        subtracted cumulatively. This operation is almost equivalent to
        :class:`mindspore.ops.ScatterNdSub` , except that the updates are applied to the output `Tensor`
        instead of the input `Parameter`.

        The last axis of `indices` is the depth of each index vector. For each index vector,
        there must be a corresponding value in `updates`. The shape of `updates` should be
        equal to the shape of `self[indices]`. For more details, see Examples.

        Note:
            On GPU, if some values of the `indices` are out of bound, instead of raising an index error,
            the corresponding `updates` will not be updated to the self tensor. On CPU, if some values of
            the `indices` are out of bound, an index error is raised. On Ascend, out-of-bound checking is
            not supported; if some values of the `indices` are out of bound, unknown errors may be caused.

        Args:
            indices (Tensor): The index of input tensor whose data type is int32 or int64.
                The rank must be at least 2.
            updates (Tensor): The tensor to update the input tensor, has the same type as input,
                and updates.shape should be equal to indices.shape[:-1] + self.shape[indices.shape[-1]:].

        Returns:
            Tensor, has the same shape and type as self tensor.

        Raises:
            TypeError: If dtype of `indices` is neither int32 nor int64.
            ValueError: If length of shape of self tensor is less than the last dimension of shape of `indices`.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]).astype('float32'))
            >>> indices = Tensor(np.array([[0, 0], [0, 0]]).astype('int32'))
            >>> updates = Tensor(np.array([1.0, 2.2]).astype('float32'))
            >>> output = x.scatter_sub(indices, updates)
            >>> print(output)
            [[-3.3000002  0.3        3.6      ]
             [ 0.4        0.5       -3.2      ]]
        """
        return tensor_operator_registry.get('tensor_scatter_sub')(self, indices, updates)

    def scatter_min(self, indices, updates):
        """
        For details, please refer to :func:`mindspore.ops.tensor_scatter_min`.
        """
        return tensor_operator_registry.get('tensor_scatter_min')(self, indices, updates)

    def scatter_max(self, indices, updates):
        """
        For details, please refer to :func:`mindspore.ops.tensor_scatter_max`.
        """
        return tensor_operator_registry.get('tensor_scatter_max')(self, indices, updates)

    def softmax(self, axis, dtype=None):
        """
        For details, please refer to :func:`mindspore.ops.softmax`.
        """
        return tensor_operator_registry.get('softmax')(self, axis, dtype=dtype)

    def fill(self, value):
        """
        `Tensor.fill` is deprecated, please use `ops.fill` instead.
        """
        if value is None:
            if self.dtype not in (mstype.float16, mstype.float32, mstype.float64):
                raise TypeError("For 'Tensor.fill', if the argument 'value' is None, the type of the original "
                                "tensor must be float, but got {}.".format(self.dtype))
            value = Tensor(float('nan')).astype("float32")
            return tensor_operator_registry.get("tile")()(value, self.shape).astype(self.dtype)
        return tensor_operator_registry.get("fill")(self.dtype, self.shape, value)
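
    # Sketch of the deprecated helper above (assumed values): `value=None`
    # fills a float tensor with NaN by tiling a single NaN scalar; any other
    # scalar is filled directly.
    #
    #     >>> import numpy as np
    #     >>> from mindspore import Tensor
    #     >>> x = Tensor(np.zeros((2, 2), dtype=np.float32))
    #     >>> print(x.fill(1.5))
    #     [[1.5 1.5]
    #      [1.5 1.5]]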

    def fills(self, value):
        """
        `Tensor.fills` is deprecated, please use `ops.fill` instead.
        """
        return tensor_operator_registry.get('fills')(self, value)

    def fill_diagonal(self, fill_value, wrap=False):
        """
        Fills the main diagonal of a Tensor with a specified value and returns the result.
        The input has at least 2 dimensions, and all dimensions of input must be equal in length
        when the dimension of input is greater than 2.

        .. warning::
            This is an experimental API that is subject to change or deletion.

        Args:
            fill_value (float): The value to fill the diagonal of `self` with.
            wrap (bool, optional): Controls whether the diagonal elements continue onto the
                remaining rows in case of a tall matrix (a matrix with more rows than columns).
                Default: ``False``.

        Returns:
            - **y** (Tensor) - Tensor, has the same shape and data type as `self`.

        Raises:
            TypeError: If data type of `self` is not one of the following: float32, int32, int64.
            ValueError: If the dimension of `self` is not greater than 1.
            ValueError: If the size of each dimension is not equal, when the dimension is greater than 2.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((6, 3)), mindspore.float32)
            >>> output = x.fill_diagonal(5.0, wrap=True)
            >>> print(output)
            [[5. 1. 1.]
             [1. 5. 1.]
             [1. 1. 5.]
             [1. 1. 1.]
             [5. 1. 1.]
             [1. 5. 1.]]
        """
        return tensor_operator_registry.get('fill_diagonal')(fill_value, wrap)(self)

    def masked_fill(self, mask, value):
        """
        For details, please refer to :func:`mindspore.ops.masked_fill`.
        """
        if isinstance(value, (float, int)):
            value = tensor_operator_registry.get("scalar_to_tensor")(value, self.dtype)
        if not isinstance(mask, Tensor):
            raise TypeError("For 'Tensor.masked_fill', the type of the argument 'mask' must be Tensor, but "
                            "got {}.".format(type(mask)))
        validator.check_type_name('mask', mask.dtype, [mstype.bool_], "Tensor")
        return tensor_operator_registry.get("masked_fill")(self, mask, value)
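
    # A minimal sketch of the scalar lift above (assumed values): a Python
    # float/int `value` is converted to a tensor of self.dtype before filling.
    #
    #     >>> import numpy as np
    #     >>> from mindspore import Tensor
    #     >>> x = Tensor(np.array([1.0, 2.0, 3.0], dtype=np.float32))
    #     >>> mask = Tensor(np.array([True, False, True]))
    #     >>> print(x.masked_fill(mask, 0.0))
    #     [0. 2. 0.]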

    def ptp(self, axis=None, keepdims=False):
        """
        The name of the function comes from the acronym for "peak to peak". It computes the
        difference between the maximum value and the minimum value along the axis.

        Note:
            Numpy argument `out` is not supported.

        Args:
            axis (Union[None, int, tuple(int)]): Axis or axes along which the range is computed.
                The default is to compute the range of the flattened tensor. Default: ``None`` .
            keepdims (bool): If this is set to ``True`` , the axes which are reduced are left in the result as
                dimensions with size one. With this option, the result will broadcast correctly against the tensor.
                Default: ``False`` .

        Returns:
            Tensor.

        Raises:
            TypeError: If `self` is not a tensor, or `axis` and `keepdims` have types not specified above.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor
            >>> x = Tensor([[4.0, 9.0, 2.0, 10.0], [6.0, 9.0, 7.0, 12.0]]).astype("float32")
            >>> print(x.ptp(axis=1))
            [8. 6.]
            >>> print(x.ptp(axis=0))
            [2. 0. 5. 2.]
        """
        if not isinstance(keepdims, bool):
            raise TypeError("For 'Tensor.ptp', the type of the argument 'keepdims' must be bool, "
                            "but got {}.".format(type(keepdims)))
        if axis is None:
            axis = ()
        else:
            validator.check_axis_type(axis, True, True, False)
            axis = validator.check_axis_valid(axis, self.ndim)

        return self.max(axis, keepdims) - self.min(axis, keepdims)
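
    # The identity used above, as a hedged doctest with assumed values:
    # ptp() == max() - min(), over the flattened tensor when `axis` is None.
    #
    #     >>> import numpy as np
    #     >>> from mindspore import Tensor
    #     >>> x = Tensor(np.array([[1.0, 4.0], [2.0, 8.0]], dtype=np.float32))
    #     >>> print(x.ptp())
    #     7.0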

    def minimum(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.minimum`.
        """
        return tensor_operator_registry.get('minimum')(self, other)

    def clamp(self, min=None, max=None):
        r"""
        For details, please refer to :func:`mindspore.ops.clamp`.
        """
        return tensor_operator_registry.get('clamp')(self, min, max)

    def clip(self, min=None, max=None):
        r"""
        Alias for :func:`mindspore.Tensor.clamp`.
        """
        return self.clamp(min, max)

2750    def init_data(self, slice_index=None, shape=None, opt_shard_group=None):
2751        """
2752        Get the tensor format data of this Tensor.
2753
2754        Note:
2755            The init_data function can be called once for the same tensor.
2756
2757        Args:
2758            slice_index (int): Slice index of a parameter's slices.
2759                It is used when initialize a slice of a parameter, it guarantees that devices
2760                using the same slice can generate the same tensor. Default: ``None``.
2761            shape (list[int]): Shape of the slice, it is used when initialize a slice of the parameter.
2762                Default: ``None``.
2763            opt_shard_group(str): Optimizer shard group which is used in auto or semi auto parallel mode
2764                to get one shard of a parameter's slice. For more information about optimizer parallel, please refer to:
2765                `Optimizer Parallel
2766                <https://www.mindspore.cn/tutorials/experts/en/master/parallel/optimizer_parallel.html>`_.
2767                Default: ``None``.
2768
2769        Returns:
2770            Initialized Tensor.
2771
2772        Supported Platforms:
2773            ``Ascend`` ``GPU`` ``CPU``
2774
2775        Examples:
2776            >>> import mindspore as ms
2777            >>> from mindspore.common.initializer import initializer, Constant
2778            >>> x = initializer(Constant(1), [2, 2], ms.float32)
2779            >>> out = x.init_data()
2780            >>> print(out)
2781            [[1. 1.]
2782             [1. 1.]]
2783        """
2784        if self.init is None:
2785            raise TypeError("init_data must be set Tensor.init, init can't be None")
2786
2787        if shape is None:
2788            shape = self.shape
2789        # At embedding cache scenes, we need limit the size of memory for tensor.
2790        # And save out of range data to persistent storage to support TB-Level size of tensor.
2791        data_shape = list(shape)
2792        slice_num_of_persistent_data = get_slice_num(self.dtype, shape)
2793        if slice_num_of_persistent_data > 1:
2794            slice_first_dim = math.ceil(shape[0] / slice_num_of_persistent_data)
2795            data_shape[0] = slice_first_dim
2796            self.slice_shape_of_persistent_data_ = data_shape
2797            self.slice_num_of_persistent_data_ = slice_num_of_persistent_data
2798
2799        try:
2800            data = np.ndarray(data_shape, dtype=mstype.dtype_to_nptype(self.dtype))
2801        except ValueError as e:
2802            msg = "Error shape={}".format(shape)
2803            logger.critical(msg)
2804            raise ValueError(msg) from e
2805
        class seed_context:
            """Set and restore seed."""

            def __init__(self, init):
                self.init = init
                global_seed = get_seed()
                self._np_seed = np.random.get_state()[1][0]
                self.need_set_seed = (slice_index is not None)
                self._global_seed = global_seed
                self._seed_offset = 1
                if self.need_set_seed:
                    self._seed_offset = get_group_size() * 2

            def __enter__(self):
                if self.need_set_seed:
                    self.seed = self.init.seed
                    if self._global_seed is not None:
                        np.random.seed(slice_index + self._global_seed)
                        self.init.seed = slice_index + self._global_seed
                    else:
                        np.random.seed(slice_index + Tensor.delta_seed)
                        self.init.seed = slice_index + Tensor.delta_seed
                        Tensor.delta_seed += self._seed_offset

            def __exit__(self, ptype, value, trace):
                if self.need_set_seed:
                    np.random.seed(self._np_seed)
                    self.init.seed, _ = self.seed

        with seed_context(self.init):
            if slice_num_of_persistent_data == 1:
                self.init(data)
        self.init = None
        # In embedding cache scenarios, when the tensor size is out of range, store the data in persistent storage.
        if slice_num_of_persistent_data > 1:
            self.assign_value(Tensor_.persistent_data_from_numpy(data, slice_num_of_persistent_data))
        else:
            self.assign_value(Tensor_.from_numpy(data))
        return self

    def resize(self, *new_shape):
        """
        Changes the shape and size of the tensor.

        If the new shape is larger than the original shape, the new tensor is filled with
        the original elements followed by 0. If the new shape is smaller, the new tensor
        is filled with the leading elements of the original tensor in order.

        Note:
            Instead of changing the size of the input tensor in place and returning nothing,
            as numpy does, this method returns a new Tensor with the given size.
            The numpy argument `refcheck` is not supported.

        Args:
            new_shape (Union[ints, tuple of ints]): Shape of resized tensor.

        Returns:
            Tensor.

        See also:
            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
            - :func:`mindspore.Tensor.repeat`: Repeat elements of a tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32))
            >>> y = x.resize(3, 3)
            >>> print(y)
            [[1. 2. 3.]
             [4. 5. 6.]
             [0. 0. 0.]]
            >>> y = x.resize(2, 2)
            >>> print(y)
            [[1. 2.]
             [3. 4.]]
        """
        if not new_shape:
            return self
        if len(new_shape) == 1:
            if isinstance(new_shape[0], tuple):
                new_shape = new_shape[0]
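        # Flatten the data, then zero-pad or truncate it to the new element count
        # before reshaping; this mirrors numpy.resize semantics.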
        flattened = self.ravel()
        cur_size = flattened.size
        new_size = tensor_operator_registry.get('shape_mul')(new_shape)
        diff_size = new_size - cur_size
        if diff_size > 0:
            pad_val = tensor_operator_registry.get('fill')(self.dtype, (diff_size,), 0)
            res = tensor_operator_registry.get('concatenate')((flattened, pad_val), 0)
        else:
            res = flattened[:new_size]
        return res.reshape(new_shape)

    def det(self):
        r"""
        For details, please refer to :func:`mindspore.ops.det`.
        """
        return tensor_operator_registry.get('det')(self)

    def diff(self, n=1, axis=-1, prepend=None, append=None):
        r"""
        For details, please refer to :func:`mindspore.ops.diff`.
        """
        return tensor_operator_registry.get('diff')(self, n, axis, prepend, append)

    def frac(self):
        r"""
        For details, please refer to :func:`mindspore.ops.frac`.
        """
        return tensor_operator_registry.get('frac')(self)

    def argwhere(self):
        r"""
        For details, please refer to :func:`mindspore.ops.argwhere`.
        """
        return tensor_operator_registry.get('argwhere')(self)

    def moveaxis(self, source, destination):
        r"""
        For details, please refer to :func:`mindspore.ops.moveaxis`.
        """
        return tensor_operator_registry.get('moveaxis')(self, source, destination)

    def movedim(self, source, destination):
        r"""
        For details, please refer to :func:`mindspore.ops.movedim`.
        """
        return tensor_operator_registry.get('movedim')(self, source, destination)

    def digamma(self):
        r"""
        For details, please refer to :func:`mindspore.ops.digamma`.
        """
        return tensor_operator_registry.get('digamma')(self)

    def lgamma(self):
        r"""
        For details, please refer to :func:`mindspore.ops.lgamma`.
        """
        return tensor_operator_registry.get('lgamma')(self)

    def diagonal(self, offset=0, axis1=0, axis2=1):
        """
        For details, please refer to :func:`mindspore.ops.diagonal`.
        """
        return tensor_operator_registry.get('diagonal')(self, offset, axis1, axis2)

    def diagonal_scatter(self, src, offset=0, dim1=0, dim2=1):
        r"""
        For details, please refer to :func:`mindspore.ops.diagonal_scatter`.
        """
        return tensor_operator_registry.get('diagonal_scatter')(self, src, offset, dim1, dim2)

    def trace(self, offset=0, axis1=0, axis2=1, dtype=None):
        """
        Return the sum along diagonals of the tensor.

        Args:
            offset (int, optional): Offset of the diagonal from the main diagonal.
                Can be positive or negative. Defaults to the main diagonal (0).
            axis1 (int, optional): Axis to be used as the first axis of the 2-D
                sub-arrays from which the diagonals should be taken. Defaults to the
                first axis (0).
            axis2 (int, optional): Axis to be used as the second axis of the 2-D
                sub-arrays from which the diagonals should be taken. Defaults to the
                second axis (1).
            dtype (:class:`mindspore.dtype`, optional): Overrides the dtype of the
                output Tensor. Default: ``None``.

        Returns:
            Tensor, the sum along diagonals.

        Raises:
            ValueError: If the input tensor has less than two dimensions.

        See also:
            - :func:`mindspore.Tensor.diagonal`: Return specified diagonals.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.eye(3, dtype=np.float32))
            >>> print(x.trace())
            3.0
        """
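        # Fast path: the dedicated trace kernel covers the default arguments.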
        if offset == 0 and axis1 == 0 and axis2 == 1 and dtype is None:
            return tensor_operator_registry.get('trace')(self)
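        # General path: extract the requested diagonal and sum over its last axis.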
        d = self.diagonal(offset, axis1=axis1, axis2=axis2)
        shape = d.shape
        if dtype is None:
            dtype = d.dtype
        if shape[-1] == 0:
            return tensor_operator_registry.get('fill')(dtype, shape[:-1], 0)
        res = tensor_operator_registry.get('reduce_sum')(d.astype(mstype.float32), -1)
        return res.astype(dtype)

    def take(self, indices, axis=None, mode='clip'):
        """
        Takes elements from a tensor along an axis.

        Args:
            indices (Tensor): The indices with shape :math:`(Nj...)` of the values to extract.
            axis (int, optional): The axis over which to select values. By default,
                the flattened input tensor is used. Default: ``None`` .
            mode (str, optional): Supports ``'raise'``, ``'wrap'``, ``'clip'``.

                - ``raise``: Raises an error;

                - ``wrap``: Wraps around;

                - ``clip``: Clips to the range. ``'clip'`` mode means that all indices that are
                  too large are replaced by the index that addresses the last element
                  along that axis. Note that this disables indexing with negative numbers.

                Default: ``'clip'`` .

        Returns:
            Tensor, the indexed result.

        Raises:
            ValueError: If `axis` is out of range, or `mode` has values other than ('raise', 'wrap', 'clip').

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.array([4, 3, 5, 7, 6, 8]))
            >>> indices = Tensor(np.array([0, 1, 4]))
            >>> output = a.take(indices)
            >>> print(output)
            [4 3 6]
        """
        if mode not in ('raise', 'wrap', 'clip'):
            raise ValueError(f"For 'Tensor.take', the argument 'mode' should be one of ['raise', 'wrap', 'clip'],"
                             f" but got {mode}.")
        if axis is None:
            a = self.ravel()
            axis = 0
        else:
            a = self
        ndim = a.ndim
        validator.check_axis_in_range(axis, ndim)
        axis = axis + ndim if axis < 0 else axis

        shape_a = a.shape
        shape_indices = indices.shape
        size_indices = indices.size
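        # Resolve out-of-range indices according to `mode` before gathering.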
        indices = tensor_operator_registry.get('check_indices')(shape_a[axis], indices, mode)

        # reshapes indices to shape (Ni..., Nj..., Nk)
        shape_ni = shape_a[:axis]
        shape_nk = shape_a[axis + 1:]
        shape_out = shape_ni + shape_indices + shape_nk
        shape_indices = tuple(size_indices if i == axis else 1 for i in range(ndim))
        indices = indices.reshape(shape_indices)
        shape_indices = shape_ni + (indices.size,) + shape_nk
        indices = tensor_operator_registry.get('broadcast_to')(indices, shape_indices)

        res = tensor_operator_registry.get('gather_d')(a, axis, indices)
        return res.reshape(shape_out)

    def choose(self, choices, mode='clip'):
        """
        Construct a tensor from an index tensor and a list of tensors to choose from.

        Args:
            choices (Union[tuple, list, Tensor]): Choice tensors. The input tensor and all of the
                `choices` must be broadcastable to the same shape. If `choices` is itself a tensor,
                then its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``)
                is taken as defining the "sequence".
            mode (str, optional): Specifies how indices outside
                ``[0, n-1]`` will be treated. Supports ``'raise'``, ``'wrap'``, ``'clip'``.

                - ``raise``: Raises an error;

                - ``wrap``: Wraps around;

                - ``clip``: Clips to the range. The values greater than n-1 will be mapped to n-1.
                  Note that this mode disables indexing with negative numbers.

                Default: ``'clip'``.

        Returns:
            Tensor, the merged result.

        Raises:
            ValueError: If the input tensor and any of the `choices` cannot be broadcast.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]]
            >>> x = Tensor(np.array([2, 3, 1, 0]))
            >>> print(x.choose(choices))
            [20 31 12  3]
        """
        if isinstance(choices, Tensor):
            shape_choice = validator.infer_out_shape(self.shape, choices.shape[1:])
            choices = tensor_operator_registry.get('broadcast_to')(choices, (choices.shape[0],) + shape_choice)
        else:
            # broadcasts choices to the same shape if choices is a sequence
            choicelist = []
            shapes = ()
            for choice in choices:
                if not isinstance(choice, Tensor):
                    choice = tensor_operator_registry.get('make_tensor')(choice)
                shapes += (choice.shape,)
                choicelist.append(choice)
            shape_choice = validator.infer_out_shape(self.shape, *shapes)
            tmp = []
            for choice in choicelist:
                tmp.append(tensor_operator_registry.get('broadcast_to')(choice, shape_choice))
            choices = tensor_operator_registry.get('stack')(tmp, 0)

        if self.ndim == 0 or choices.ndim == 0:
            raise ValueError(f"For 'Tensor.choose', the original tensor and the argument 'choices' cannot be scalars."
                             f" Their dimensions should all be > 0, but got the original tensor's dimension "
                             f"{self.ndim}, 'choices' dimension {choices.ndim}.")
        a = tensor_operator_registry.get('broadcast_to')(self, shape_choice)
        dtype = choices.dtype
        # adjusts dtype for F.tensor_mul and F.gather_nd
        a = a.astype(mstype.int32)
        choices = choices.astype(mstype.int32)
        a = tensor_operator_registry.get('check_indices')(choices.shape[0], a, mode, allow_negative_index=False)

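        # Build a per-dimension index grid so that gather_nd can pick, for every
        # position, the element of the chosen sub-tensor.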
        grids = []
        ndim = len(a.shape)
        for i in range(ndim):
            dim_grid = Tensor(list(range(a.shape[i])), mstype.int32)
            dim_shape = validator.expanded_shape(ndim, a.shape[i], i)
            dim_grid = tensor_operator_registry.get('broadcast_to')(dim_grid.reshape(dim_shape), a.shape)
            grids.append(dim_grid)
        grid = tensor_operator_registry.get('stack')(grids, -1)
        indices = tensor_operator_registry.get('concatenate')((a.reshape(a.shape + (1,)), grid), -1)
        return tensor_operator_registry.get('gather_nd')(choices, indices).astype(dtype)

    def searchsorted(self, v, side='left', sorter=None):
        """
        Finds indices where elements should be inserted to maintain order.

        Args:
            v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into the tensor.
            side (str, optional): If ``'left'``, the index of the first suitable
                location found is given. If ``'right'``, return the last such index. If there is
                no suitable index, return either 0 or N (where N is the length of the tensor).
                Default: ``'left'`` .
            sorter (Union[int, list, tuple, Tensor], optional): An optional tensor of
                integer indices that sort the tensor into ascending order on the innermost dimension;
                the type must be int64. They are typically the result of argsort. Default: ``None`` .

        Returns:
            Tensor, array of insertion points with the same shape as `v`.

        Raises:
            ValueError: If argument for `side` or `sorter` is invalid.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([1, 2, 3, 4, 5]))
            >>> print(x.searchsorted(3))
            2
        """
        if side not in ('left', 'right'):
            raise ValueError(f"For 'Tensor.searchsorted', the argument 'side' should be one of "
                             f"['left', 'right'], but got {side}.")
        if not isinstance(v, Tensor):
            v = tensor_operator_registry.get('make_tensor')(v)
        if sorter is not None:
            if not isinstance(sorter, (int, list, tuple, Tensor)):
                raise TypeError("For 'Tensor.searchsorted', the type of the argument 'sorter' must be one of 'int', "
                                "'list', 'tuple', 'Tensor', but got {}.".format(type(sorter)))
            if not isinstance(sorter, Tensor):
                sorter = tensor_operator_registry.get('make_tensor')(sorter)
            if sorter.size != self.size:
                raise ValueError("The size of 'sorter' must be the same as the size of the Tensor.")

        dtype = mstype.int32
        right = (side == 'right')
        search_sorted_ = tensor_operator_registry.get('searchsorted')(dtype, right)
        return search_sorted_(self, v, sorter)

    def gather_nd(self, indices):
        r"""
        For details, please refer to :func:`mindspore.ops.gather_nd`.
        """
        validator.check_value_type('indices', indices, (Tensor, Tensor_,), 'Tensor.gather_nd')
        return tensor_operator_registry.get('gather_nd')(self, indices)

    def gather(self, input_indices, axis, batch_dims=0):
        r"""
        For details, please refer to :func:`mindspore.ops.gather`.
        """
        validator.check_is_int(axis, 'axis')
        validator.check_is_int(batch_dims, "batch_dims")
        return tensor_operator_registry.get('gather')(self, input_indices, axis, batch_dims)

    def uniform(self, from_=0., to=1., generator=None):
        r"""
        Generates random numbers in the half-open interval [from_, to).

        Args:
            from_ (number): The lower bound of the interval. Default: ``0.``.
            to (number): The upper bound of the interval. Default: ``1.``.
            generator (Generator, optional): The generator used to sample random numbers. Default: ``None``.

        Returns:
            Tensor, with the same shape as tensor.

        Raises:
            TypeError: If `from_` is larger than `to`.

        Supported Platforms:
            ``Ascend``

        Examples:
            >>> import mindspore
            >>> x = mindspore.ops.ones((4, 2))
            >>> generator = mindspore.Generator()
            >>> generator.manual_seed(100)
            >>> output = x.uniform(1., 2., generator)
            >>> print(output.shape)
            (4, 2)
        """
        return tensor_operator_registry.get('uniform')(self, from_, to, generator)

    def var(self, axis=None, ddof=0, keepdims=False):
        """
        Compute the variance along the specified axis.

        The variance is the average of the squared deviations from the mean, i.e.,
        :math:`var = mean(abs(x - x.mean())**2)`.

        Return the variance, which is computed for the flattened array by default,
        otherwise over the specified axis.

        Note:
            Numpy arguments `dtype`, `out` and `where` are not supported.

        Args:
            axis (Union[None, int, tuple(int)]): Axis or axes along which the variance is computed.
                The default is to compute the variance of the flattened array. Default: ``None`` .
            ddof (int): Means Delta Degrees of Freedom. Default: ``0`` .
                The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the number of elements.
            keepdims (bool): Whether to keep the reduced dimensions in the result. Default: ``False`` .

        Returns:
            Variance tensor.

        See also:
            - :func:`mindspore.Tensor.mean`: Reduce a dimension of a tensor by averaging all elements in the dimension.
            - :func:`mindspore.Tensor.std`: Compute the standard deviation along the specified axis.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.array([1., 2., 3., 4.], np.float32))
            >>> output = input_x.var()
            >>> print(output)
            1.25
        """
        if 0 in self.shape:
            return Tensor(float('nan'), self.dtype)
        if not isinstance(ddof, int):
            raise TypeError("For 'Tensor.var', the type of the argument 'ddof' must be int, but got "
                            "{}.".format(type(ddof)))
        if not isinstance(keepdims, bool):
            raise TypeError("For 'Tensor.var', the type of the argument 'keepdims' must be bool, but "
                            "got {}.".format(type(keepdims)))

        if axis is None:
            axis = ()
        else:
            axis = validator.check_and_canonicalize_axes(axis, self.ndim)
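        # var = mean((x - mean(x)) ** 2), with the divisor adjusted to N - ddof.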
        x_mean = tensor_operator_registry.get('mean')(self, axis, True)
        x_sub = tensor_operator_registry.get('__sub__')(self, x_mean)
        x_pow = tensor_operator_registry.get('__pow__')(x_sub, 2)
        x_sum = tensor_operator_registry.get('reducesum')(bool(keepdims))(x_pow, axis)
        nums = 1
        if axis == ():
            nums = self.size
        else:
            for ax in axis:
                nums *= self.shape[ax]
        return tensor_operator_registry.get('__truediv__')(x_sum, nums - ddof)

    def std(self, axis=None, ddof=0, keepdims=False):
        """
        For details, please refer to :func:`mindspore.ops.std`.
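
        Examples:
            >>> # A minimal sketch: std is the square root of var (here sqrt(1.25)).
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.array([1., 2., 3., 4.], np.float32))
            >>> output = input_x.std()
            >>> print(output)
            1.118034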
        """
        x_var = self.var(axis, ddof, keepdims)
        return tensor_operator_registry.get('__pow__')(x_var, 0.5)

    def sum(self, axis=None, dtype=None, keepdims=False, initial=None):
        """
        Return sum of tensor elements over a given axis.

        Note:
            Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are not supported.
            The `axis` with tensor type is only used for compatibility with older versions and is not recommended.

        Args:
            axis (Union[None, int, tuple(int), list(int), Tensor]): Axis or axes along which a sum is performed.
                Default: ``None`` .
                If ``None`` , sum all the elements of the input tensor.
                If the `axis` is negative, it counts from the last to the first `axis`.
                If the `axis` is a tuple or list of ints, a sum is performed on all the axes specified in the tuple
                or list instead of a single `axis` or all the axes as before.
            dtype (:class:`mindspore.dtype`, optional): Overrides the dtype of the
                output Tensor. Default: ``None`` .
            keepdims (bool): If this is set to ``True`` , the axes which are reduced are left in the result as
                dimensions with size one. With this option, the result will broadcast correctly against the input
                array. Default: ``False`` .
            initial (scalar): Starting value for the sum. Default: ``None`` .

        Returns:
            Tensor. A tensor with the same shape as input, with the specified `axis` removed.
            If the input tensor is a 0-d array, or if the `axis` is ``None`` , a scalar is returned.

        Raises:
            TypeError: If input is not array_like, or `axis` is not int, tuple of ints, list of ints or Tensor,
                or `keepdims` is not integer, or `initial` is not scalar.
            ValueError: If any `axis` is out of range or duplicate axes exist.

        See also:
            - :func:`mindspore.Tensor.cumsum`: Return the cumulative sum of the elements along a given `axis`.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.array([-1, 0, 1]).astype(np.float32))
            >>> print(input_x.sum())
            0.0
            >>> input_x = Tensor(np.arange(10).reshape(2, 5).astype(np.float32))
            >>> print(input_x.sum(axis=1))
            [10. 35.]
        """
        if initial is None:
            res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype)
        else:
            res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype) + initial
        if dtype is not None and (dtype == mstype.bool_):
            res = res.astype(mstype.bool_)
        return res

    def sum_to_size(self, *size):
        r"""
        Sum this Tensor to the given `size`. `size` must be expandable to the size of this Tensor.

        Args:
            size (Union[tuple(int), int]): The expected shape of output Tensor.

        Returns:
            Tensor, the sum result of self Tensor according to the `size`.

        Raises:
            ValueError: If `size` is not expandable to the size of self Tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> x = Tensor(np.random.randn(3, 3, 3, 3, 3, 3), mindspore.float32)
            >>> output = x.sum_to_size((1, 3, 1, 3))
            >>> print(output.shape)
            (1, 3, 1, 3)
        """
        x = self
        if len(size) == 1 and isinstance(size[0], tuple):
            size = size[0]
        shape_x = x.shape
        if len(size) > x.ndim:
            raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_x}.")
        if len(size) < x.ndim:
            pre_axis = tuple([axis for axis in range(x.ndim - len(size))])
            x = x.sum(pre_axis)
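        # Sum (keeping dims) every remaining axis whose target size is 1.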
        axes = []
        for i, element in enumerate(size):
            if element != x.shape[i] and element == 1:
                axes.append(i)
            elif element != x.shape[i]:
                raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_x}.")
        if axes:
            return x.sum(tuple(axes), keepdims=True)
        return x

    def nansum(self, axis=None, keepdims=False, dtype=None):
        """
        For details, please refer to :func:`mindspore.ops.nansum`.
        """
        return tensor_operator_registry.get('nansum')(self, axis=axis, keepdims=keepdims, dtype=dtype)

    def nanmean(self, axis=None, keepdims=False, *, dtype=None):
        r"""
        For details, please refer to :func:`mindspore.ops.nanmean`.
        """
        return tensor_operator_registry.get('nanmean')(self, axis, keepdims, dtype=dtype)

    def nanmedian(self, axis=-1, keepdims=False):
        r"""
        For details, please refer to :func:`mindspore.ops.nanmedian`.
        """
        return tensor_operator_registry.get('nanmedian')(self, axis, keepdims)

    def repeat(self, repeats, axis=None):
        """
        Repeat elements of a tensor.

        Args:
            repeats (Union[int, tuple, list]): The number of repetitions for each element.
                `repeats` is broadcasted to fit the shape of the given axis.
            axis (int, optional): The axis along which to repeat values. By default,
                use the flattened input tensor, and return a flat output tensor. Default: ``None``.

        Returns:
            Tensor, has the same shape as input tensor except along the given axis.

        Raises:
            ValueError: If the axis is out of range.
            TypeError: If arguments have types not specified above.

        See also:
            - :func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
            - :func:`mindspore.Tensor.resize`: Changes the shape and size of the tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array(3))
            >>> print(x.repeat(4))
            [3 3 3 3]
            >>> x = Tensor(np.array([[1, 2],[3, 4]]))
            >>> print(x.repeat(2))
            [1 1 2 2 3 3 4 4]
            >>> print(x.repeat(3, axis=1))
            [[1 1 1 2 2 2]
             [3 3 3 4 4 4]]
            >>> print(x.repeat([1,2], axis=0))
            [[1 2]
             [3 4]
             [3 4]]
        """
        if not isinstance(repeats, (tuple, list)):
            repeats = (repeats,)
        for index, element in enumerate(repeats):
            if not isinstance(element, int):
                raise TypeError(f"For 'Tensor.repeat', each element in {repeats} should be int, but got "
                                f"{type(element)} at index {index}.")
        input_x = self
        if axis is None:
            input_x = self.ravel()
            axis = 0
        if axis is not None and not isinstance(axis, int):
            raise TypeError(f"For 'Tensor.repeat', the argument 'axis' should be int, but got {type(axis)}.")
        validator.check_axis_in_range(axis, input_x.ndim)
        axis = axis + input_x.ndim if axis < 0 else axis

        if len(repeats) == 1:
            repeats = repeats[0]
            if repeats == 0:
                return Tensor_(input_x.dtype, (0,))
            return tensor_operator_registry.get('repeat_elements')(input_x, repeats, axis)
        size = input_x.shape[axis]
        if len(repeats) != size:
            raise ValueError(f"For 'Tensor.repeat', the length of 'repeats' must be the same as the shape of the "
                             f"original tensor in the 'axis' dimension, but got the length of 'repeats' "
                             f"{len(repeats)}, the shape of the original tensor in the 'axis' dimension {size}.")
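        # Per-element repeat counts: split along `axis`, repeat each piece the
        # requested number of times, then concatenate the pieces back together.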
        subs = tensor_operator_registry.get('tensor_split')(input_x, size, axis)
        repeated_subs = []
        for sub, rep in zip(subs, repeats):
            if rep != 0:
                repeated_subs.append(tensor_operator_registry.get('repeat_elements')(sub, rep, axis))
        return tensor_operator_registry.get('concatenate')(repeated_subs, axis)

    def repeat_interleave(self, repeats, dim=None):
        """
        For details, please refer to :func:`mindspore.ops.repeat_interleave`.
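
        Examples:
            >>> # A minimal sketch: each element is repeated `repeats` times along `dim`.
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> x = Tensor([1, 2, 3], mindspore.int32)
            >>> print(x.repeat_interleave(2))
            [1 1 2 2 3 3]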
        """
        return tensor_operator_registry.get('repeat_interleave')(self, repeats, dim)

    def bernoulli(self, p=0.5, seed=None):
        r"""
        For details, please refer to :func:`mindspore.ops.bernoulli`.
        """
        return tensor_operator_registry.get('bernoulli')(self, p, seed)

    def random_categorical(self, num_sample, seed=0, dtype=mstype.int64):
        r"""
        For details, please refer to :func:`mindspore.ops.random_categorical`.
        """
        validator.check_is_int(num_sample, 'num_sample')
        validator.check_is_int(seed, 'seed')
        return tensor_operator_registry.get('random_categorical')(self, num_sample, seed, dtype)

    def masked_select(self, mask):
        """
        For details, please refer to :func:`mindspore.ops.masked_select`.
        """
        return tensor_operator_registry.get('masked_select')(self, mask)

    def gather_elements(self, dim, index):
        """
        For details, please refer to :func:`mindspore.ops.gather_elements`.
        """
        validator.check_value_type('index', index, (Tensor, Tensor_,), 'Tensor.gather_elements')
        return tensor_operator_registry.get('gather_elements')(self, dim, index)

    def nonzero(self, as_tuple=False):
        """
        For details, please refer to :func:`mindspore.ops.nonzero`.
        """
        return tensor_operator_registry.get('nonzero')(self, as_tuple)

    def svd(self, full_matrices=False, compute_uv=True):
        """
        For details, please refer to :func:`mindspore.ops.svd`.
        """
        svd_op = tensor_operator_registry.get("svd")
        if compute_uv:
            return svd_op(full_matrices, compute_uv)(self)

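        # The kernel returns three outputs even when compute_uv is False; only the
        # singular values are returned in that case.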
        s, _, _ = svd_op(full_matrices, compute_uv)(self)
        return s

    def hardshrink(self, lambd=0.5):
        r"""
        For details, please refer to :func:`mindspore.ops.hardshrink`.
        """
        return tensor_operator_registry.get('hardshrink')(self, lambd)

    def heaviside(self, values):
        r"""
        For details, please refer to :func:`mindspore.ops.heaviside`.
        """
        return tensor_operator_registry.get('heaviside')(self, values)

    def hypot(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.hypot`.
        """
        return tensor_operator_registry.get('hypot')(self, other)

    def soft_shrink(self, lambd=0.5):
        r"""
        For details, please refer to :func:`mindspore.ops.soft_shrink`.
        """
        return tensor_operator_registry.get('soft_shrink')(self, lambd)

    def matrix_determinant(self):
        r"""
        For details, please refer to :func:`mindspore.ops.matrix_determinant`.
        """
        return tensor_operator_registry.get('matrix_determinant')(self)

    def log_matrix_determinant(self):
        r"""
        For details, please refer to :func:`mindspore.ops.log_matrix_determinant`.
        """
        return tensor_operator_registry.get('log_matrix_determinant')(self)

    def to_coo(self):
        """
        Convert a Tensor to COOTensor.

        Note:
            Only 2-D tensor is supported for now.

        Returns:
            COOTensor, a sparse representation of the original dense tensor, containing the following parts.

            - indices (Tensor): 2-D integer tensor, indicates the positions of `values` of the dense tensor.
            - values (Tensor): 1-D tensor, indicates the non-zero values of the dense tensor.
            - shape (tuple(int)): the shape of the COOTensor, is the same as the original dense tensor.

        Raises:
            ValueError: If input tensor is not 2-D.

        Supported Platforms:
            ``GPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([[1,  0], [-5, 0]]), mindspore.float32)
            >>> output = x.to_coo()
            >>> print(output.indices, output.values, output.shape)
            [[0 0]
             [1 0]] [ 1. -5.] (2, 2)
        """
        return tensor_operator_registry.get('dense_to_sparse_coo')(self)

    def to_csr(self):
        """
        Convert a Tensor to CSRTensor.

        Note:
            Only 2-D tensor is supported for now.

        Returns:
            CSRTensor, a sparse representation of the original dense tensor, containing the following parts.

            - indptr (Tensor): 1-D integer tensor, indicates the start and end point for `values` in each row.
            - indices (Tensor): 1-D integer tensor, indicates the column positions of all non-zero values of the input.
            - values (Tensor): 1-D tensor, indicates the non-zero values of the dense tensor.
            - shape (tuple(int)): the shape of the CSRTensor, is the same as the original dense tensor.

        Raises:
            ValueError: If input tensor is not 2-D.

        Supported Platforms:
            ``GPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([[1,  0], [-5, 0]]), mindspore.float32)
            >>> output = x.to_csr()
            >>> print(output.indptr, output.indices, output.values, output.shape)
            [0 1 2] [0 0] [ 1. -5.] (2, 2)
        """
        return tensor_operator_registry.get('dense_to_sparse_csr')(self)

    def tolist(self):
        r"""
        Convert a Tensor to a Python list. If the tensor is a scalar, a Python scalar is returned.

        Returns:
            List or Python scalar.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore as ms
            >>> x = ms.Tensor([[1, 2, 3], [4, 5, 6]])
            >>> out1 = x.tolist()
            >>> print(out1)
            [[1, 2, 3], [4, 5, 6]]
            >>> out2 = x[0][0].tolist()
            >>> print(out2)
            1
        """
        return self.asnumpy().tolist()

    def unbind(self, dim=0):
        r"""
        For details, please refer to :func:`mindspore.ops.unbind`.
        """
        return tensor_operator_registry.get('unbind')(self, dim)

    def unsorted_segment_min(self, segment_ids, num_segments):
        r"""
        For details, please refer to :func:`mindspore.ops.unsorted_segment_min`.
        """
        return tensor_operator_registry.get('unsorted_segment_min')(self, segment_ids, num_segments)

    def unsorted_segment_max(self, segment_ids, num_segments):
        r"""
        For details, please refer to :func:`mindspore.ops.unsorted_segment_max`.
        """
        return tensor_operator_registry.get('unsorted_segment_max')(self, segment_ids, num_segments)

    def unsorted_segment_prod(self, segment_ids, num_segments):
        r"""
        For details, please refer to :func:`mindspore.ops.unsorted_segment_prod`.
        """
        return tensor_operator_registry.get('unsorted_segment_prod')(self, segment_ids, num_segments)

    def unique_consecutive(self, return_idx=False, return_counts=False, axis=None):
        """
        For details, please refer to :func:`mindspore.ops.unique_consecutive`.
        """
        output, idx, counts = tensor_operator_registry.get("unique_consecutive")(return_idx, return_counts, axis)(self)
        if return_idx and return_counts:
            return output, idx, counts
        if return_idx:
            return output, idx
        if return_counts:
            return output, counts
        return output

    def unique_with_pad(self, pad_num):
        """
        For details, please refer to :func:`mindspore.ops.unique_with_pad`.
        """
        return tensor_operator_registry.get("unique_with_pad")(self, pad_num)

    def diag(self):
        r"""
        For details, please refer to :func:`mindspore.ops.diag`.
        """
        return tensor_operator_registry.get('diag')(self)

    def diagflat(self, offset=0):
        r"""
        For details, please refer to :func:`mindspore.ops.diagflat`.
        """
        return tensor_operator_registry.get('diagflat')(self, offset)

    def xdivy(self, y):
        r"""
        For details, please refer to :func:`mindspore.ops.xdivy`.
        """
        return tensor_operator_registry.get("xdivy")(self, y)

    def split(self, split_size_or_sections, axis=0):
        """
        For details, please refer to :func:`mindspore.ops.split`.
        """
        return tensor_operator_registry.get('split')(self, split_size_or_sections, axis)

    def tensor_split(self, indices_or_sections, axis=0):
        """
        For details, please refer to :func:`mindspore.ops.tensor_split`.
        """
        return tensor_operator_registry.get('tensor_split')(self, indices_or_sections, axis)

    def vsplit(self, indices_or_sections):
        """
        For details, please refer to :func:`mindspore.ops.vsplit`.
        """
        return tensor_operator_registry.get('vsplit')(self, indices_or_sections)

    def hsplit(self, indices_or_sections):
        """
        For details, please refer to :func:`mindspore.ops.hsplit`.
        """
        return tensor_operator_registry.get('hsplit')(self, indices_or_sections)

    def dsplit(self, indices_or_sections):
        """
        For details, please refer to :func:`mindspore.ops.dsplit`.
        """
        return tensor_operator_registry.get('dsplit')(self, indices_or_sections)

    def xlogy(self, y):
        r"""
        For details, please refer to :func:`mindspore.ops.xlogy`.
        """
        return tensor_operator_registry.get("xlogy")(self, y)

    def eigvals(self):
        r"""
        For details, please refer to :func:`mindspore.ops.eigvals`.

        .. warning::
            This is an experimental API that is subject to change or deletion.
        """
        return tensor_operator_registry.get("eigvals")()(self)

    def erf(self):
        r"""
        For details, please refer to :func:`mindspore.ops.erf`.
        """
        return tensor_operator_registry.get("erf")(self)

    def erfc(self):
        r"""
        For details, please refer to :func:`mindspore.ops.erfc`.
        """
        return tensor_operator_registry.get("erfc")(self)

    def tile(self, reps):
        r"""
        For details, please refer to :func:`mindspore.ops.tile`.
        """
        return tensor_operator_registry.get('tile')(self, reps)

    def topk(self, k, dim=None, largest=True, sorted=True):
        r"""
        For details, please refer to :func:`mindspore.ops.topk`.
        """
        return tensor_operator_registry.get("topk")(self, k, dim, largest, sorted)

    def top_k(self, k, sorted=True):
        r"""
        `Tensor.top_k` is deprecated, please use `Tensor.topk` instead.
        """
        validator.check_is_int(k, 'k')
        validator.check_bool(sorted, 'sorted')
        return tensor_operator_registry.get("top_k")(self, k, sorted)

    def sigmoid(self):
        r"""
        For details, please refer to :func:`mindspore.ops.sigmoid`.
        """
        return tensor_operator_registry.get("sigmoid")(self)

    def median(self, axis=-1, keepdims=False):
        r"""
        For details, please refer to :func:`mindspore.ops.median`.
        """
        validator.check_axis_in_range(axis, self.ndim)
        return tensor_operator_registry.get('median')(False, axis, keepdims)(self)

    def addmv(self, mat, vec, beta=1, alpha=1):
        r"""
        For details, please refer to :func:`mindspore.ops.addmv`.
        """
        return tensor_operator_registry.get('addmv')(self, mat, vec, beta=beta, alpha=alpha)

    def asinh(self):
        r"""
        For details, please refer to :func:`mindspore.ops.asinh`.
        """
        return tensor_operator_registry.get('asinh')(self)

    def arcsinh(self):
        r"""
        Alias for :func:`mindspore.Tensor.asinh`.
        """
        return tensor_operator_registry.get('arcsinh')(self)

    def atan(self):
        r"""
        For details, please refer to :func:`mindspore.ops.atan`.
        """
        return tensor_operator_registry.get('atan')(self)

    def atanh(self):
        r"""
        For details, please refer to :func:`mindspore.ops.atanh`.
        """
        return tensor_operator_registry.get('atanh')(self)

    def arctanh(self):
        r"""
        Alias for :func:`mindspore.Tensor.atanh`.
        """
        return tensor_operator_registry.get('arctanh')(self)

    def bmm(self, mat2):
        r"""
        For details, please refer to :func:`mindspore.ops.bmm`.
        """
        return tensor_operator_registry.get('bmm')(self, mat2)

    def to(self, dtype):
        r"""
        Performs tensor dtype conversion.

        Args:
            dtype (Number): The target data type of the output tensor. Only a constant value is allowed.

        Returns:
            Tensor, converted to the specified `dtype`.

        Raises:
            TypeError: If `dtype` is not a Number.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
            >>> input_x = Tensor(input_np)
            >>> dtype = mindspore.int32
            >>> output = input_x.to(dtype)
            >>> print(output.dtype)
            Int32
        """
        return tensor_operator_registry.get('to')(self, dtype)

    def type(self, dtype=None):
        r"""
        Change the dtype of the Tensor to the `dtype` . Return the type if `dtype` is ``None`` .

        Args:
            dtype (mindspore.dtype, optional): The specified dtype of output tensor. Default: ``None``.

        Returns:
            Tensor or str. If `dtype` is ``None`` , return a str, which describes the dtype of Tensor.
            If `dtype` is not ``None`` , then return a Tensor, and the dtype of returned Tensor is `dtype` .

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> x = Tensor([[1.2, 2], [3.4, 4]], dtype=mindspore.float32)
            >>> print(x.type())
            Float32
            >>> print(x.type(dtype=mindspore.int32))
            [[1 2]
             [3 4]]
        """
        if dtype is None:
            return str(self.dtype)
        return self.astype(dtype)

    def type_as(self, other):
        r"""
        Change the dtype of the Tensor to the dtype of `other`.

        Args:
            other (Tensor): The return tensor has the same dtype as `other`.

        Returns:
            Tensor, has the same dtype as `other`.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> x = Tensor([[1, 2], [3, 4]], dtype=mindspore.float32)
            >>> y = Tensor([[1, 2], [3, 4]], dtype=mindspore.int32)
            >>> x = x.type_as(y)
            >>> print(x.dtype)
            Int32
        """
        return self.astype(other.dtype)

    def bool(self):
        r"""
        Converts input tensor dtype to `bool`.
        If the value in tensor is zero, it will be `False`, otherwise it will be `True`.

        Returns:
            Tensor, converted to the `bool` dtype.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.ones([2,2]), mindspore.float32)
            >>> output = input_x.bool()
            >>> print(output.dtype)
            Bool
        """
        return tensor_operator_registry.get('bool')(self, mstype.bool_)

    def float(self):
        r"""
        Converts input tensor dtype to `float32`.

        Returns:
            Tensor, converted to the `float32` dtype.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.ones([2,2]), mindspore.int32)
            >>> output = input_x.float()
            >>> print(output.dtype)
            Float32
        """
        return tensor_operator_registry.get('float')(self, mstype.float32)

    def half(self):
        r"""
        Converts input tensor dtype to `float16`.

        Returns:
            Tensor, converted to the `float16` dtype.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.ones([2,2]), mindspore.int32)
            >>> output = input_x.half()
            >>> print(output.dtype)
            Float16
        """
        return tensor_operator_registry.get('half')(self, mstype.float16)

    def int(self):
        r"""
        Converts input tensor dtype to `int32`. If the value in tensor is float or half, the decimal part is discarded.

        Returns:
            Tensor, converted to the `int32` dtype.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.ones([2,2]), mindspore.float32)
            >>> output = input_x.int()
            >>> print(output.dtype)
            Int32
        """
        return tensor_operator_registry.get('int')(self, mstype.int32)

    def long(self):
        r"""
        Converts input tensor dtype to `int64`. If the value in tensor is float or half, the decimal part is discarded.

        Returns:
            Tensor, converted to the `int64` dtype.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.ones([2,2]), mindspore.int32)
            >>> output = input_x.long()
            >>> print(output.dtype)
            Int64
        """
        return tensor_operator_registry.get('long')(self, mstype.int64)

    def short(self):
        r"""
        Return a copy of the tensor, cast to int16 type, equivalent to self.astype(mstype.int16).
        If the value in tensor is float or half, the decimal part is discarded.
        For details, please refer to :func:`mindspore.Tensor.astype`.

        Returns:
            Tensor, converted to the `int16` dtype.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore as ms
            >>> import numpy as np
            >>> x = ms.Tensor(np.array([1,2,3,4,5]), ms.int32)
            >>> output = x.short()
            >>> output
            Tensor(shape=[5], dtype=Int16, value= [1, 2, 3, 4, 5])
        """
        return tensor_operator_registry.get('cast')(self, mstype.int16)

4085    def cholesky(self, upper=False):
4086        r"""
4087        For details, please refer to :func:`mindspore.ops.cholesky`.
4088        """
4089        return tensor_operator_registry.get('cholesky')(self, upper=upper)
4090
4091    def cholesky_inverse(self, upper=False):
4092        r"""
4093        For details, please refer to :func:`mindspore.ops.cholesky_inverse`.
4094        """
4095        return tensor_operator_registry.get('cholesky_inverse')(self, upper=upper)
4096
4097    def cholesky_solve(self, input2, upper=False):
4098        r"""
4099        For details, please refer to :func:`mindspore.ops.cholesky_solve`.
4100
4101        .. warning::
4102            This is an experimental API that is subject to change or deletion.
4103        """
4104        return tensor_operator_registry.get('cholesky_solve')(self, input2, upper)
4105
4106    def conj(self):
4107        r"""
4108        For details, please refer to :func:`mindspore.ops.conj`.
4109        """
4110        return tensor_operator_registry.get('conj')(self)
4111
4112    def count_nonzero(self, axis=(), keep_dims=False, dtype=mstype.int32):
4113        r"""
4114        For details, please refer to :func:`mindspore.ops.count_nonzero`.
4115        """
4116        return tensor_operator_registry.get('count_nonzero')(self, axis, keep_dims, dtype)
4117
4118    def cross(self, other, dim=None):
4119        r"""
4120        For details, please refer to :func:`mindspore.ops.cross`.
4121        """
4122        return tensor_operator_registry.get('cross')(self, other, dim)
4123
4124    def erfinv(self):
4125        r"""
4126        For details, please refer to :func:`mindspore.ops.erfinv`.
4127        """
4128        return tensor_operator_registry.get('erfinv')(self)
4129
4130    def less_equal(self, other):
4131        r"""
4132        For details, please refer to :func:`mindspore.ops.less_equal`.
4133        """
4134        return tensor_operator_registry.get('less_equal')(self, other)
4135
4136    def lcm(self, other):
4137        r"""
4138        For details, please refer to :func:`mindspore.ops.lcm`.
4139        """
4140        return tensor_operator_registry.get('lcm')(self, other)
4141
4142    def ldexp(self, other):
4143        r"""
4144        For details, please refer to :func:`mindspore.ops.ldexp`.
4145        """
4146        return tensor_operator_registry.get('ldexp')(self, other)
4147
4148    def fold(self, output_size, kernel_size, dilation=1, padding=0, stride=1):
4149        r"""
4150        For details, please refer to :func:`mindspore.ops.fold`.
4151        """
4152        return tensor_operator_registry.get('fold')(self, output_size, kernel_size, dilation, padding, stride)
4153
4154    def unfold(self, kernel_size, dilation=1, padding=0, stride=1):
4155        r"""
4156        For details, please refer to :func:`mindspore.ops.unfold`.
4157
4158        .. warning::
4159            This is an experimental API that is subject to change or deletion.
4160
4161        """
4162        return tensor_operator_registry.get('unfold')(self, kernel_size, dilation, padding, stride)
4163
4164    def expand(self, size):
4165        r"""
4166        For details, please refer to :func:`mindspore.ops.broadcast_to`.
4167        """
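        # A `size` given as a Tensor is converted to a tuple of ints before broadcasting.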
        if isinstance(size, Tensor):
            size = tensor_operator_registry.get('tensortotuple')()(size)
        return tensor_operator_registry.get('expand')(self, size)

    def cumprod(self, dim, dtype=None):
        r"""
        For details, please refer to :func:`mindspore.ops.cumprod`.
        """
        return tensor_operator_registry.get('cumprod')(self, dim, dtype)

    def multiply(self, value):
        r"""
        For details, please refer to :func:`mindspore.ops.multiply`.
        """
        return tensor_operator_registry.get('multiply')(self, value)

    def div(self, value, *, rounding_mode=None):
        r"""
        For details, please refer to :func:`mindspore.ops.div`.
        """
        return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)

    def divide(self, value, *, rounding_mode=None):
        r"""
        Alias for :func:`mindspore.Tensor.div`.
        """
        return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)

    def eq(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.eq`.
        """
        return tensor_operator_registry.get('equal')(self, other)

    def equal(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.equal`.
        """
        return tensor_operator_registry.get('equal')(self, other)

    def expm1(self):
        r"""
        For details, please refer to :func:`mindspore.ops.expm1`.
        """
        return tensor_operator_registry.get('expm1')(self)

    def index_add(self, dim, index, source, *, alpha=1):
        r"""
        For details, please refer to :func:`mindspore.ops.index_add`.
        """
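        # `alpha` scales `source` before the scaled values are added at `index` along `dim`.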
        check_is_number(alpha, (int, float))
        source = tensor_operator_registry.get('__mul__')(source, alpha)
        return tensor_operator_registry.get('index_add')(self, indices=index, y=source, axis=dim)

    def greater(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.greater`.
        """
        return tensor_operator_registry.get('greater')(self, other)

    def greater_equal(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.greater_equal`.
        """
        return tensor_operator_registry.get('greater_equal')(self, other)

    def igamma(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.igamma`.
        """
        return tensor_operator_registry.get('igamma')(self, other)

    def igammac(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.igammac`.
        """
        return tensor_operator_registry.get('igammac')(self, other)

    def isinf(self):
        r"""
        For details, please refer to :func:`mindspore.ops.isinf`.
        """
        return tensor_operator_registry.get('isinf')(self)

    def isnan(self):
        r"""
        For details, please refer to :func:`mindspore.ops.isnan`.
        """
        return tensor_operator_registry.get('isnan')(self)

    def flip(self, dims):
        """
        For details, please refer to :func:`mindspore.ops.flip`.
        """
        return tensor_operator_registry.get('flip')(self, dims)

    def fliplr(self):
        """
        For details, please refer to :func:`mindspore.ops.fliplr`.
        """
        return tensor_operator_registry.get('fliplr')(self)

    def flipud(self):
        """
        For details, please refer to :func:`mindspore.ops.flipud`.
        """
        return tensor_operator_registry.get('flipud')(self)

    def is_floating_point(self):
        """
        For details, please refer to :func:`mindspore.ops.is_floating_point`.
        """
        return tensor_operator_registry.get('is_floating_point')(self)

    def is_signed(self):
        """
        Judge whether the data type of the tensor is a signed data type.

        Returns:
            Bool. If the dtype of `self` is a signed data type, return True. Otherwise, return False.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore as ms
            >>> x = ms.Tensor([1, 2, 3], ms.int64)
            >>> y = ms.Tensor([1, 2, 3], ms.uint64)
            >>> output = x.is_signed()
            >>> output2 = y.is_signed()
            >>> print(output)
            True
            >>> print(output2)
            False
        """
        return self.dtype in mstype.signed_type

    def le(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.le`.
        """
        return tensor_operator_registry.get('le')(self, other)

    def less(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.less`.
        """
        return tensor_operator_registry.get('less')(self, other)

    def lt(self, other):
        """
        Alias for :func:`mindspore.Tensor.less`.
        """
        return self.less(other)

    def logical_and(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.logical_and`.
        """
        return tensor_operator_registry.get('logical_and')(self, other)

    def logical_not(self):
        r"""
        For details, please refer to :func:`mindspore.ops.logical_not`.
        """
        return tensor_operator_registry.get('logical_not')(self)

    def logical_or(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.logical_or`.
        """
        return tensor_operator_registry.get('logical_or')(self, other)

    def logical_xor(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.logical_xor`.
        """
        return tensor_operator_registry.get('logical_xor')(self, other)

    def lstsq(self, A):
        r"""
        For details, please refer to :func:`mindspore.ops.lstsq`.
        """
        return tensor_operator_registry.get('lstsq')(self, A)

    @property
    def mH(self):
        r"""
        Accessing this property is equivalent to calling ``self.adjoint()``.
        For details, please refer to :func:`mindspore.ops.adjoint`.
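
        For a real-valued matrix, the adjoint reduces to a plain transpose, as illustrated below.

        Examples:
            >>> import mindspore as ms
            >>> x = ms.Tensor([[1., 2.], [3., 4.]], ms.float32)
            >>> print(x.mH)
            [[1. 3.]
             [2. 4.]]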
        """
        return self.adjoint()

    @property
    def mT(self):
        r"""
        Returns a Tensor with the last two dimensions swapped.
        Accessing the attribute ``x.mT`` is equivalent to calling the method ``x.swapaxes(-2, -1)``.
        For details, please refer to :func:`mindspore.Tensor.swapaxes`.
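
        Examples:
            >>> import mindspore as ms
            >>> import numpy as np
            >>> x = ms.Tensor(np.ones((2, 3)), ms.float32)
            >>> print(x.mT.shape)
            (3, 2)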
        """
        return self.swapaxes(-2, -1)

    def mvlgamma(self, p):
        r"""
        For details, please refer to :func:`mindspore.ops.mvlgamma`.
        """
        return tensor_operator_registry.get('mvlgamma')(self, p)

    def matmul(self, tensor2):
        r"""
        For details, please refer to :func:`mindspore.ops.matmul`.
        """
        return tensor_operator_registry.get('matmul')(self, tensor2)

    def inner(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.inner`.
        """
        return tensor_operator_registry.get('inner')(self, other)

    def multinomial(self, num_samples, replacement=True, seed=None):
        r"""
        For details, please refer to :func:`mindspore.ops.multinomial`.
        """
        return tensor_operator_registry.get('multinomial')(self, num_samples, replacement, seed)

    def matrix_power(self, n):
        r"""
        For details, please refer to :func:`mindspore.ops.matrix_power`.

        .. warning::
            This is an experimental API that is subject to change or deletion.

        """
        return tensor_operator_registry.get('matrix_power')(self, n)

    def maximum(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.maximum`.
        """
        return tensor_operator_registry.get('maximum')(self, other)

    def mm(self, mat2):
        r"""
        For details, please refer to :func:`mindspore.ops.mm`.
        """
        return tensor_operator_registry.get('mm')(self, mat2)

    def msort(self):
        r"""
        For details, please refer to :func:`mindspore.ops.msort`.
        """
        return tensor_operator_registry.get('msort')(self)

    def mul(self, value):
        r"""
        For details, please refer to :func:`mindspore.ops.mul`.
        """
        return tensor_operator_registry.get('mul')(self, value)

    def nan_to_num(self, nan=0.0, posinf=None, neginf=None):
        """
        For details, please refer to :func:`mindspore.ops.nan_to_num`.
        """
        return tensor_operator_registry.get('nan_to_num')(self, nan, posinf, neginf)

    def neg(self):
        r"""
        For details, please refer to :func:`mindspore.ops.neg`.
        """
        return tensor_operator_registry.get('neg')(self)

    def ne(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.ne`.
        """
        return tensor_operator_registry.get('ne')(self, other)

    def not_equal(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.not_equal`.
        """
        return tensor_operator_registry.get('not_equal')(self, other)

    def new_zeros(self, size, dtype=None):
        r"""
        Return a tensor of shape `size` filled with zeros.

        .. warning::
            For argument `size`, Tensor type input will be deprecated in the future version.

        Args:
            size (Union[int, tuple, list, Tensor]): An int, or a tuple/list/Tensor of integers, defining the
                output shape.
            dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned tensor has
                the same dtype as `self`. Default: ``None``.

        Returns:
            Tensor, with the shape and dtype defined above, filled with zeros.

        Raises:
            TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
            >>> output = x.new_zeros((2, 2))
            >>> print(output)
            [[0. 0.]
             [0. 0.]]
        """
        return tensor_operator_registry.get('zeros')(size, dtype)

    def new_ones(self, size, dtype=None):
        r"""
        Return a tensor of shape `size` filled with ones.

        .. warning::
            For argument `size`, Tensor type input will be deprecated in the future version.

        Args:
            size (Union[int, tuple, list, Tensor]): An int, or a tuple/list/Tensor of integers, defining the
                output shape.
            dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned
                tensor has the same dtype as `self`. Default: ``None``.

        Returns:
            Tensor, with the shape and dtype defined above, filled with ones.

        Raises:
            TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
            >>> output = x.new_ones((2, 2))
            >>> print(output)
            [[1. 1.]
             [1. 1.]]
        """
        return tensor_operator_registry.get('ones')(size, dtype)

    def sign(self):
        r"""
        For details, please refer to :func:`mindspore.ops.sign`.
        """
        return tensor_operator_registry.get('sign')(self)

    def signbit(self):
        """
        For details, please refer to :func:`mindspore.ops.signbit`.
        """
        return tensor_operator_registry.get('signbit')(self)

    def sgn(self):
        """
        For details, please refer to :func:`mindspore.ops.sgn`.
        """
        return tensor_operator_registry.get('sgn')(self)

    def sin(self):
        r"""
        For details, please refer to :func:`mindspore.ops.sin`.
        """
        return tensor_operator_registry.get('sin')(self)

    def sinc(self):
        r"""
        For details, please refer to :func:`mindspore.ops.sinc`.
        """
        return tensor_operator_registry.get('sinc')(self)

    def sinh(self):
        r"""
        For details, please refer to :func:`mindspore.ops.sinh`.
        """
        return tensor_operator_registry.get('sinh')(self)

    def sort(self, axis=-1, descending=False):
        r"""
        For details, please refer to :func:`mindspore.ops.sort`.
        """
        return tensor_operator_registry.get('sort')(self, axis=axis, descending=descending)

    def argsort(self, axis=-1, descending=False):
        """
        For details, please refer to :func:`mindspore.ops.argsort`.
        """
        return tensor_operator_registry.get('argsort')(self, axis, descending)

    def trunc(self):
        r"""
        For details, please refer to :func:`mindspore.ops.trunc`.
        """
        return tensor_operator_registry.get('trunc')(self)

    def where(self, condition, y):
        r"""
        For details, please refer to :func:`mindspore.ops.where`.
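
        Note that `self` plays the role of the first value argument: elements of `self` are taken
        where `condition` is True, and elements of `y` otherwise, as illustrated below.

        Examples:
            >>> import mindspore as ms
            >>> x = ms.Tensor([1, 2, 3], ms.int32)
            >>> y = ms.Tensor([7, 8, 9], ms.int32)
            >>> condition = ms.Tensor([True, False, True])
            >>> print(x.where(condition, y))
            [1 8 3]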
        """
        return tensor_operator_registry.get('where')(condition, self, y)

    def imag(self):
        r"""
        For details, please refer to :func:`mindspore.ops.imag`.
        """
        return tensor_operator_registry.get('imag')(self)

    def quantile(self, q, axis=None, keepdims=False):
        r"""
        For details, please refer to :func:`mindspore.ops.quantile`.
        """
        return tensor_operator_registry.get('quantile')(self, q, axis, keepdims)

    def nanquantile(self, q, axis=None, keepdims=False):
        """
        For details, please refer to :func:`mindspore.ops.nanquantile`.
        """
        return tensor_operator_registry.get('nanquantile')(self, q, axis, keepdims)

    def orgqr(self, input2):
        r"""
        For details, please refer to :func:`mindspore.ops.orgqr`.
        """
        return tensor_operator_registry.get('orgqr')(self, input2)

    def lu_solve(self, LU_data, LU_pivots):
        r"""
        For details, please refer to :func:`mindspore.ops.lu_solve`.

        .. warning::
            This is an experimental API that is subject to change or deletion.
        """
        return tensor_operator_registry.get('lu_solve')(self, LU_data, LU_pivots)

    def nextafter(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.nextafter`.
        """
        return tensor_operator_registry.get('nextafter')(self, other)

    def qr(self, some=True):
        r"""
        For details, please refer to :func:`mindspore.ops.qr`.
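
        The bool `some` selects the decomposition mode: ``True`` maps to the ``'reduced'`` mode
        and ``False`` to the ``'complete'`` mode of :func:`mindspore.ops.qr`.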
        """
        validator.check_value_type('some', some, bool, 'Tensor.qr')
        return tensor_operator_registry.get('qr')(self, 'reduced' if some else 'complete')

    def ormqr(self, input2, input3, left=True, transpose=False):
        r"""
        For details, please refer to :func:`mindspore.ops.ormqr`.
        Args `input2` and `input3` correspond to the args `tau` and `other` of :func:`mindspore.ops.ormqr`.
        """
        return tensor_operator_registry.get('ormqr')(self, input2, input3, left, transpose)

    def masked_scatter(self, mask, x):
        r"""
        Returns a Tensor. Updates the values in the "self Tensor" with the values in `x` according to `mask`.
        The shape of `mask` and the "self Tensor" must be the same, or `mask` must be broadcastable.

        .. warning::
            This is an experimental API that is subject to change or deletion.

        Args:
            mask (Tensor[bool]): A bool tensor with a shape broadcastable to the "self Tensor".
            x (Tensor): A tensor with the same data type as the "self Tensor". The number
                of elements must be greater than or equal to the number of ``True`` elements in `mask`.

        Returns:
            Tensor, with the same type and shape as the "self Tensor".

        Raises:
            TypeError: If `mask` or `x` is not a Tensor.
            TypeError: If the data type of the "self Tensor" is not supported.
            TypeError: If dtype of `mask` is not bool.
            TypeError: If the dim of the "self Tensor" is less than the dim of `mask`.
            ValueError: If `mask` cannot be broadcast to the "self Tensor".
            ValueError: If the number of elements in `x` is less than the number required for the updates.

        Supported Platforms:
            ``Ascend`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
            >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
            >>> tensor = Tensor(np.array([5., 6., 7.]), mindspore.float32)
            >>> output = x.masked_scatter(mask, tensor)
            >>> print(output)
            [5. 6. 3. 7.]
        """
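        # 'masked_scatter' is registered as an op factory: instantiate it first, then apply it.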
        return tensor_operator_registry.get('masked_scatter')()(self, mask, x)

    def index_put(self, indices, values, accumulate=False):
        r"""
        Returns a Tensor. According to the indices in `indices`, replace the corresponding
        values in the "self Tensor" with the values in `values`.

        Args:
            indices (tuple[Tensor], list[Tensor]): the indices of type int32 or int64, used to index into the "self
                Tensor". The tensors in `indices` should be 1-D, the size of `indices` should be <= the rank of the
                "self Tensor", and the tensors in `indices` should be broadcastable.
            values (Tensor): 1-D Tensor of the same type as the "self Tensor". If its size is 1, it will be broadcast.
            accumulate (bool): If `accumulate` is True, the elements in `values` are added to the "self Tensor";
                otherwise, the elements in `values` replace the corresponding elements in the "self Tensor".
                Default: ``False``.

        Returns:
            Tensor, with the same type and shape as the "self Tensor".

        Raises:
            TypeError: If the dtype of the "self Tensor" is not equal to the dtype of `values`.
            TypeError: If the dtype of `indices` is not tuple[Tensor] or list[Tensor].
            TypeError: If the dtypes of the tensors in `indices` are not int32 or int64.
            TypeError: If the dtypes of the tensors in `indices` are inconsistent.
            TypeError: If the dtype of `accumulate` is not bool.
            ValueError: If `values` is not a 1-D Tensor.
            ValueError: If size(`values`) is neither 1 nor the max size of the tensors in `indices` when
                rank("self Tensor") == size(`indices`).
            ValueError: If size(`values`) is neither 1 nor "self Tensor".shape[-1] when
                rank("self Tensor") > size(`indices`).
            ValueError: If the tensors in `indices` are not 1-D.
            ValueError: If the tensors in `indices` are not broadcastable.
            ValueError: If size(`indices`) > rank("self Tensor").

        Supported Platforms:
            ``Ascend`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> import mindspore
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
            >>> values = Tensor(np.array([3]).astype(np.int32))
            >>> indices = [Tensor(np.array([0, 1, 1]).astype(np.int32)), Tensor(np.array([1, 2, 1]).astype(np.int32))]
            >>> accumulate = True
            >>> output = x.index_put(indices, values, accumulate)
            >>> print(output)
            [[1 5 3]
             [4 8 9]]
        """
        validator.check_value_type('accumulate', accumulate, bool, 'Tensor.index_put')
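        # The underlying primitive takes `accumulate` as an int flag: 0 replaces, 1 accumulates.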
        _index_put = tensor_operator_registry.get('index_put')(0 if accumulate is False else 1)
        return _index_put(self, values, indices)

    def move_to(self, to, blocking=True):
        r"""
        Copy the Tensor to the target device, synchronously or asynchronously (synchronously by default).
        Only supported in PyNative mode.

        Args:
            to (str): a string type value, one of ``"Ascend"``, ``"GPU"``, ``"CPU"``.
            blocking (bool): whether to copy synchronously or asynchronously.
                Default: ``True``, synchronous copy.

        Returns:
            New Tensor, stored on the target device, with the same type and shape as the "self Tensor".

        Raises:
            ValueError: If the type of `blocking` is not bool.
            ValueError: If the value of `to` is not one of ``"Ascend"``, ``"GPU"``, ``"CPU"``.
            ValueError: If the run mode is not PyNative mode.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore as ms
            >>> from mindspore import Tensor
            >>> x = ms.Tensor([1, 2, 3], ms.int64)
            >>> new_tensor = x.move_to("CPU")
        """
        if not isinstance(blocking, bool):
            raise ValueError(f"The type of 'blocking' must be bool, but got {blocking}")
        if to not in ("Ascend", "GPU", "CPU"):
            raise ValueError(f"The value of 'to' must be one of ['Ascend', 'GPU', 'CPU'], but got {to}")
        mode = context.get_context("mode")
        if mode != context.PYNATIVE_MODE:
            raise ValueError(f"The method 'move_to' is only supported in PyNative mode, but got: {mode}.")
        return Tensor(Tensor_.move_to(self, to, blocking))

    def _offload(self):
        r"""
        Offload the tensor's data to the host. Currently, this is only supported in PyNative mode.

        Supported Platforms:
            ``Ascend``

        Examples:
            >>> import mindspore as ms
            >>> from mindspore import Tensor
            >>> x = ms.Tensor([1, 2, 3], ms.int64)
            >>> x._offload()
        """
        return Tensor_._offload(self)


def _vm_compare(*args):
    """Implement `vm_compare` for tensor."""
    if args:
        obj_str = args[-1]
    else:
        raise ValueError("_vm_compare does not receive any input.")
    if obj_str == "shape":
        fn = getattr(args[0].asnumpy(), obj_str)
        return fn
    if obj_str == "__setitem__":
        fn = getattr(args[0].asnumpy(), obj_str)
        index = args[1].asnumpy() if isinstance(args[1], Tensor) else args[1]
        value = args[2].asnumpy() if isinstance(args[2], Tensor) else args[2]
        fn(index, value)
        return args[0]
    if len(args) == 2:
        fn = getattr(args[0].asnumpy(), obj_str)
        return Tensor(fn())
    if isinstance(args[0], Tensor):
        fn = getattr(args[0].asnumpy(), obj_str)
        y = args[1].asnumpy() if isinstance(args[1], Tensor) else args[1]
    else:
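        # The left operand is not a Tensor: fall back to the right operand's reflected
        # numpy method (e.g. '__sub__' becomes '__rsub__').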
        obj_str = "__r" + obj_str[2:]
        fn = getattr(args[1].asnumpy(), obj_str)
        y = args[0]
    return Tensor(np.array(fn(y)))


def _check_tensor_input(input_data=None, dtype=None, shape=None, init=None):
    """Check the tensor input."""
    if input_data is not None and shape is not None:
        raise ValueError(f"When initializing a tensor with 'input_data', 'shape' should be set to None. "
                         f"But got shape: {shape}.")

    if init is not None and (shape is None or dtype is None):
        raise ValueError("init, dtype and shape must have values at the same time.")

    if input_data is not None:
        if isinstance(input_data, np.ndarray) and input_data.ndim >= 1 and input_data.size == 0:
            raise ValueError("input_data can not contain zero dimension.")
        if isinstance(input_data, (tuple, list)) and np.array(input_data).ndim >= 1 \
                and np.array(input_data).size == 0:
            raise ValueError("input_data can not contain zero dimension.")

    if shape is not None and not (hasattr(init, "__enable_zero_dim__") and init.__enable_zero_dim__) and 0 in shape:
        raise ValueError("Shape can not contain zero value.")


def _check_tensor_dynamic_shape(dtype=None, shape=None, init=None):
    """Check if the tensor has dynamic shape."""
    shape_list = list(shape)
    if len(shape_list) >= 1:
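        # Dimensions given as None denote dynamic axes; normalize them to -1.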
        shape_replaced_list = [-1 if i is None else i for i in shape_list]
        if isinstance(shape, tuple):
            shape = tuple(shape_replaced_list)
        if isinstance(shape, list):
            shape = shape_replaced_list
    if is_shape_unknown(shape) and (dtype is None or init is not None):
        raise ValueError("If setting dynamic shape, dtype must not be None and init must be None.")
    return shape


def _check_astype_and_convert(dtype):
    """Check whether dtype is a valid input, and convert to mstype"""
    all_types = mstype.__dtype__ + ["int", "float", "bool"]
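    # String dtypes (e.g. "int32") are matched case-insensitively against the supported names.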
    if isinstance(dtype, str):
        if dtype.lower() not in all_types:
            raise TypeError(f"For Tensor.astype, the string input type must be one of {all_types}, "
                            f"but got '{dtype}'.")
        dtype = mstype.pytype_to_dtype(np.dtype(dtype.lower()))
    elif isinstance(dtype, type):
        dtype = mstype.pytype_to_dtype(dtype)
    elif dtype not in mstype.number_type + (mstype.bool_,):
        raise TypeError(
            f"For Tensor.astype, the input type must be one of {list(mstype.number_type + (mstype.bool_,) + np_types)},"
            f" but got '{dtype}'.")
    return dtype


setattr(tensor_operator_registry, 'vm_compare', _vm_compare)
