# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensor implementation."""
import numbers
import numpy as np

from mindspore import log as logger
from mindspore.communication.management import get_rank, get_group_size
from . import dtype as mstype
from ._register_for_tensor import tensor_operator_registry
from .._c_expression import Tensor as Tensor_
from .._c_expression import PynativeExecutor_
from .._checkparam import Validator as validator

__all__ = ['Tensor', 'RowTensor', 'SparseTensor']
np_types = (np.int8, np.int16, np.int32, np.int64,
            np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
            np.float32, np.float64, np.bool_, np.complex64, np.complex128)


class Tensor(Tensor_):
    """
    Tensor is used for data storage.

    Tensor inherits the tensor object from C++.
    Some functions are implemented in C++, while others are implemented in Python.

    Args:
        input_data (Union[Tensor, float, int, bool, tuple, list, numpy.ndarray]): Input data of the tensor.
        dtype (:class:`mindspore.dtype`): Input data should be None, bool or numeric type defined in `mindspore.dtype`.
            The argument is used to define the data type of the output tensor. If it is None, the data type of the
            output tensor will be the same as the `input_data`. Default: None.
        shape (Union[tuple, list, int]): A list of integers, a tuple of integers or an integer as the shape of
            output. If `input_data` is available, `shape` doesn't need to be set. Default: None.
        init (Initializer): The information of init data.
            'init' is used for delayed initialization in parallel mode. Usually, it is not recommended to use
            'init' interface to initialize parameters in other conditions. If 'init' interface is used to initialize
            parameters, the `Tensor.init_data` API needs to be called to convert `Tensor` to the actual data.

    Outputs:
        Tensor. If `dtype` and `shape` are not set, return a tensor with the same dtype and shape as `input_data`.
        If `dtype` or `shape` is set, the dtype or shape of the output Tensor is consistent with the setting.

    Examples:
        >>> import numpy as np
        >>> import mindspore as ms
        >>> from mindspore import Tensor
        >>> from mindspore.common.initializer import One
        >>> # initialize a tensor with input data
        >>> t1 = Tensor(np.zeros([1, 2, 3]), ms.float32)
        >>> assert isinstance(t1, Tensor)
        >>> assert t1.shape == (1, 2, 3)
        >>> assert t1.dtype == ms.float32
        >>>
        >>> # initialize a tensor with a float scalar
        >>> t2 = Tensor(0.1)
        >>> assert isinstance(t2, Tensor)
        >>> assert t2.dtype == ms.float64
        ...
        >>> # initialize a tensor with init
        >>> t3 = Tensor(shape=(1, 3), dtype=ms.float32, init=One())
        >>> assert isinstance(t3, Tensor)
        >>> assert t3.shape == (1, 3)
        >>> assert t3.dtype == ms.float32
    """

    def __init__(self, input_data=None, dtype=None, shape=None, init=None):
        self.init_finished = False
        # If input data is a numpy number, convert it to an ndarray
        if isinstance(input_data, np_types):
            input_data = np.array(input_data)

        if isinstance(shape, numbers.Number):
            shape = (shape,)

        _check_tensor_input(input_data, dtype, shape, init)

        # If input_data is a tuple/list/numpy.ndarray, its type is validated by check_value_type below.
        if init is None:
            validator.check_value_type('input_data', input_data,
                                       (Tensor_, np.ndarray, list, tuple, float, int, bool, complex), 'Tensor')
            valid_dtypes = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64,
                            np.float16, np.float32, np.float64, np.bool_, np.str_, np.complex64, np.complex128)
            if isinstance(input_data, np.ndarray) and input_data.dtype not in valid_dtypes and \
                input_data.dtype.kind != 'U':  # Support dtype np.str_
                raise TypeError(f"For Tensor, the input_data is a numpy array, "
                                f"but its data type {input_data.dtype} is not in the supported list: "
                                f"{list(i.__name__ for i in valid_dtypes)}.")
            if isinstance(input_data, (tuple, list)):
                if np.array(input_data).dtype not in valid_dtypes:
                    raise TypeError(f"For Tensor, the input_data is {input_data} that contains unsupported elements.")
            if dtype is not None:
                validator.check_type_name('dtype', dtype, mstype.number_type + (mstype.bool_, mstype.string), "Tensor")

            if isinstance(input_data, np.ndarray) and (not input_data.flags['FORC']):
                input_data = np.ascontiguousarray(input_data)
            if dtype is None:
                Tensor_.__init__(self, input_data)
            else:
                Tensor_.__init__(self, input_data, dtype)
        else:
            Tensor_.__init__(self, dtype, shape)
        self._virtual_flag = False
        self.init = init
        self.init_finished = True

    def __deepcopy__(self, memodict):
        new_obj = Tensor(self)
        new_obj.init = self.init
        new_obj._virtual_flag = self._virtual_flag  # pylint:disable=w0212
        return new_obj

    def __repr__(self):
        if self.init_finished:
            Tensor_.data_sync(self, False)
            return Tensor_.__repr__(self)
        return ''

    def __eq__(self, other):
        if not isinstance(other, (int, float, Tensor)):
            return False
        # bool type is not supported for `Equal` operator in backend.
        if self.dtype == mstype.bool_ or (isinstance(other, Tensor) and other.dtype == mstype.bool_):
            if isinstance(other, Tensor):
                return Tensor(np.array(self.asnumpy() == other.asnumpy()))
            return Tensor(np.array(self.asnumpy() == other))
        return tensor_operator_registry.get('__eq__')(self, other)

    def __ne__(self, other):
        if not isinstance(other, (int, float, Tensor)):
            return True
        # bool type is not supported for `NotEqual` operator in backend.
        if self.dtype == mstype.bool_ or (isinstance(other, Tensor) and other.dtype == mstype.bool_):
            return Tensor(np.array(self.asnumpy() != other.asnumpy()))
        return tensor_operator_registry.get('__ne__')(self, other)

    def __hash__(self):
        return hash(id(self))

    def __neg__(self):
        out = tensor_operator_registry.get('__neg__')(self)
        return out

    def __invert__(self):
        out = tensor_operator_registry.get('__logical_not__')(self)
        return out

    def __bool__(self):
        data = self.asnumpy()
        if data.shape == ():
            return bool(data)
        if data.shape == (1,):
            return bool(data[0])
        raise ValueError("The truth value of an array with several elements is ambiguous.")

    def __index__(self):
        data = self.asnumpy()
        if not (data.dtype == "int8"
                or data.dtype == "int16"
                or data.dtype == "int32"
                or data.dtype == "int64"
                or data.dtype == "bool"):
            raise ValueError("Only integer tensors of a single element can be converted to an index.")
        if data.shape == ():
            return int(data)
        if data.shape == (1,):
            return int(data[0])
        raise ValueError("Only integer tensors of a single element can be converted to an index.")

    def __pos__(self):
        return self

    def __add__(self, other):
        return tensor_operator_registry.get('__add__')(self, other)

    def __radd__(self, other):
        return self.__add__(other)

    def __iadd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        return tensor_operator_registry.get('__sub__')(self, other)

    def __rsub__(self, other):
        return tensor_operator_registry.get('__sub__')(other, self)

    def __isub__(self, other):
        return self.__sub__(other)

    def __mul__(self, other):
        return tensor_operator_registry.get('__mul__')(self, other)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __imul__(self, other):
        return self.__mul__(other)

    def __truediv__(self, other):
        return tensor_operator_registry.get('__truediv__')(self, other)

    def __rtruediv__(self, other):
        return tensor_operator_registry.get('__truediv__')(other, self)

    def __mod__(self, other):
        return tensor_operator_registry.get('__mod__')(self, other)

    def __rmod__(self, other):
        return tensor_operator_registry.get('__mod__')(other, self)

    def __imod__(self, other):
        return self.__mod__(other)

    def __pow__(self, other):
        return tensor_operator_registry.get('__pow__')(self, other)

    def __floordiv__(self, other):
        return tensor_operator_registry.get('__floordiv__')(self, other)

    def __rfloordiv__(self, other):
        return tensor_operator_registry.get('__floordiv__')(other, self)

    def __ifloordiv__(self, other):
        return self.__floordiv__(other)

    def __lt__(self, other):
        out = tensor_operator_registry.get('__lt__')(self, other)
        return out

    def __le__(self, other):
        out = tensor_operator_registry.get('__le__')(self, other)
        return out

    def __getitem__(self, index):
        out = tensor_operator_registry.get('__getitem__')(self, index)
        return out

    def __setitem__(self, index, value):
        out = tensor_operator_registry.get('__setitem__')(self, index, value)
        self.assign_value(out)
        return self

    def __gt__(self, other):
        out = tensor_operator_registry.get('__gt__')(self, other)
        return out

    def __ge__(self, other):
        out = tensor_operator_registry.get('__ge__')(self, other)
        return out

    def __len__(self):
        out = tensor_operator_registry.get('shape')(self)
        if out:
            return out[0]
        raise TypeError("len() is not supported for a 0-D tensor.")

    def __str__(self):
        if self.dtype == mstype.type_none:
            return "Unknown Tensor type!"
        return str(self.asnumpy())

    @property
    def shape(self):
        """Returns the shape of the tensor as a tuple."""
        return self._shape

    @property
    def dtype(self):
        """Return the dtype of the tensor (:class:`mindspore.dtype`)."""
        return self._dtype

    @property
    def size(self):
        """Returns the total number of elements in tensor."""
        return self._size

    @property
    def ndim(self):
        """Return the number of tensor dimensions."""
        return len(self._shape)

    @property
    def has_init(self):
297        """tensor is inited."""
        return self.init is not None

    @property
    def itemsize(self):
        """Return the length of one tensor element in bytes."""
        return self._itemsize

    @property
    def strides(self):
        """Return the tuple of bytes to step in each dimension when traversing a tensor."""
        return self._strides

    @property
    def nbytes(self):
        """Return the total number of bytes taken by the tensor."""
        return self._nbytes

    @property
    def T(self):
        """Return the transposed tensor."""
        return self.transpose()

    @property
    def virtual_flag(self):
        """Used to mark whether the tensor is virtual. If the tensor is virtual, return True."""
        return self._virtual_flag

    @virtual_flag.setter
    def virtual_flag(self, value):
        """The setter of virtual_flag."""
        if not isinstance(value, bool):
            raise TypeError("virtual_flag must be bool.")
        self._virtual_flag = value

    @staticmethod
    def from_numpy(array):
        """
        Convert a numpy array to a Tensor without copying its data.

        Args:
            array (numpy.ndarray): The input array.

        Returns:
            Tensor, has the same data type as the input array.
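
        Examples:
            >>> # A minimal usage sketch (illustrative):
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> arr = np.ones((2, 3), dtype=np.float32)
            >>> t = Tensor.from_numpy(arr)
            >>> print(t.shape)
            (2, 3)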
342        """
343        return Tensor(Tensor_.from_numpy(array))
344
345    def assign_value(self, value):
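        """Assign `value` to this tensor in place, flushing pending PyNative tasks first."""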
        PynativeExecutor_.get_instance().execute_all_task()
        self.assign_value_cpp(value)
        return self

    def item(self, index=None):
        """
        Get the item at the specified index from the Tensor.

        Note:
            Tensor.item returns a Tensor scalar instead of a Python scalar.

        Args:
            index (Union[None, int, tuple(int)]): The index in Tensor. Default: None.

        Returns:
            A Tensor scalar, dtype is the same as the original Tensor.

        Raises:
            ValueError: If the length of the `index` is not equal to self.ndim.

        Supported Platforms:
            ``Ascend`` ``GPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([[1,2,3],[4,5,6]], dtype=np.float32))
            >>> x = x.item((0,1))
            >>> print(x)
            2.0

        """
        output = tensor_operator_registry.get('item')(self, index)
        return output

    def itemset(self, *args):
        r"""
        Insert a scalar into a tensor (the scalar is cast to the tensor's dtype, if possible).

        There must be at least one argument, and the last argument is defined as the item.
        Then, tensor.itemset(\*args) is equivalent to :math:`tensor[args] = item`.

        Args:
            args (Union[(numbers.Number), (int/tuple(int), numbers.Number)]): The arguments that
                specify the index and value. If `args` contain one argument (a scalar),
                it is only used in case tensor is of size 1. If `args` contain two
                arguments, the last argument is the value to be set and must be a
                scalar, the first argument specifies a single tensor element location.
                It is either an int or a tuple.

        Returns:
            A new Tensor, with value set by :math:`tensor[args] = item`.

        Raises:
            ValueError: If the length of the first argument is not equal to self.ndim.
            IndexError: If only one argument is provided, and the original Tensor is not scalar.

        Supported Platforms:
            ``Ascend`` ``GPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([[1,2,3],[4,5,6]], dtype=np.float32))
            >>> x = x.itemset((0,1), 4)
            >>> print(x)
            [[1. 4. 3.]
            [4. 5. 6.]]
        """
        output = tensor_operator_registry.get('itemset')(self, *args)
        return output

    def asnumpy(self):
        """Convert tensor to numpy array."""
        self._init_check()
        PynativeExecutor_.get_instance().execute_all_task()
        return Tensor_.asnumpy(self)

    def flush_from_cache(self):
        """Flush cache data to the host if the tensor is cache-enabled."""
        self._init_check()
        Tensor_._flush_from_cache(self)

    def all(self, axis=(), keep_dims=False):
        """
        Check whether all array elements along the given axis evaluate to True.

        Args:
            axis (Union[None, int, tuple(int)]): Dimensions of reduction,
                when the axis is None or empty tuple, reduce all dimensions. Default: ().
            keep_dims (bool): Whether to keep the reduced dimensions. Default: False.

        Returns:
            Tensor, if all array elements along the given axis evaluate to True, its value is True,
            otherwise its value is False. If the axis is None or empty tuple, reduce all dimensions.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor
            >>> a = Tensor([True, True, False])
            >>> output = a.all()
            >>> print(output)
            False
        """

        self._init_check()
        if axis is None:
            axis = ()
        return tensor_operator_registry.get('all')(keep_dims)(self, axis)

    def any(self, axis=(), keep_dims=False):
        """
        Check whether any array element along the given axis evaluates to True.

        Args:
            axis (Union[None, int, tuple(int)]): Dimensions of reduction,
                when the axis is None or empty tuple, reduce all dimensions. Default: ().
            keep_dims (bool): Whether to keep the reduced dimensions. Default: False.

        Returns:
            Tensor, if any array element along the given axis evaluates to True, its value is True,
            otherwise its value is False. If the axis is None or empty tuple, reduce all dimensions.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor
            >>> a = Tensor([True, True, False])
            >>> output = a.any()
            >>> print(output)
            True
        """

        self._init_check()
        if axis is None:
            axis = ()
        return tensor_operator_registry.get('any')(keep_dims)(self, axis)

    def view(self, *shape):
        r"""
        Reshape the tensor according to the input shape.

        Args:
            shape (Union[tuple(int), int]): Dimension of the output tensor.

        Returns:
            Tensor, has the same dimension as the input shape.
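
        Examples:
            >>> # A minimal usage sketch (illustrative):
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.ones((2, 3), dtype=np.float32))
            >>> output = a.view((3, 2))
            >>> print(output.shape)
            (3, 2)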
496        """
497        self._init_check()
498        if not shape:
499            raise ValueError("The shape variable should not be empty")
500        if isinstance(shape[0], tuple):
501            if len(shape) != 1:
502                raise ValueError(f"Only one tuple is needed, but got {shape}")
503            shape = shape[0]
504        return tensor_operator_registry.get('reshape')()(self, shape)
505
506    def expand_as(self, x):
507        """
508        Expand the dimension of target tensor to the dimension of input tensor.
509
510        Args:
511            x (Tensor): The input tensor. The shape of input tensor must obey
512                the broadcasting rule.
513
514        Returns:
515            Tensor, has the same dimension as input tensor.
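
        Examples:
            >>> # A minimal usage sketch (illustrative): broadcast a (3,) tensor to a (2, 3) shape.
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.array([1, 2, 3], dtype=np.float32))
            >>> b = Tensor(np.ones((2, 3), dtype=np.float32))
            >>> output = a.expand_as(b)
            >>> print(output.shape)
            (2, 3)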
516        """
517        self._init_check()
518        return tensor_operator_registry.get('broadcast_to')(x.shape)(self)
519
520    def abs(self):
521        """
        Return the absolute value element-wise.

        Returns:
            Tensor, with the absolute value of each element.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor
            >>> a = Tensor([1.1, -2.1]).astype("float32")
            >>> output = a.abs()
            >>> print(output)
            [1.1 2.1]
        """
        self._init_check()
        return tensor_operator_registry.get('abs')()(self)

    def mean(self, axis=(), keep_dims=False):
        """
        Reduce a dimension of a tensor by averaging all elements in the dimension.

        Args:
            axis (Union[None, int, tuple(int), list(int)]): Dimensions of reduction,
                when the axis is None or empty tuple, reduce all dimensions. Default: ().
            keep_dims (bool): Whether to keep the reduced dimensions. Default: False.

        Returns:
            Tensor, has the same data type as input tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.array([1, 2, 3], dtype=np.float32))
            >>> output = input_x.mean()
            >>> print(output)
            2.0
        """
        self._init_check()
        if axis is None:
            axis = ()
        return tensor_operator_registry.get('mean')(keep_dims)(self, axis)

    def transpose(self, *axes):
        r"""
        Return a view of the tensor with axes transposed.

        - For a 1-D tensor this has no effect, as a transposed vector is simply the same vector.
        - For a 2-D tensor, this is a standard matrix transpose.
        - For an n-D tensor, if axes are given, their order indicates how the axes are permuted.

        If axes are not provided and ``tensor.shape = (i[0], i[1],...i[n-2], i[n-1])``,
        then ``tensor.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.

        Args:
            axes(Union[None, tuple(int), list(int), int], optional): If axes is None or
                blank, the method will reverse the order of the axes. If axes is tuple(int)
                or list(int), tensor.transpose() will transpose the tensor to the new axes order.
                If axes is int, this form is simply intended as a convenience alternative to the
                tuple/list form.

        Returns:
            Tensor, has the same dimension as input tensor, with axes suitably permuted.

        Raises:
            TypeError: If input arguments have types not specified above.
            ValueError: If the number of `axes` is not equal to `self.ndim`.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((1,2,3), dtype=np.float32))
            >>> x = x.transpose()
            >>> print(x.shape)
            (3, 2, 1)
        """
        self._init_check()
        perm = validator.check_transpose_axis(axes, self.ndim)
        return tensor_operator_registry.get('transpose')()(self, perm)

    def reshape(self, *shape):
        """
        Give a new shape to a tensor without changing its data.

        Args:
            shape(Union[int, tuple(int), list(int)]): The new shape should be compatible
                with the original shape. If an integer, then the result will be a 1-D
                array of that length. One shape dimension can be -1. In this case, the
                value is inferred from the length of the array and remaining dimensions.

        Returns:
            Tensor, with new specified shape.

        Raises:
            TypeError: If `shape` is not an integer, or a list or tuple of integers.
            ValueError: If the new shape is not compatible with the original shape.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor
            >>> from mindspore import dtype as mstype
            >>> x = Tensor([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], dtype=mstype.float32)
            >>> output = x.reshape((3, 2))
            >>> print(output)
            [[-0.1  0.3]
            [ 3.6  0.4]
            [ 0.5 -3.2]]
        """
        self._init_check()
        new_shape = validator.check_reshape_shp(shape)
        return tensor_operator_registry.get('reshape')()(self, new_shape)

    def ravel(self):
        """
        Return a contiguous flattened tensor.

        Returns:
            Tensor, a 1-D tensor, containing the same elements as the input.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
            >>> output = x.ravel()
            >>> print(output.shape)
            (24,)
        """
        self._init_check()
        reshape_op = tensor_operator_registry.get('reshape')()
        return reshape_op(self, (-1,))

    def flatten(self, order='C'):
        r"""
        Return a copy of the tensor collapsed into one dimension.

        Args:
            order (str, optional): Can choose between 'C' and 'F'. 'C' means to
                flatten in row-major (C-style) order. 'F' means to flatten in column-major
                (Fortran-style) order. Only 'C' and 'F' are supported. Default: 'C'.

        Returns:
            Tensor, has the same data type as input.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Raises:
            TypeError: If `order` is not string type.
            ValueError: If `order` is string type, but not 'C' or 'F'.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
            >>> output = x.flatten()
            >>> print(output.shape)
            (24,)
        """
        self._init_check()
        reshape_op = tensor_operator_registry.get('reshape')()
        trans_op = tensor_operator_registry.get('transpose')()

        order = validator.check_flatten_order(order)
        if order == 'C':
            return reshape_op(self, (-1,))

        perm = tuple(range(self.ndim-1, -1, -1))
        return reshape_op(trans_op(self, perm), (-1,))

    def swapaxes(self, axis1, axis2):
        """
        Interchange two axes of a tensor.

        Args:
            axis1 (int): First axis.
            axis2 (int): Second axis.

        Returns:
            Transposed tensor, has the same data type as the input.

        Raises:
            TypeError: If `axis1` or `axis2` is not integer.
            ValueError: If `axis1` or `axis2` is not in the range of :math:`[-ndim, ndim-1]`.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
            >>> output = x.swapaxes(0, 2)
            >>> print(output.shape)
            (4, 3, 2)
727        """
728        self._init_check()
729        axis1, axis2 = validator.check_swapaxes_axis((axis1, axis2), self.ndim)
730
731        if axis1 == axis2:
732            return self
733        if axis1 > axis2:
734            axis1, axis2 = axis2, axis1
735
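        # Build the permutation that swaps axis1 and axis2 while keeping every other axis in place.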
        perm = tuple(range(0, self.ndim))
        if axis2 + 1 < self.ndim:
            new_perm = perm[0:axis1] + perm[axis2:axis2+1] + \
                perm[axis1+1:axis2] + perm[axis1:axis1+1] + perm[axis2+1:]
        else:
            new_perm = perm[0:axis1] + perm[axis2:axis2+1] + \
                perm[axis1+1:axis2] + perm[axis1:axis1+1]

        return tensor_operator_registry.get('transpose')()(self, new_perm)

    def squeeze(self, axis=None):
        """
        Remove single-dimensional entries from the shape of a tensor.

        Args:
            axis (Union[None, int, list(int), tuple(int)], optional): Selects a subset of the entries of
                length one in the shape. If an axis is selected with shape entry greater than one,
                an error is raised. Default is None.

        Returns:
            Tensor, with all or a subset of the dimensions of length 1 removed.

        Raises:
            TypeError: If input arguments have types not specified above.
            ValueError: If specified axis has shape entry :math:`> 1`.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((1,2,2,1), dtype=np.float32))
            >>> x = x.squeeze()
            >>> print(x.shape)
            (2, 2)
        """
        self._init_check()
        if axis is None:
            return tensor_operator_registry.get('squeeze')(self)
        new_shape = validator.prepare_shape_for_squeeze(self.shape, axis)
        return tensor_operator_registry.get('reshape')()(self, new_shape)

    def astype(self, dtype, copy=True):
        """
        Return a copy of the tensor, cast to a specified type.

        Args:
            dtype (Union[:class:`mindspore.dtype`, str]): Designated tensor dtype, can be in format
                of :class:`mindspore.dtype.float32` or `float32`.
            copy (bool, optional): By default, astype always returns a newly allocated
                tensor. If this is set to false, the input tensor is returned instead
                of a copy if possible. Default: True.

        Returns:
            Tensor, with the designated dtype.

        Raises:
            TypeError: If `dtype` has types not specified above, or values cannot be understood.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.ones((1,2,2,1), dtype=np.float32))
            >>> x = x.astype("int32")
            >>> print(x.dtype)
            Int32
        """
        self._init_check()
        dtype = validator.check_astype_dtype(dtype)
        if not copy and dtype == self.dtype:
            return self
        return tensor_operator_registry.get('cast')(self, dtype)

    def argmax(self, axis=None):
        """
        Return the indices of the maximum values along an axis.

        Args:
            axis (int, optional): By default, the index is into
                the flattened tensor, otherwise along the specified axis.

        Returns:
            Tensor, indices into the input tensor. It has the same
            shape as self.shape with the dimension along axis removed.

        Raises:
            ValueError: if the axis is out of range.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.arange(10, 16).reshape(2, 3).astype("float32"))
            >>> print(a.argmax())
            5
        """
        # P.Argmax only supports float
        a = self.astype(mstype.float32)
        if axis is None:
            a = a.ravel()
            axis = 0
        else:
            axis = validator.check_axis_in_range(axis, a.ndim)
        return tensor_operator_registry.get('argmax')(axis)(a)

    def argmin(self, axis=None):
        """
        Return the indices of the minimum values along an axis.

        Args:
            axis (int, optional): By default, the index is into
                the flattened tensor, otherwise along the specified axis.

        Returns:
            Tensor, indices into the input tensor. It has the same
            shape as self.shape with the dimension along axis removed.

        Raises:
            ValueError: if the axis is out of range.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.arange(10, 16).reshape(2, 3).astype("float32"))
            >>> print(a.argmin())
            0
        """
        # P.Argmax only supports float
        a = self.astype(mstype.float32)
        if axis is None:
            a = a.ravel()
            axis = 0
        else:
            axis = validator.check_axis_in_range(axis, a.ndim)
        # P.Argmin is currently not supported
        return tensor_operator_registry.get('argmax')(axis)(tensor_operator_registry.get('__neg__')(a))

    def cumsum(self, axis=None, dtype=None):
        """
        Return the cumulative sum of the elements along a given axis.

        Note:
            If ``self.dtype`` is :class:`int8`, :class:`int16` or :class:`bool`, the result
            `dtype` will be elevated to :class:`int32`; :class:`int64` is not supported.

        Args:
            axis (int, optional): Axis along which the cumulative sum is computed. The
                default (None) is to compute the cumsum over the flattened array.
            dtype (:class:`mindspore.dtype`, optional): If not specified, stays the same as the original
                tensor, unless it has an integer dtype with a precision less than :class:`float32`.
                In that case, :class:`float32` is used. Default: None.

        Raises:
            ValueError: if the axis is out of range.

        Returns:
            Tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.ones((3,3)).astype("float32"))
            >>> output = a.cumsum(axis=0)
            >>> print(output)
            [[1. 1. 1.]
            [2. 2. 2.]
            [3. 3. 3.]]
        """
        x = self
        original_dtype = x.dtype
        # If the original tensor is int with precision less than int32, convert it to int32
        if x.dtype in (mstype.bool_, mstype.int8, mstype.int16, mstype.uint8, mstype.uint16):
            x = x.astype(mstype.int32)
        if axis is None:
            x = x.ravel()
            axis = 0
        validator.check_axis_in_range(axis, x.ndim)
        if dtype is not None and original_dtype != dtype:
            return tensor_operator_registry.get('cumsum')()(x, axis).astype(dtype, copy=False)
        return tensor_operator_registry.get('cumsum')()(x, axis)

    def copy(self):
        """
        Return a copy of the tensor.

        Note:
            The current implementation does not support `order` argument.

        Returns:
            Copied tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.ones((3,3)).astype("float32"))
            >>> output = a.copy()
            >>> print(output)
            [[1. 1. 1.]
            [1. 1. 1.]
            [1. 1. 1.]]
        """
        if self.size == 0:
            return self
        origin_dtype = self.dtype
        x = self
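        # No dedicated copy operator is used here: a double logical_not copies bool tensors,
        # and dividing by 1.0 (after an optional cast to float32) copies numeric tensors.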
        logical_not_op = tensor_operator_registry.get('logical_not')()
        if origin_dtype == mstype.bool_:
            return logical_not_op(logical_not_op(x))
        if origin_dtype != mstype.float64:
            x = x.astype("float32")
        x = x / 1.0
        x = x.astype(origin_dtype)
        return x

    def max(self, axis=None, keepdims=False, initial=None, where=True):
        """
        Return the maximum of a tensor or maximum along an axis.

        Args:
            axis (Union[None, int, tuple of ints], optional): Axis or
                axes along which to operate. By default, flattened input is used. If
                this is a tuple of ints, the maximum is selected over multiple axes,
                instead of a single axis or all the axes as before. Default: None.
            keepdims (bool, optional):
                If this is set to True, the axes which are reduced are left in the
                result as dimensions with size one. With this option, the result will
                broadcast correctly against the input array. Default: False.
            initial (scalar, optional):
                The minimum value of an output element. Must be present to allow
                computation on empty slice. Default: None.
            where (bool Tensor, optional):
                A boolean array which is broadcasted to match the dimensions of array,
                and selects elements to include in the reduction. If non-default value
                is passed, initial must also be provided. Default: True.

        Returns:
            Tensor or scalar, maximum of input tensor. If `axis` is None, the result is a scalar
            value. If `axis` is given, the result is an array of dimension ``self.ndim - 1``.

        Raises:
            TypeError: if arguments have types not specified above.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.arange(4).reshape((2, 2)).astype('float32'))
            >>> output = a.max()
            >>> print(output)
            3.0
        """
        reduce_ = tensor_operator_registry.get("reduce")
        reduce_max = tensor_operator_registry.get("reduce_max")
        maximum = tensor_operator_registry.get("maximum")
        return reduce_(self, reduce_max(keepdims), cmp_fn=maximum(), axis=axis, keepdims=keepdims,
                       initial=initial, where=where)

    def min(self, axis=None, keepdims=False, initial=None, where=True):
        """
        Return the minimum of a tensor or minimum along an axis.

        Args:
            axis (Union[None, int, tuple of ints], optional): Axis or
                axes along which to operate. By default, flattened input is used. If
                this is a tuple of ints, the minimum is selected over multiple axes,
                instead of a single axis or all the axes as before. Default: None.
            keepdims (bool, optional):
                If this is set to True, the axes which are reduced are left in the
                result as dimensions with size one. With this option, the result will
                broadcast correctly against the input array. Default: False.
            initial (scalar, optional):
                The maximum value of an output element. Must be present to allow
                computation on empty slice. Default: None.
            where (bool Tensor, optional):
                A boolean array which is broadcasted to match the dimensions of array,
                and selects elements to include in the reduction. If non-default value
                is passed, initial must also be provided. Default: True.

        Returns:
            Tensor or scalar, minimum of input tensor. If the axis is None, the result is a scalar
            value. If `axis` is given, the result is an array of dimension ``self.ndim - 1``.

        Raises:
            TypeError: if arguments have types not specified above.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.arange(4).reshape((2,2)).astype('float32'))
            >>> output = a.min()
            >>> print(output)
            0.0
        """
        reduce_ = tensor_operator_registry.get("reduce")
        reduce_min = tensor_operator_registry.get("reduce_min")
        minimum = tensor_operator_registry.get("minimum")
        return reduce_(self, reduce_min(keepdims), cmp_fn=minimum(), axis=axis, keepdims=keepdims,
                       initial=initial, where=where)

    def fill(self, value):
        """
        Fill the array with a scalar value.

        Note:
            Unlike Numpy, tensor.fill() always returns a new tensor, instead of
            filling the original tensor in place.

        Args:
            value (Union[None, int, float, bool]): All elements of the tensor will be assigned this value.

        Returns:
            Tensor, with the original dtype and shape as input tensor.

        Raises:
            TypeError: If input arguments have types not specified above.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.arange(4).reshape((2,2)).astype('float32'))
            >>> print(a.fill(1.0))
            [[1. 1.]
            [1. 1.]]
        """
        if value is None:
            if self.dtype not in (mstype.float16, mstype.float32, mstype.float64):
                raise TypeError("If None is used as value, the original Tensor's dtype must be float.")
            value = Tensor(float('nan')).astype("float32")
            return tensor_operator_registry.get("tile")()(value, self.shape).astype(self.dtype)
        if not isinstance(value, (int, float, bool)):
            raise TypeError("input value must be a scalar.")
        return tensor_operator_registry.get("fill")(self.dtype, self.shape, value)

    def ptp(self, axis=None, keepdims=False):
        """
        Return the range of values (maximum - minimum) along an axis.
        The name of the function comes from the acronym for 'peak to peak'.

        Note:
            Numpy arguments `dtype` and `out` are not supported.

        Args:
            axis (Union[None, int, tuple(int)]): Axis or axes along which the range is computed.
                The default is to compute the range of the flattened array. Default: None.
            keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
                dimensions with size one. With this option, the result will broadcast correctly against the array.
                Default is False.

        Returns:
            Tensor.

        Raises:
            TypeError: if `self` is not a tensor, or `axis` and `keepdims` have types not specified above.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor
            >>> x = Tensor([[4.0, 9.0, 2.0, 10.0], [6.0, 9.0, 7.0, 12.0]]).astype("float32")
            >>> print(x.ptp(axis=1))
            [8. 6.]
            >>> print(x.ptp(axis=0))
            [2. 0. 5. 2.]
        """
        if not isinstance(keepdims, bool):
            raise TypeError('keepdims should be boolean')
        if axis is None:
            axis = ()
        else:
            validator.check_axis_type(axis, True, True, False)
            axis = validator.check_axis_valid(axis, self.ndim)

        return self.max(axis, keepdims) - self.min(axis, keepdims)

    def clip(self, xmin, xmax, dtype=None):
        """
        Clips (limits) the values in a Tensor.

        Given an interval, values outside the interval are clipped to the interval edges.
        For example, if an interval of :math:`[0, 1]` is specified, values smaller than 0 become 0,
        and values larger than 1 become 1.

        Note:
            Currently, clip with `xmin=nan` or `xmax=nan` is not supported.

        Args:
            xmin (Tensor, scalar, None): Minimum value. If None, clipping is not performed
                on lower interval edge. Not more than one of `xmin` and `xmax` may be None.
            xmax (Tensor, scalar, None): Maximum value. If None, clipping is not performed
                on upper interval edge. Not more than one of `xmin` and `xmax` may be None.
                If `xmin` or `xmax` are tensors, then the three tensors will be broadcasted
                to match their shapes.
            dtype (:class:`mindspore.dtype`, optional): Overrides the dtype of the
                output Tensor. Default is None.

        Returns:
            Tensor, a tensor with the elements of input tensor, but where values
            < `xmin` are replaced with `xmin`, and those > `xmax` with `xmax`.

        Raises:
            TypeError: If inputs have types not specified above.
            ValueError: If the shapes of the tensor, `xmin` and `xmax` cannot broadcast, or both `xmin` and `xmax` are `None`.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor
            >>> x = Tensor([1, 2, 3, -4, 0, 3, 2, 0]).astype("float32")
            >>> output = x.clip(0, 2)
            >>> print(output)
            [1. 2. 2. 0. 0. 2. 2. 0.]
        """
        if xmin is None and xmax is None:
            raise ValueError("One of max or min must be given.")
        x = self
        # F.maximum/minimum does not support when both operands are scalar
        if xmin is not None:
            xmin = Tensor(xmin).astype(x.dtype)
            if x.ndim == 0 and xmin.ndim == 0:
                x = tensor_operator_registry.get("maximum")()(x.reshape((1,)), xmin).squeeze()
            else:
                x = tensor_operator_registry.get("maximum")()(x, xmin)
        if xmax is not None:
            xmax = Tensor(xmax).astype(x.dtype)
            if x.ndim == 0 and xmax.ndim == 0:
                x = tensor_operator_registry.get("minimum")()(x.reshape((1,)), xmax).squeeze()
            else:
                x = tensor_operator_registry.get("minimum")()(x, xmax)
        if dtype is not None and dtype != x.dtype:
            return x.astype(dtype)
        return x

    def _init_check(self):
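        """Materialize deferred `init` data, if any, before the tensor is used."""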
        if self.has_init:
            self.init_data()
        return self

    def init_data(self, slice_index=None, shape=None, opt_shard_group=None):
        """
        Get the tensor format data of this Tensor.
        The init_data function can only be called once for the same tensor.

        Args:
            slice_index (int): Slice index of a parameter's slices.
                It is used when initializing a slice of a parameter; it guarantees that devices
                using the same slice can generate the same tensor. Default: None.
            shape (list[int]): Shape of the slice. It is used when initializing a slice of the parameter. Default: None.
            opt_shard_group (str): Optimizer shard group which is used in auto or semi-auto parallel mode
                to get one shard of a parameter's slice. Default: None.

        Returns:
            Initialized Tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore as ms
            >>> import mindspore.common.initializer as init
            >>> x = init.initializer(init.Constant(1), [2, 2], ms.float32)
            >>> out = x.init_data()
            >>> print(out)
            [[1. 1.]
             [1. 1.]]
        """
        if self.init is None:
            raise TypeError("init_data requires Tensor.init to be set; init can't be None.")

        if shape is None:
            shape = self.shape

        try:
            arr = np.ndarray(shape, dtype=mstype.dtype_to_nptype(self.dtype))
        except ValueError:
            msg = "Error shape={}".format(shape)
            logger.error(msg)
            raise ValueError(msg)

        class seed_context:
            """Set and restore seed."""

            def __init__(self, init):
                self.init = init
                from .seed import get_seed
                global_seed = get_seed()
                self._np_seed = np.random.get_state()[1][0]
                self.need_set_seed = ((slice_index is not None) and (global_seed is None))

            def __enter__(self):
                if self.need_set_seed:
                    self.seed = self.init.seed
                    np.random.seed(slice_index)
                    self.init.seed = slice_index

            def __exit__(self, ptype, value, trace):
                if self.need_set_seed:
                    np.random.seed(self._np_seed)
                    self.init.seed, _ = self.seed

        with seed_context(self.init):
            self.init(arr)
        data = np.array(arr)
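        # Under optimizer weight sharding, keep only this rank's slice of the initialized data.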
        if opt_shard_group:
            rank = get_rank(opt_shard_group)
            size = get_group_size(opt_shard_group)
            data = np.split(data, size)[rank]
        self.init = None
        self.assign_value(Tensor(data, dtype=self.dtype))
        return self

    def to_tensor(self, slice_index=None, shape=None, opt_shard_group=None):
        """
        Return init_data() and get the tensor format data of this Tensor.

        Note:
            The usage of `to_tensor` is deprecated. Please use `init_data`.

        Args:
            slice_index (int): Slice index of a parameter's slices.
                It is used when initializing a slice of a parameter; it guarantees that devices
                using the same slice can generate the same tensor. Default: None.
            shape (list[int]): Shape of the slice. It is used when initializing a slice of the parameter. Default: None.
            opt_shard_group (str): Optimizer shard group which is used in auto or semi-auto parallel mode
                to get one shard of a parameter's slice. Default: None.

        Returns:
            Initialized Tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore as ms
            >>> import mindspore.common.initializer as init
            >>> x = init.initializer(init.Constant(1), [2, 2], ms.float32)
            >>> out = x.to_tensor()
            >>> print(out)
            [[1. 1.]
             [1. 1.]]
        """
        logger.warning("WARN_DEPRECATED: The usage of to_tensor is deprecated."
                       " Please use init_data")
        return self.init_data(slice_index, shape, opt_shard_group)

    def resize(self, *new_shape):
        """
        Changes shape and size of array in-place.

        Note:
            Instead of changing the size of the input array in place and returning nothing as in
            numpy, this method returns a new Tensor with the input size.
            Numpy argument `refcheck` is not supported.

        Args:
            new_shape (Union[int, tuple(int)]): Shape of the resized array.

        Returns:
            Tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([[0, 1], [2, 3]]))
            >>> x = x.resize(2, 3)
            >>> print(x)
            [[0 1 2]
            [3 0 0]]
        """
        if not new_shape:
            return self
        if len(new_shape) == 1:
            if isinstance(new_shape[0], tuple):
                new_shape = new_shape[0]
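        # Flatten first, then zero-pad or truncate so the element count matches the requested shape.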
        flattened = self.ravel()
        cur_size = flattened.size
        new_size = tensor_operator_registry.get('shape_mul')(new_shape)
        diff_size = new_size - cur_size
        if diff_size > 0:
            pad_val = tensor_operator_registry.get('fill')(self.dtype, (diff_size,), 0)
            res = tensor_operator_registry.get('concatenate')(0)((flattened, pad_val))
        else:
            res = flattened[:new_size]
        return res.reshape(new_shape)

    def diagonal(self, offset=0, axis1=0, axis2=1):
        """
        Return specified diagonals.

        Args:
            offset (int, optional): Offset of the diagonal from the main diagonal.
                Can be positive or negative. Defaults to main diagonal.
            axis1 (int, optional): Axis to be used as the first axis of the 2-D
                sub-arrays from which the diagonals should be taken. Defaults to
                first axis (0).
            axis2 (int, optional): Axis to be used as the second axis of the 2-D
                sub-arrays from which the diagonals should be taken. Defaults to
                second axis.

        Returns:
            Tensor, if `a` is 2-D, then a 1-D array containing the diagonal.

        Raises:
            ValueError: if the input tensor has less than two dimensions.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.arange(4).reshape(2, 2))
            >>> print(a)
            [[0 1]
            [2 3]]
            >>> output = a.diagonal()
            >>> print(output)
            [0 3]
        """
        ndim = self.ndim
        if ndim < 2:
            raise ValueError('diagonal requires an array of at least two dimensions')
        dtype = self.dtype

        axes = validator.check_axis_valid((axis1, axis2), ndim)
        perm = ()
        for i in range(ndim):
            if i not in axes:
                perm += (i,)
        perm += axes
        a = self.transpose(perm)

        shape = a.shape
        n, m = shape[-2:]

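        # Build an identity-like mask (shifted by `offset`), broadcast it over the batch
        # dimensions, and reduce the masked product along the last axis to read the diagonal.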
        e = tensor_operator_registry.get('eye')(n, m, dtype)
        if offset >= m or offset <= -n:
            e = tensor_operator_registry.get('fill')(dtype, (n, m), 0)
        elif offset != 0:
            e = e.astype(mstype.float32)
            if offset > 0:
                e_left = tensor_operator_registry.get('fill')(mstype.float32, (n, offset), 0)
                e_right = e[..., 0:m-offset:1]
                e = tensor_operator_registry.get('concatenate')(1)((e_left, e_right)).astype(dtype)
            elif offset < 0:
                e_upper = tensor_operator_registry.get('fill')(mstype.float32, (-offset, m), 0)
                e_lower = e[0:n+offset:1, ...]
                e = tensor_operator_registry.get('concatenate')(0)((e_upper, e_lower)).astype(dtype)
        e = tensor_operator_registry.get('broadcast_to')(shape)(e)

        prod = tensor_operator_registry.get('__mul__')(a, e)
        res = tensor_operator_registry.get('reduce_sum')(prod.astype(mstype.float32), -1)

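        # Slice the reduced result down to the valid diagonal length,
        # min(n, max(0, m - offset)) - max(0, -offset) elements, starting at max(0, -offset).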
        begin = ()
        for i in range(ndim-2):
            begin += (0,)
        last_dim_begin = max(0, -offset)
        begin += (last_dim_begin,)
        size = res.shape[:-1]
        last_dim_end = min(
            shape[-2], max(0, shape[-1] - offset)) - last_dim_begin
        if last_dim_end <= 0:
            return Tensor([])
        size += (last_dim_end,)
        res = tensor_operator_registry.get('tensor_slice')(res, begin, size)
        return res.astype(dtype)

    def trace(self, offset=0, axis1=0, axis2=1, dtype=None):
        """
        Return the sum along diagonals of the array.

        Args:
            offset (int, optional): Offset of the diagonal from the main diagonal.
                Can be positive or negative. Defaults to main diagonal.
            axis1 (int, optional): Axis to be used as the first axis of the 2-D
                sub-arrays from which the diagonals should be taken. Defaults to
                first axis (0).
            axis2 (int, optional): Axis to be used as the second axis of the 2-D
                sub-arrays from which the diagonals should be taken. Defaults to
                second axis (1).
            dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
                output Tensor.

        Returns:
            Tensor, the sum along diagonals.

        Raises:
            ValueError: if the input tensor has fewer than two dimensions.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.eye(3, dtype=np.float32))
            >>> print(x.trace())
            3.0
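            >>> # the identity matrix has zeros off the main diagonal
            >>> print(x.trace(offset=1))
            0.0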
        """
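        # trace reduces to summing the extracted diagonal along its last axis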
        d = self.diagonal(offset, axis1=axis1, axis2=axis2)
        shape = d.shape
        if dtype is None:
            dtype = d.dtype
        if shape[-1] == 0:
            return tensor_operator_registry.get('fill')(dtype, shape[:-1], 0)
        res = tensor_operator_registry.get('reduce_sum')(d.astype(mstype.float32), -1)
        return res.astype(dtype)

    def take(self, indices, axis=None, mode='clip'):
        """
        Takes elements from an array along an axis.

        Args:
            indices (Tensor): The indices with shape `(Nj...)` of the values to extract.
            axis (int, optional): The axis over which to select values. By default,
                the flattened input array is used. Default: `None`.
            mode ('raise', 'wrap', 'clip', optional):

                - raise: Raises an error;
                - wrap: Wraps around;
                - clip: Clips to the range. `clip` mode means that all indices that are
                  too large are replaced by the index that addresses the last element
                  along that axis. Note that this disables indexing with negative numbers.

                Default: `clip`.

        Returns:
            Tensor, the indexed result.

        Raises:
            ValueError: if `axis` is out of range, or `mode` has values other than ('raise', 'wrap', 'clip').

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> a = Tensor(np.array([4, 3, 5, 7, 6, 8]))
            >>> indices = Tensor(np.array([0, 1, 4]))
            >>> output = a.take(indices)
            >>> print(output)
            [4 3 6]
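            >>> # take along an axis of a 2-D tensor (indices broadcast per numpy semantics)
            >>> b = Tensor(np.array([[1, 2], [3, 4]]))
            >>> print(b.take(Tensor(np.array([1, 0])), axis=1))
            [[2 1]
            [4 3]]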
        """
        if mode not in ('raise', 'wrap', 'clip'):
            raise ValueError('mode should be one of "raise", "wrap", or "clip"')
        if axis is None:
            a = self.ravel()
            axis = 0
        else:
            a = self
        ndim = a.ndim
        validator.check_axis_in_range(axis, ndim)
        axis = axis + ndim if axis < 0 else axis

        shape_a = a.shape
        shape_indices = indices.shape
        size_indices = indices.size
        indices = tensor_operator_registry.get('check_indices')(shape_a[axis], indices, mode)

        # reshapes indices to shape (Ni..., Nj..., Nk)
        shape_ni = shape_a[:axis]
        shape_nk = shape_a[axis + 1:]
        shape_out = shape_ni + shape_indices + shape_nk
        shape_indices = tuple(size_indices if i == axis else 1 for i in range(ndim))
        indices = indices.reshape(shape_indices)
        shape_indices = shape_ni + (indices.size,) + shape_nk
        indices = tensor_operator_registry.get('broadcast_to')(shape_indices)(indices)

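        # gather_d selects one element per output position along the chosen axis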
        res = tensor_operator_registry.get('gather_d')(a, axis, indices)
        return res.reshape(shape_out)

    def choose(self, choices, mode='clip'):
        """
        Construct an array from an index array and a list of arrays to choose from.

        Args:
            choices (Union[tuple, list, Tensor]): Choice arrays. The input tensor and all of
                the `choices` must be broadcastable to the same shape. If `choices` is itself
                an array, then its outermost dimension (i.e., the one corresponding to
                ``choices.shape[0]``) is taken as defining the "sequence".
            mode ('raise', 'wrap', 'clip', optional): Specifies how indices outside
                ``[0, n-1]`` will be treated:

                'raise' - raise an error;

                'wrap' - wrap around;

                'clip' - clip to the range. 'clip' mode means that all indices that are
                too large are replaced by the index that addresses the last element
                along that axis. Note that this disables indexing with negative numbers.

                Default: 'clip'.

        Returns:
            Tensor, the merged result.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Raises:
            ValueError: if the input tensor and any of the `choices` cannot be broadcast.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]]
            >>> x = Tensor(np.array([2, 3, 1, 0]))
            >>> print(x.choose(choices))
            [20 31 12  3]
        """
        if isinstance(choices, Tensor):
            shape_choice = validator.infer_out_shape(self.shape, choices.shape[1:])
            choices = tensor_operator_registry.get('broadcast_to')((choices.shape[0],) + shape_choice)(choices)
        else:
            # broadcasts choices to the same shape if choices is a sequence
            choicelist = []
            shapes = ()
            for choice in choices:
                if not isinstance(choice, Tensor):
                    choice = tensor_operator_registry.get('make_tensor')(choice)
                shapes += (choice.shape,)
                choicelist.append(choice)
            shape_choice = validator.infer_out_shape(self.shape, *shapes)
            tmp = []
            for choice in choicelist:
                tmp.append(tensor_operator_registry.get('broadcast_to')(shape_choice)(choice))
            choices = tensor_operator_registry.get('stack')(0)(tmp)

        if self.ndim == 0 or choices.ndim == 0:
            raise ValueError('inputs cannot be scalars')
        a = tensor_operator_registry.get('broadcast_to')(shape_choice)(self)
        dtype = choices.dtype
        # adjusts dtype for F.tensor_mul and F.gather_nd
        a = a.astype(mstype.int32)
        choices = choices.astype(mstype.int32)
        a = tensor_operator_registry.get('check_indices')(choices.shape[0], a, mode, allow_negative_index=False)

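        # Build a coordinate grid so gather_nd can pick
        # choices[a[i0, ..., ik], i0, ..., ik] for every output position.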
        grids = []
        ndim = len(a.shape)
        for i in range(ndim):
            dim_grid = Tensor(list(range(a.shape[i])), mstype.int32)
            dim_shape = validator.expanded_shape(ndim, a.shape[i], i)
            dim_grid = tensor_operator_registry.get('broadcast_to')(a.shape)(dim_grid.reshape(dim_shape))
            grids.append(dim_grid)
        grid = tensor_operator_registry.get('stack')(-1)(grids)
        indices = tensor_operator_registry.get('concatenate')(-1)((a.reshape(a.shape + (1,)), grid))
        return tensor_operator_registry.get('gather_nd')(choices, indices).astype(dtype)

    def searchsorted(self, v, side='left', sorter=None):
        """
        Finds indices where elements should be inserted to maintain order.

        Args:
            v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into the tensor.
            side ('left', 'right', optional): If 'left', the index of the first suitable
                location found is given. If 'right', return the last such index. If there is
                no suitable index, return either 0 or N (where N is the length of the tensor).
                Default: 'left'.
            sorter (Union[int, float, bool, list, tuple, Tensor]): Optional 1-D array of
                integer indices that sort the tensor into ascending order. They are typically
                the result of argsort. Default: `None`.

        Returns:
            Tensor, array of insertion points with the same shape as `v`.

        Raises:
            ValueError: if argument for `side` or `sorter` is invalid.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array([1, 2, 3, 4, 5]))
            >>> print(x.searchsorted(3))
            2
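            >>> # 'right' returns the index past the last equal element
            >>> print(x.searchsorted(3, side='right'))
            3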
        """
        if side not in ('left', 'right'):
            raise ValueError(f'{side} is an invalid value for keyword "side"')
        a = self.astype(mstype.float32)
        if not isinstance(v, Tensor):
            v = tensor_operator_registry.get('make_tensor')(v)
        shape = v.shape
        if sorter is not None:
            if not isinstance(sorter, Tensor):
                sorter = tensor_operator_registry.get('make_tensor')(sorter)
            if sorter.ndim != 1 or sorter.size != a.size:
                raise ValueError('sorter must be 1-D array with the same size as `a`')
            sorter = sorter.reshape(sorter.shape + (1,))
            a = tensor_operator_registry.get('gather_nd')(a, sorter)
        less_op = tensor_operator_registry.get('__le__') if side == 'left' else tensor_operator_registry.get('__lt__')
        i = tensor_operator_registry.get('fill')(mstype.int32, shape, 0)
        j = tensor_operator_registry.get('fill')(mstype.int32, shape, a.size)

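        # Vectorized binary search: ceil(log2(a.size + 1)) halvings pin down every
        # insertion point in [0, a.size] simultaneously.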
        sort_range = tuple(range(validator.get_log2_size(
            tensor_operator_registry.get('shape_mul')(a.shape) + 1)))
        for _ in sort_range:
            mid = (i + j)//2
            mask = less_op(v, tensor_operator_registry.get('gather_nd')(a, mid.reshape(mid.shape + (1,))))
            i = tensor_operator_registry.get('select')(mask, i, mid)
            j = tensor_operator_registry.get('select')(mask, mid, j)
        return j

    def var(self, axis=None, ddof=0, keepdims=False):
        """
        Compute the variance along the specified axis.

        The variance is the average of the squared deviations from the mean, i.e.,
        :math:`var = mean(abs(x - x.mean())**2)`.

        Return the variance, which is computed for the flattened array by default,
        otherwise over the specified axis.

        Note:
            Numpy arguments `dtype`, `out` and `where` are not supported.

        Args:
            axis (Union[None, int, tuple(int)]): Axis or axes along which the variance is computed.
                The default is to compute the variance of the flattened array. Default: `None`.
            ddof (int): Means Delta Degrees of Freedom. Default: 0.
                The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the
                number of elements.
            keepdims (bool): If `True`, the reduced axes are kept in the result as dimensions
                with size one. Default: `False`.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Returns:
            Variance tensor.

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.array([1., 2., 3., 4.], np.float32))
            >>> output = input_x.var()
            >>> print(output)
            1.25
        """
        if 0 in self.shape:
            return Tensor(float('nan'), self.dtype)
        if not isinstance(ddof, int):
            raise TypeError(f"integer argument expected, but got {type(ddof)}")
        if not isinstance(keepdims, int):
            raise TypeError(f"integer argument expected, but got {type(keepdims)}")

        if axis is None:
            axis = ()
        else:
            axis = validator.check_and_canonicalize_axes(axis, self.ndim)
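        # Compute the mean with keep_dims=True so the subtraction broadcasts;
        # the variance is then sum((x - mean)**2) / (N - ddof).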
        x_mean = tensor_operator_registry.get('mean')(True)(self, axis)
        x_sub = tensor_operator_registry.get('__sub__')(self, x_mean)
        x_pow = tensor_operator_registry.get('__pow__')(x_sub, 2)
        x_sum = tensor_operator_registry.get('sum')(bool(keepdims))(x_pow, axis)
        nums = 1
        if axis == ():
            nums = self.size
        else:
            for ax in axis:
                nums *= self.shape[ax]
        return tensor_operator_registry.get('__truediv__')(x_sum, nums - ddof)

    def std(self, axis=None, ddof=0, keepdims=False):
        """
        Compute the standard deviation along the specified axis.

        The standard deviation is the square root of the average of the squared deviations
        from the mean, i.e., :math:`std = sqrt(mean(abs(x - x.mean())**2))`.

        Return the standard deviation, which is computed for the flattened array by default,
        otherwise over the specified axis.

        Note:
            Numpy arguments `dtype`, `out` and `where` are not supported.

        Args:
            axis (Union[None, int, tuple(int)]): Axis or axes along which the standard
                deviation is computed. Default: `None`.

                If `None`, compute the standard deviation of the flattened array.
            ddof (int): Means Delta Degrees of Freedom. The divisor used in calculations is
                :math:`N - ddof`, where :math:`N` represents the number of elements. Default: 0.
            keepdims (bool): Default: `False`.

        Returns:
            Standard deviation tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.array([1, 2, 3, 4], dtype=np.float32))
            >>> output = input_x.std()
            >>> print(output)
            1.118034
        """
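        # std is the elementwise square root of the variance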
        x_var = self.var(axis, ddof, keepdims)
        return tensor_operator_registry.get('__pow__')(x_var, 0.5)

    def sum(self, axis=None, dtype=None, keepdims=False, initial=None):
        """
        Return sum of array elements over a given axis.

        Note:
            Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and
            `extobj` are not supported.

        Args:
            axis (Union[None, int, tuple(int)]): Axis or axes along which a sum is performed. Default: None.
                If None, sum all of the elements of the input array.
                If the axis is negative, it counts from the last to the first axis.
                If the axis is a tuple of ints, a sum is performed on all of the axes specified in the tuple
                instead of a single axis or all the axes as before.
            dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
                output Tensor.
            keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
                dimensions with size one. With this option, the result will broadcast correctly against the
                input array. Default: `False`.
            initial (scalar): Starting value for the sum. Default: `None`.

        Returns:
            Tensor. A tensor with the same shape as input, with the specified axis removed.
            If the input tensor is a 0-d array, or if the axis is None, a scalar is returned.

        Raises:
            TypeError: If input is not array_like, or `axis` is not int or tuple of ints,
                or `keepdims` is not integer, or `initial` is not scalar.
            ValueError: If any axis is out of range or duplicate axes exist.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> input_x = Tensor(np.array([-1, 0, 1]).astype(np.float32))
            >>> print(input_x.sum())
            0.0
            >>> input_x = Tensor(np.arange(10).reshape(2, 5).astype(np.float32))
            >>> print(input_x.sum(axis=1))
            [10. 35.]
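            >>> # initial is folded into the reduction as a starting value
            >>> print(input_x.sum(axis=1, initial=1))
            [11. 36.]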
        """
        input_x = self.astype(mstype.int32) if self.dtype == mstype.bool_ else self
        dtype = input_x.dtype if dtype is None else dtype
        if not isinstance(keepdims, int):
            raise TypeError(f"integer argument expected, but got {type(keepdims)}")
        if initial is not None and not isinstance(initial, (int, float, bool)):
            raise TypeError("initial argument should be a scalar.")
        if axis is None:
            axis = ()
        else:
            axis = validator.check_and_canonicalize_axes(axis, self.ndim)

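        # cast dtypes the reduction kernel does not support to float32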
        if not validator.check_type_support(input_x.dtype, 'GPU',
                                            (mstype.float64, mstype.float32, mstype.float16)):
            input_x = input_x.astype(mstype.float32)
        if 0 in self.shape:
            input_x = tensor_operator_registry.get('make_tensor')([0], self.dtype)
        res = tensor_operator_registry.get('sum')(bool(keepdims))(input_x, axis)
        if initial is not None:
            res += initial
        return res.astype(dtype)

    def repeat(self, repeats, axis=None):
        """
        Repeat elements of an array.

        Args:
            repeats (Union[int, tuple, list]): The number of repetitions for each element.
                `repeats` is broadcasted to fit the shape of the given axis.
            axis (int, optional): The axis along which to repeat values. By default,
                use the flattened input tensor, and return a flat output tensor.

        Returns:
            Tensor, has the same shape as input tensor except along the given axis.

        Raises:
            ValueError: if the axis is out of range.
            TypeError: if arguments have types not specified above.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import numpy as np
            >>> from mindspore import Tensor
            >>> x = Tensor(np.array(3))
            >>> print(x.repeat(4))
            [3 3 3 3]
            >>> x = Tensor(np.array([[1, 2],[3, 4]]))
            >>> print(x.repeat(2))
            [1 1 2 2 3 3 4 4]
            >>> print(x.repeat(3, axis=1))
            [[1 1 1 2 2 2]
            [3 3 3 4 4 4]]
            >>> print(x.repeat([1,2], axis=0))
            [[1 2]
            [3 4]
            [3 4]]
        """
        if not isinstance(repeats, (tuple, list)):
            repeats = (repeats,)
        for element in repeats:
            if not isinstance(element, int):
                raise TypeError(f"Each element in {repeats} should be integer, but got {type(element)}.")
        input_x = self
        if axis is None:
            input_x = self.ravel()
            axis = 0
        if not isinstance(axis, int):
            raise TypeError(f'axis should be an integer, not {type(axis)}')
        validator.check_axis_in_range(axis, input_x.ndim)
        axis = axis + input_x.ndim if axis < 0 else axis

        if len(repeats) == 1:
            repeats = repeats[0]
            if repeats == 0:
                return Tensor_(input_x.dtype, (0,))
            return tensor_operator_registry.get('repeat_elements')(input_x, repeats, axis)
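        # Per-element repeat counts: split along the axis, repeat each slice,
        # then concatenate the pieces back together.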
        size = input_x.shape[axis]
        if len(repeats) != size:
            raise ValueError('operands could not be broadcast together')
        subs = tensor_operator_registry.get('split')(axis, size)(input_x)
        repeated_subs = []
        for sub, rep in zip(subs, repeats):
            if rep != 0:
                repeated_subs.append(tensor_operator_registry.get('repeat_elements')(sub, rep, axis))
        return tensor_operator_registry.get('concatenate')(axis)(repeated_subs)


class RowTensor:
    """
    A sparse representation of a set of tensor slices at given indices.

    A RowTensor is typically used to represent a subset of a larger
    dense tensor of shape [L0, D1, ..., DN] where L0 >> D0.

    The values in `indices` are the indices in the first dimension of the slices
    that have been extracted from the larger tensor.

    The dense tensor `dense` represented by a RowTensor `slices` has
    `dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]`.

    RowTensor can only be used in the `Cell`'s construct method.

    It is not supported in pynative mode at the moment.

    Args:
        indices (Tensor): A 1-D integer Tensor of shape [D0].
        values (Tensor): A Tensor of any dtype of shape [D0, D1, ..., Dn].
        dense_shape (tuple(int)): An integer tuple which contains the shape
            of the corresponding dense tensor.

    Returns:
        RowTensor, composed of `indices`, `values`, and `dense_shape`.

    Examples:
        >>> import mindspore as ms
        >>> import mindspore.nn as nn
        >>> from mindspore import RowTensor, Tensor
        >>> class Net(nn.Cell):
        ...     def __init__(self, dense_shape):
        ...         super(Net, self).__init__()
        ...         self.dense_shape = dense_shape
        ...     def construct(self, indices, values):
        ...         x = RowTensor(indices, values, self.dense_shape)
        ...         return x.values, x.indices, x.dense_shape
        >>>
        >>> indices = Tensor([0])
        >>> values = Tensor([[1, 2]], dtype=ms.float32)
        >>> out = Net((3, 2))(indices, values)
        >>> print(out[0])
        [[1. 2.]]
        >>> print(out[1])
        [0]
        >>> print(out[2])
        (3, 2)
    """

    def __init__(self, indices, values, dense_shape):
        """Init RowTensor."""
        self.__indices = indices
        self.__values = values
        self.__dense_shape = dense_shape

    @property
    def indices(self):
        return self.__indices

    @property
    def values(self):
        return self.__values

    @property
    def dense_shape(self):
        return self.__dense_shape


class SparseTensor:
    """
    A sparse representation of a set of nonzero elements from a tensor at given indices.

    SparseTensor can only be used in the `Cell`'s construct method.

    It is not supported in pynative mode at the moment.

    For a tensor `dense`, its SparseTensor(indices, values, dense_shape) has
    `dense[indices[i]] = values[i]`.

    Args:
        indices (Tensor): A 2-D integer Tensor of shape `[N, ndims]`,
            where N and ndims are the number of `values` and number of dimensions in
            the SparseTensor, respectively.
        values (Tensor): A 1-D tensor of any type and shape `[N]`, which
            supplies the values for each element in `indices`.
        dense_shape (tuple(int)): An integer tuple of size `ndims`,
            which specifies the dense_shape of the sparse tensor.

    Returns:
        SparseTensor, composed of `indices`, `values`, and `dense_shape`.

    Examples:
        >>> import mindspore as ms
        >>> import mindspore.nn as nn
        >>> from mindspore import SparseTensor, Tensor
        >>> class Net(nn.Cell):
        ...     def __init__(self, dense_shape):
        ...         super(Net, self).__init__()
        ...         self.dense_shape = dense_shape
        ...     def construct(self, indices, values):
        ...         x = SparseTensor(indices, values, self.dense_shape)
        ...         return x.values, x.indices, x.dense_shape
        >>>
        >>> indices = Tensor([[0, 1], [1, 2]])
        >>> values = Tensor([1, 2], dtype=ms.float32)
        >>> out = Net((3, 4))(indices, values)
        >>> print(out[0])
        [1. 2.]
        >>> print(out[1])
        [[0 1]
         [1 2]]
        >>> print(out[2])
        (3, 4)
    """

    def __init__(self, indices, values, dense_shape):
        """Init SparseTensor."""
        self.__indices = indices
        self.__values = values
        self.__dense_shape = dense_shape

    @property
    def indices(self):
        return self.__indices

    @property
    def values(self):
        return self.__values

    @property
    def dense_shape(self):
        return self.__dense_shape


def _vm_compare(*args):
    """Implement `vm_compare` for tensor."""
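    # The last argument names the numpy method to dispatch to; the preceding
    # arguments are the operands (Tensor or scalar).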
    obj_str = args[-1]
    if obj_str == "shape":
        fn = getattr(args[0].asnumpy(), obj_str)
        return fn
    if len(args) == 2:
        fn = getattr(args[0].asnumpy(), obj_str)
        return Tensor(fn())
    if isinstance(args[0], Tensor):
        fn = getattr(args[0].asnumpy(), obj_str)
        y = args[1].asnumpy() if isinstance(args[1], Tensor) else args[1]
    else:
        obj_str = "__r" + obj_str[2:]
        fn = getattr(args[1].asnumpy(), obj_str)
        y = args[0]
    return Tensor(np.array(fn(y)))


def _check_tensor_input(input_data=None, dtype=None, shape=None, init=None):
    """Check the tensor input."""
    if input_data is not None and shape is not None:
        raise ValueError("If input_data is available, shape doesn't need to be set")

    if init is not None and (shape is None or dtype is None):
        raise ValueError("init, dtype and shape must have values at the same time.")

    if (int(input_data is None) + int(init is None)) != 1:
        raise TypeError("Exactly one of input_data and init should be provided.")

    if input_data is not None:
        if isinstance(input_data, np.ndarray) and input_data.ndim > 1 and input_data.size == 0:
            raise ValueError("input_data can not contain zero dimension.")
        if isinstance(input_data, (tuple, list)) and np.array(input_data).ndim > 1 \
                and np.array(input_data).size == 0:
            raise ValueError("input_data can not contain zero dimension.")

    if shape is not None and not (hasattr(init, "__enable_zero_dim__") and init.__enable_zero_dim__) and 0 in shape:
        raise ValueError("Shape can not contain zero value.")


tensor_operator_registry.register('vm_compare', _vm_compare)