/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pybind_api/ir/tensor_py.h"

#include <vector>
#include <sstream>
#include <string>
#include <utility>
#include <complex>

#include "pybind_api/api_register.h"
#include "abstract/abstract_value.h"
#include "utils/shape_utils.h"
#include "utils/cache_embedding_hashmap_struct.h"

namespace mindspore {
namespace tensor {
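// Map multi-character numpy buffer formats that the single-character path cannot handle:
// "Zf" is complex64, "Zd" is complex128, and a format ending in 'w' (e.g. "5w") is a
// fixed-length np.str_ string.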
static TypeId GetDataTypeBigFormat(const py::buffer_info &buf) {
  const size_t format_size = 2;
  if (buf.format.size() >= format_size) {
    // Support np.str_ dtype, format: {x}w. {x} is a number that means the maximum length of the string items.
    if (buf.format.back() == 'w') {
      return TypeId::kObjectTypeString;
    } else if (buf.format == "Zf") {
      return TypeId::kNumberTypeComplex64;
    } else if (buf.format == "Zd") {
      return TypeId::kNumberTypeComplex128;
    }
  }
  MS_LOG(WARNING) << "Unsupported DataType format " << buf.format << ", item size " << buf.itemsize;
  return TypeId::kTypeUnknown;
}

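// Map a single-character struct format code plus item size to a TypeId. The item size
// disambiguates platform-dependent codes, e.g. format 'l' with itemsize 8 maps to Int64 and
// format 'f' with itemsize 4 maps to Float32. Longer formats fall back to GetDataTypeBigFormat.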
static TypeId GetDataType(const py::buffer_info &buf) {
  if (buf.format.size() == 1) {
    switch (buf.format.front()) {
      case 'e':
      case 'f':
      case 'd':
        switch (buf.itemsize) {
          case 2:
            return TypeId::kNumberTypeFloat16;
          case 4:
            return TypeId::kNumberTypeFloat32;
          case 8:
            return TypeId::kNumberTypeFloat64;
        }
        break;
      case 'b':
      case 'h':
      case 'i':
      case 'l':
      case 'q':
        switch (buf.itemsize) {
          case 1:
            return TypeId::kNumberTypeInt8;
          case 2:
            return TypeId::kNumberTypeInt16;
          case 4:
            return TypeId::kNumberTypeInt32;
          case 8:
            return TypeId::kNumberTypeInt64;
        }
        break;
      case 'B':
      case 'H':
      case 'I':
      case 'L':
      case 'Q':
        switch (buf.itemsize) {
          case 1:
            return TypeId::kNumberTypeUInt8;
          case 2:
            return TypeId::kNumberTypeUInt16;
          case 4:
            return TypeId::kNumberTypeUInt32;
          case 8:
            return TypeId::kNumberTypeUInt64;
        }
        break;
      case '?':
        return TypeId::kNumberTypeBool;
    }
  }
  return GetDataTypeBigFormat(buf);
}

static std::string GetPyTypeFormat(TypeId data_type) {
  switch (data_type) {
    case TypeId::kNumberTypeFloat16:
      return "e";
    case TypeId::kNumberTypeFloat32:
      return py::format_descriptor<float>::format();
    case TypeId::kNumberTypeFloat64:
      return py::format_descriptor<double>::format();
    case TypeId::kNumberTypeUInt8:
      return py::format_descriptor<uint8_t>::format();
    case TypeId::kNumberTypeUInt16:
      return py::format_descriptor<uint16_t>::format();
    case TypeId::kNumberTypeUInt32:
      return py::format_descriptor<uint32_t>::format();
    case TypeId::kNumberTypeUInt64:
      return py::format_descriptor<uint64_t>::format();
    case TypeId::kNumberTypeInt8:
      return py::format_descriptor<int8_t>::format();
    case TypeId::kNumberTypeInt16:
      return py::format_descriptor<int16_t>::format();
    case TypeId::kNumberTypeInt32:
      return py::format_descriptor<int32_t>::format();
    case TypeId::kNumberTypeInt64:
      return py::format_descriptor<int64_t>::format();
    case TypeId::kNumberTypeBool:
      return py::format_descriptor<bool>::format();
    case TypeId::kObjectTypeString:
      return py::format_descriptor<uint8_t>::format();
    case TypeId::kNumberTypeComplex64:
      return py::format_descriptor<std::complex<float>>::format();
    case TypeId::kNumberTypeComplex128:
      return py::format_descriptor<std::complex<double>>::format();
    default:
      MS_LOG(WARNING) << "Unsupported DataType " << data_type << ".";
      return "";
  }
}

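// Check the numpy C_CONTIGUOUS flag; non-contiguous inputs are copied into a contiguous
// buffer by MakeTensor before the tensor is constructed.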
static bool IsCContiguous(const py::array &input) {
  auto flags = static_cast<unsigned int>(input.flags());
  return (flags & pybind11::detail::npy_api::NPY_ARRAY_C_CONTIGUOUS_) != 0;
}

// TensorDataNumpy implements TensorData using numpy array.
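// Keeping the py::buffer_info alive means the tensor shares memory with the originating
// numpy array rather than copying it.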
class TensorDataNumpy : public TensorData {
 public:
  explicit TensorDataNumpy(py::buffer_info &&buffer) : buffer_(std::move(buffer)) {}

  ~TensorDataNumpy() override = default;

  /// Total number of elements.
  ssize_t size() const override { return buffer_.size; }

  /// Byte size of a single element.
  ssize_t itemsize() const override { return buffer_.itemsize; }

  /// Total number of bytes.
  ssize_t nbytes() const override { return buffer_.itemsize * buffer_.size; }

  /// Number of dimensions.
  ssize_t ndim() const override { return buffer_.ndim; }

  /// Data pointer.
  void *data() override { return buffer_data(); }

  const void *const_data() const override { return buffer_.ptr; }

  /// To string.
  std::string ToString(const TypeId, const ShapeVector &, bool use_comma) const override {
    if (use_comma) {
      // Call python np.array2string(data_, separator=', ') to convert to a string with commas.
      py::dict kwargs;
      kwargs["separator"] = ", ";
      auto np = py::module::import("numpy");
      auto array2string = np.attr("array2string");
      return py::str(array2string(py_array(), **kwargs));
    }
    // Without comma.
    return py::str(py_array());
  }

  /// py::array object.
  py::array py_array() const {
    // Use a dummy owner to avoid copying data.
    py::str dummyOwner;
    return py::array(py::dtype(buffer_), buffer_.shape, buffer_.strides, buffer_.ptr, dummyOwner);
  }

 private:
  void *buffer_data() { return buffer_.ptr; }

  // The internal buffer.
  py::buffer_info buffer_;
};

TensorPtr TensorPy::MakeTensor(const py::array &input, const TypePtr &type_ptr) {
  // Get input buffer info.
  py::buffer_info buf = input.request();
  // Check data types.
  auto data_type = type_ptr ? type_ptr->type_id() : TypeId::kTypeUnknown;
  auto buf_type = GetDataType(buf);
  if (buf_type == TypeId::kTypeUnknown && data_type == TypeId::kTypeUnknown) {
    MS_LOG(EXCEPTION) << "Unsupported tensor type!";
  }
  MS_LOG(DEBUG) << "data_type: " << data_type << ", buf_type: " << buf_type;
  if (data_type == TypeId::kObjectTypeString || buf_type == TypeId::kObjectTypeString) {
    return TensorPy::MakeTensorOfNumpy(input);
  }
  // Use buf type as data type if type_ptr is not set.
  if (data_type == TypeId::kTypeUnknown) {
    data_type = buf_type;
  }
  // Convert the input array to C contiguous if needed.
  std::unique_ptr<char[]> tmp_buf;
  if (!IsCContiguous(input)) {
    Py_buffer pybuf;
    if (PyObject_GetBuffer(input.ptr(), &pybuf, PyBUF_ANY_CONTIGUOUS)) {
      MS_LOG(EXCEPTION) << "Failed to get buffer from the input!";
    }
    tmp_buf = std::make_unique<char[]>(pybuf.len);
    if (PyBuffer_ToContiguous(tmp_buf.get(), &pybuf, pybuf.len, 'C')) {
      MS_LOG(EXCEPTION) << "Can't copy numpy.ndarray to a contiguous buffer.";
    }
    PyBuffer_Release(&pybuf);
    buf.ptr = tmp_buf.get();
  }
  // Get tensor shape.
  ShapeVector shape(buf.shape.begin(), buf.shape.end());
  if (data_type == buf_type) {
    // Use memory copy if the input data type is the same as the required type.
    return std::make_shared<Tensor>(data_type, shape, buf.ptr, buf.size * buf.itemsize);
  }
  // Create tensor with data type converted.
  return std::make_shared<Tensor>(data_type, shape, buf.ptr, buf_type);
}

/// Creates a Tensor from a numpy array without copy.
TensorPtr TensorPy::MakeTensorOfNumpy(const py::array &input) {
  // Check format.
  if (!IsCContiguous(input)) {
    MS_LOG(EXCEPTION) << "Array should be C contiguous.";
  }
  // Get input buffer info.
  py::buffer_info buf = input.request();
  // Get tensor dtype and check it.
  auto dtype = GetDataType(buf);
  if (dtype == TypeId::kTypeUnknown) {
    MS_LOG(EXCEPTION) << "Unsupported data type!";
  }
  // Get tensor shape.
  ShapeVector shape(buf.shape.begin(), buf.shape.end());
  // Make a tensor with shared data with numpy array.
  auto tensor_data = std::make_shared<TensorDataNumpy>(std::move(buf));
  return std::make_shared<Tensor>(dtype, shape, tensor_data);
}

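// Compute C-contiguous (row-major) byte strides for the given shape; for example, shape
// (2, 3) with item size 4 yields strides (12, 4).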
static std::vector<ssize_t> GetStrides(const std::vector<ssize_t> &shape, ssize_t item_size) {
  std::vector<ssize_t> strides;
  strides.reserve(shape.size());
  const auto ndim = shape.size();
  for (size_t i = 0; i < ndim; ++i) {
    auto stride = item_size;
    for (size_t j = i + 1; j < ndim; ++j) {
      stride *= shape[j];
    }
    strides.push_back(stride);
  }
  return strides;
}

static py::buffer_info GetPyBufferInfo(const Tensor &tensor) {
  std::vector<ssize_t> shape(tensor.shape().begin(), tensor.shape().end());
  std::vector<ssize_t> strides = GetStrides(shape, tensor.data().itemsize());
  return py::buffer_info{
    tensor.data_c(), tensor.data().itemsize(), GetPyTypeFormat(tensor.data_type()), tensor.DataDim(), shape, strides};
}

py::tuple TensorPy::GetPyTupleShape(const Tensor &tensor) {
  auto &shape = tensor.shape();
  py::tuple dims(shape.size());
  for (size_t i = 0; i < dims.size(); ++i) {
    dims[i] = py::int_(shape[i]);
  }
  return dims;
}

py::tuple TensorPy::GetPyTupleStrides(const Tensor &tensor) {
  std::vector<ssize_t> shape(tensor.shape().begin(), tensor.shape().end());
  std::vector<ssize_t> strides = GetStrides(shape, tensor.data().itemsize());
  py::tuple py_strides(strides.size());
  for (size_t i = 0; i < strides.size(); ++i) {
    py_strides[i] = py::int_(strides[i]);
  }
  return py_strides;
}

py::int_ TensorPy::GetPyItemSize(const Tensor &tensor) { return tensor.data().itemsize(); }

py::int_ TensorPy::GetPyNBytes(const Tensor &tensor) { return tensor.data().nbytes(); }

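// Copy embedding rows from the device-side cache tensor back into the full host tensor.
// Each non-empty hashmap entry maps a host row index (key_) to a cache row index (value_);
// rows are assumed to hold 4-byte (float32) parameters.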
template <typename T>
void MemCopyFromCacheToHost(void *hashmap_addr, void *host_addr, void *cache_addr, size_t host_max, size_t cache_max,
                            size_t hashmap_size, size_t col_size) {
  auto host_data = static_cast<char *>(host_addr);
  auto cache_data = static_cast<char *>(cache_addr);
  auto hashmap_data = static_cast<HashmapEntry<T> *>(hashmap_addr);
  // default param type float
  const size_t param_type_size = 4;
  size_t single_col_bytes = param_type_size * col_size;
  for (size_t i = 0; i < hashmap_size; ++i) {
    if (!hashmap_data[i].IsEmpty()) {
      size_t host_offset = single_col_bytes * LongToSize(hashmap_data[i].key_);
      size_t cache_offset = single_col_bytes * LongToSize(hashmap_data[i].value_);
      if (cache_offset + single_col_bytes <= cache_max) {
        auto ret =
          memcpy_s(host_data + host_offset, host_max - host_offset, cache_data + cache_offset, single_col_bytes);
        if (ret != 0) {
          MS_LOG(EXCEPTION) << "Memcpy failed.";
        }
      }
    }
  }
  MS_LOG(INFO) << "Memcpy from cache to host success!";
}

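// Synchronize a cache-enabled parameter: wait for pending operations, sync device data to
// host, then write the cached embedding rows back into the host tensor via the hashmap.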
void TensorPy::FlushFromCache(const Tensor &tensor) {
  py::gil_scoped_release gil_release;
  if (tensor.NeedWait()) {
    tensor.Wait();
  }
  tensor.data_sync();

  if (tensor.cache_enable()) {
    MS_LOG(INFO) << tensor.ToString() << " is cache enabled.";
    auto hashmap_tensor_ptr = tensor.hashmap_tensor_ptr();
    auto cache_tensor_ptr = tensor.cache_tensor_ptr();
    if (hashmap_tensor_ptr != nullptr && cache_tensor_ptr != nullptr) {
      hashmap_tensor_ptr->data_sync();
      cache_tensor_ptr->data_sync();
      auto hashmap_size = hashmap_tensor_ptr->shape_c()[0];
      auto host_shape = tensor.shape_c();
      auto cache_shape = cache_tensor_ptr->shape_c();
      if (host_shape.size() != 2 || cache_shape.size() != 2 || host_shape[1] != cache_shape[1]) {
        MS_LOG(EXCEPTION) << "Invalid host shape or cache shape. "
                          << "host shape: " << host_shape << ", cache shape: " << cache_shape;
      }
      auto host_data_max_size = static_cast<size_t>(tensor.Size());
      auto cache_data_max_size = static_cast<size_t>(cache_tensor_ptr->Size());
      auto hashmap_data_type = hashmap_tensor_ptr->data_type();
      if (hashmap_data_type == TypeId::kNumberTypeInt32) {
        MemCopyFromCacheToHost<int32_t>(hashmap_tensor_ptr->data_c(), tensor.data_c(), cache_tensor_ptr->data_c(),
                                        host_data_max_size, cache_data_max_size, hashmap_size, host_shape[1]);
      } else if (hashmap_data_type == TypeId::kNumberTypeInt64) {
        MemCopyFromCacheToHost<int64_t>(hashmap_tensor_ptr->data_c(), tensor.data_c(), cache_tensor_ptr->data_c(),
                                        host_data_max_size, cache_data_max_size, hashmap_size, host_shape[1]);
      } else {
        MS_LOG(ERROR) << "Hashmap dtype only supports int32 and int64.";
      }
    }
  }
}

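// Convert to numpy after synchronizing device data; the GIL is released while waiting so
// other Python threads can run during the device-to-host copy.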
py::array TensorPy::SyncAsNumpy(const Tensor &tensor) {
  {
    py::gil_scoped_release gil_release;
    if (tensor.NeedWait()) {
      tensor.Wait();
    }
    tensor.data_sync();

    // Release device address of graph output tensor.
    if (tensor.need_release_device_mem()) {
      const_cast<Tensor &>(tensor).set_device_address(nullptr);
    }
  }
  return AsNumpy(tensor);
}

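// Return a numpy view of the tensor: zero-copy if the data is already backed by a numpy
// buffer (TensorDataNumpy), otherwise build an array over the tensor's memory via the buffer
// protocol, with the tensor object as owner to keep the memory alive.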
py::array TensorPy::AsNumpy(const Tensor &tensor) {
  auto data_numpy = dynamic_cast<const TensorDataNumpy *>(&tensor.data());
  if (data_numpy != nullptr) {
    // Return the internal numpy array if the tensor data is implemented based on it.
    return data_numpy->py_array();
  }
  // Otherwise, create a numpy array via the buffer protocol.
  auto info = GetPyBufferInfo(tensor);
  py::object self = py::cast(&tensor);
  return py::array(py::dtype(info), info.shape, info.strides, info.ptr, self);
}

static ShapeVector GetShapeFromTuple(const py::tuple &tuple) {
  ShapeVector shape;
  const size_t size = tuple.size();
  shape.reserve(tuple.size());
  for (size_t i = 0; i < size; ++i) {
    shape.push_back(py::int_(tuple[i]));
  }
  return shape;
}

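// Illustrative Python-level usage of the bindings registered below (assuming the usual
// mindspore.Tensor wrapper around this C++ class):
//   import numpy as np
//   import mindspore
//   t = mindspore.Tensor(np.ones((2, 3), np.float32))  # routed through TensorPy::MakeTensor
//   a = t.asnumpy()                                     # TensorPy::SyncAsNumpy
//   s = mindspore.Tensor.from_numpy(np.ones((2, 3)))    # TensorPy::MakeTensorOfNumpy (no copy)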
REGISTER_PYBIND_DEFINE(Tensor, ([](const py::module *m) {
                         // Define python MetaTensor class.
                         (void)py::class_<MetaTensor, std::shared_ptr<MetaTensor>>(*m, "MetaTensor")
                           .def(py::init<TypePtr, const ShapeVector>(), py::arg("dtype"), py::arg("shape"))
                           .def_property_readonly("dtype", &MetaTensor::Dtype, "Get the MetaTensor's dtype.")
                           .def_property_readonly("shape", &MetaTensor::shape, "Get the MetaTensor's shape.")
                           .def_property("param_info", &MetaTensor::param_info, &MetaTensor::set_param_info)
                           .def(py::pickle(
                             [](const MetaTensor &t) {  // __getstate__
                               /* Return a tuple that fully encodes the state of the object */
                               return py::make_tuple(static_cast<int>(t.data_type()), t.shape());
                             },
                             [](const py::tuple &t) {  // __setstate__
                               if (t.size() != 2) {
                                 throw std::runtime_error("Invalid state!");
                               }
                               /* Create a new C++ instance */
                               MetaTensor tensor(TypeId(t[0].cast<int>()), t[1].cast<ShapeVector>());
                               return tensor;
                             }));
                         // Define python Tensor class.
                         // dtype should be defined before Tensor, because Tensor init depends on dtype.
                         (void)py::class_<Tensor, MetaTensor, std::shared_ptr<Tensor>>(*m, "Tensor")
                           .def(py::init([](const Tensor &tensor) { return std::make_shared<Tensor>(tensor); }),
                                py::arg("input"))
                           .def(py::init([](const Tensor &tensor, const TypePtr &type_ptr) {
                                  TypeId data_type = type_ptr ? type_ptr->type_id() : kTypeUnknown;
                                  if (data_type == kTypeUnknown || tensor.data_type() == data_type) {
                                    return std::make_shared<Tensor>(tensor);
                                  }
                                  return std::make_shared<Tensor>(tensor, data_type);
                                }),
                                py::arg("input"), py::arg("dtype"))
                           .def(py::init([](const TypePtr &type_ptr, const py::tuple &shape) {
                                  auto data_type = type_ptr ? type_ptr->type_id() : TypeId::kNumberTypeFloat64;
                                  return std::make_shared<Tensor>(data_type, GetShapeFromTuple(shape));
                                }),
                                py::arg("dtype"), py::arg("shape"))
                           .def(py::init([](const TypePtr &type_ptr, const py::list &shape) {
                                  auto data_type = type_ptr ? type_ptr->type_id() : TypeId::kNumberTypeFloat64;
                                  return std::make_shared<Tensor>(data_type, GetShapeFromTuple(shape));
                                }),
                                py::arg("dtype"), py::arg("shape"))
                           .def(py::init([](const py::array &input, const TypePtr &type_ptr) {
                                  return TensorPy::MakeTensor(input, type_ptr);
                                }),
                                py::arg("input"), py::arg("dtype") = nullptr)
                           .def(py::init([](const py::float_ input, const TypePtr &type_ptr) {
                                  return TensorPy::MakeTensor(py::array(input), type_ptr);
                                }),
                                py::arg("input"), py::arg("dtype") = nullptr)
                           .def(py::init([](const py::int_ input, const TypePtr &type_ptr) {
                                  return TensorPy::MakeTensor(py::array(input), type_ptr);
                                }),
                                py::arg("input"), py::arg("dtype") = nullptr)
                           .def(py::init([](const py::list &input, const TypePtr &type_ptr) {
                                  return TensorPy::MakeTensor(py::array(input), type_ptr);
                                }),
                                py::arg("input"), py::arg("dtype") = nullptr)
                           .def(py::init([](const py::tuple &input, const TypePtr &type_ptr) {
                                  return TensorPy::MakeTensor(py::array(input), type_ptr);
                                }),
                                py::arg("input"), py::arg("dtype") = nullptr)
                           // We only support array/bool_/int_/float_/list/tuple/complex pybind objects as tensor
                           // input; array/bool_/int_/float_/list/tuple inits are matched above, and any other pybind
                           // object input raises an error, except for the complex data type.
                           .def(py::init([](const py::object &input, const TypePtr &type_ptr) {
                                  if (!PyComplex_CheckExact(input.ptr())) {
                                    MS_LOG(EXCEPTION) << "Unsupported tensor type: " << input.get_type();
                                  }
                                  return TensorPy::MakeTensor(py::array(input), type_ptr);
                                }),
                                py::arg("input"), py::arg("dtype") = nullptr)
                           .def_property("init_flag", &Tensor::is_init, &Tensor::set_init_flag)
                           .def_property_readonly("_dtype", &Tensor::Dtype, R"mydelimiter(
                             Get the tensor's data type.

                             Returns:
                                 type, the data type of tensor.

                             Examples:
                                 >>> data = mindspore.Tensor(np.ones((2, 1), np.int32))
                                 >>> data.dtype
                                 Int32
                             )mydelimiter")
                           .def_property_readonly("_shape", TensorPy::GetPyTupleShape, R"mydelimiter(
                             Get the tensor's shape.

                             Returns:
                                 tuple[int], the shape of tensor.

                             Examples:
                                 >>> data = mindspore.Tensor(np.ones((3, 3)))
                                 >>> data.shape
                                 (3, 3)
                             )mydelimiter")
                           .def_property_readonly("_size", &Tensor::DataSize, R"mydelimiter(
                             Get tensor's data size.

                             Returns:
                                 int, the size of tensor.

                             Examples:
                                 >>> data = mindspore.Tensor(np.ones((2, 3)))
                                 >>> data.size
                                 6
                             )mydelimiter")
                           .def_property_readonly("_itemsize", TensorPy::GetPyItemSize, R"mydelimiter(
                             Get the tensor's length of one element in bytes.

                             Returns:
                                 itemsize, length of one element in bytes.

                             Examples:
                                 >>> data = mindspore.Tensor(np.ones((2, 1), np.int32))
                                 >>> data.itemsize
                                 4
                             )mydelimiter")
                           .def_property_readonly("_nbytes", TensorPy::GetPyNBytes, R"mydelimiter(
                             Get the tensor's total number of bytes.

                             Returns:
                                 nbytes, total number of bytes taken by the tensor.

                             Examples:
                                 >>> data = mindspore.Tensor(np.ones((2, 1), np.int32))
                                 >>> data.nbytes
                                 4
                             )mydelimiter")
                           .def_property_readonly("_strides", TensorPy::GetPyTupleStrides, R"mydelimiter(
                             Get the tensor's tuple of bytes to step in each dimension
                             when traversing an array.

                             Returns:
                                 tuple[int], the strides of the tensor.

                             Examples:
                                 >>> data = mindspore.Tensor(np.ones((2, 1), np.int32))
                                 >>> data.strides
                                 (4, 4)
                             )mydelimiter")
                           .def("from_numpy", TensorPy::MakeTensorOfNumpy, R"mydelimiter(
                             Creates a Tensor from a numpy.ndarray without copy.

                             Arg:
                                 array (numpy.ndarray): The input ndarray.

                             Returns:
                                 Tensor, tensor with shared data to input ndarray.

                             Examples:
                                 >>> a = np.ones((2, 3))
                                 >>> t = mindspore.Tensor.from_numpy(a)
                             )mydelimiter")
                           .def("asnumpy", TensorPy::SyncAsNumpy, R"mydelimiter(
                             Convert tensor to numpy.ndarray.

                             Returns:
                                 numpy.ndarray.

                             Examples:
                                 >>> data = mindspore.Tensor(np.ones((2, 3)))
                                 >>> array = data.asnumpy()
                                 >>> array
                                 array([[1., 1., 1.],
                                        [1., 1., 1.]])
                             )mydelimiter")
                           .def("_flush_from_cache", TensorPy::FlushFromCache, R"mydelimiter(
                             Flush cache data to host if the tensor is cache enabled.

                             Returns:
                                 None.

                             Examples:
                                 >>> data = mindspore.Tensor(np.ones((2, 3)))
                                 >>> data._flush_from_cache()
                             )mydelimiter")
                           .def("is_init", &Tensor::is_init, R"mydelimiter(
                             Get tensor init_flag.

                             Returns:
                                 bool, whether the tensor is initialized.

                             Examples:
                                 >>> data = mindspore.Tensor(np.ones((2, 3)))
                                 >>> data.is_init()
                                 False
                             )mydelimiter")
                           .def("set_init_flag", &Tensor::set_init_flag, R"mydelimiter(
                             Set tensor init_flag.

                             Examples:
                                 >>> data = mindspore.Tensor(np.ones((2, 3)))
                                 >>> data.set_init_flag(True)
                             )mydelimiter")
                           .def("dim", &Tensor::DataDim, R"mydelimiter(
                             Get tensor's data dimension.

                             Returns:
                                 int, the dimension of tensor.

                             Examples:
                                 >>> data = mindspore.Tensor(np.ones((2, 3)))
                                 >>> data.dim()
                                 2
                             )mydelimiter")
                           .def("assign_value_cpp", &Tensor::AssignValue, R"mydelimiter(
                             Assign another tensor's value to this tensor.

                             Arg:
                                 value (:class:`mindspore.tensor`): The value tensor.

                             Examples:
                                 >>> data = mindspore.Tensor(np.ones((1, 2), np.float32))
                                 >>> data2 = mindspore.Tensor(np.ones((2, 2), np.float32))
                                 >>> data.assign_value(data2)
                                 >>> data.shape
                                 (2, 2)
                             )mydelimiter")
                           .def("set_dtype", &Tensor::SetDtype, R"mydelimiter(
                             Set the tensor's data type.

                             Arg:
                                 dtype (:class:`mindspore.dtype`): The type of output tensor.

                             Examples:
                                 >>> data = mindspore.Tensor(np.ones((1, 2), np.float32))
                                 >>> data.set_dtype(mindspore.int32)
                                 mindspore.int32
                             )mydelimiter")
                           .def("set_cast_dtype", &Tensor::set_cast_dtype, py::arg("dtype") = nullptr)
                           .def("data_sync", &Tensor::data_sync)
                           .def("__str__", &Tensor::ToString)
                           .def("__repr__", &Tensor::ToStringRepr)
                           .def(py::pickle(
                             [](const Tensor &t) {  // __getstate__
                               /* Return a tuple that fully encodes the state of the object */
                               return py::make_tuple(TensorPy::SyncAsNumpy(t));
                             },
                             [](const py::tuple &t) {  // __setstate__
                               if (t.size() != 1) {
                                 throw std::runtime_error("Invalid state!");
                               }
                               /* Create a new C++ instance */
                               return TensorPy::MakeTensor(t[0].cast<py::array>());
                             }));
                       }));
}  // namespace tensor
}  // namespace mindspore