/**
 * Copyright 2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "include/backend/py_execute_utils.h"

#include "include/common/fallback.h"
#include "include/common/utils/stub_tensor.h"
#include "include/backend/anf_runtime_algorithm.h"
#include "runtime/hardware/device_context_manager.h"
#include "plugin/device/cpu/kernel/pyexecute/py_execute_cpu_kernel.h"
#include "include/common/utils/convert_utils.h"
#include "include/common/utils/convert_utils_py.h"

namespace mindspore::pyexecute {
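// Conversion callback registered through set_pydata_converter(); used to turn
// Python objects into MindSpore Values when inferring PyExecute output abstracts.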
PyDataConverter py_data_convert_handler{nullptr};

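// Register the Python-object-to-Value conversion callback.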
void set_pydata_converter(const PyDataConverter &pydata_converter) { py_data_convert_handler = pydata_converter; }

namespace {

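// Copy the tensor's host data into the device address: a memcpy for CPU
// memory, a host-to-device sync for other backends. The tensor size must
// match the device address size exactly.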
void TensorToRawMemory(const tensor::TensorPtr &tensor, DeviceAddress *const device_address) {
  MS_EXCEPTION_IF_NULL(tensor);
  MS_EXCEPTION_IF_NULL(device_address);
  MS_EXCEPTION_IF_NULL(tensor->data_c());
  MS_LOG(DEBUG) << "tensor:" << tensor->ToString();
  if (tensor->Size() != device_address->GetSize()) {
    MS_LOG(EXCEPTION) << "Invalid tensor size:" << tensor->Size() << " device tensor size:" << device_address->GetSize()
                      << " for device tensor:" << device_address;
  }
  if (device_address->device_name() == "CPU") {
    MS_EXCEPTION_IF_NULL(device_address->GetMutablePtr());
    const auto &res = memcpy_s(reinterpret_cast<char *>(device_address->GetMutablePtr()), device_address->GetSize(),
                               tensor->data_c(), device_address->GetSize());
    if (res != EOK) {
      MS_LOG(EXCEPTION) << "memcpy failed. res: " << res << ", for tensor:" << tensor->ToString()
                        << " size:" << device_address->GetSize();
    }
  } else {
    MS_LOG(DEBUG) << "Tensor:" << tensor->ToString() << " shape:" << tensor->shape() << " type:" << tensor->data_type()
                  << " size:" << tensor->Size();
    device_address->SyncHostToDevice(tensor->Size(), tensor->data_c());
  }
}

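// Convert a Python bool/int/float scalar into a Tensor via ScalarToTensor.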
tensor::TensorPtr ScalarToValue(const py::object &obj) {
  ValuePtr value = nullptr;
  if (py::isinstance<py::bool_>(obj)) {
    value = MakeValue(py::cast<bool>(obj));
  } else if (py::isinstance<py::int_>(obj)) {
    value = MakeValue(py::cast<int64_t>(obj));
  } else if (py::isinstance<py::float_>(obj)) {
    value = MakeValue(py::cast<float>(obj));
  } else {
    MS_LOG(EXCEPTION) << "Invalid scalar py obj.";
  }
  if (value == nullptr || (!value->isa<Scalar>())) {
    MS_LOG(EXCEPTION) << "Invalid value for obj.";
  }
  return ScalarToTensor(value->cast<ScalarPtr>());
}

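// Check that every element of the sequence is an instance of T.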
template <typename T>
bool CheckSequenceElementSame(const py::sequence &obj) {
  // Check from the second element; the type of the first element determines T.
  for (size_t i = 1; i < py::len(obj); ++i) {
    if (!py::isinstance<T>(obj[i])) {
      return false;
    }
  }
  return true;
}

bool CheckSequenceToMemory(const py::sequence &obj) {
  // A sequence object can be copied to raw memory and consumed by other operators if:
  // 1. The sequence is not empty.
  // 2. The sequence is not nested.
  // 3. The sequence contains only Scalar or Tensor elements.
  // 4. All elements in the sequence have the same type.
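  // For example, (1, 2, 3) and [Tensor, Tensor] qualify, while (1, 2.0),
  // ((1, 2), 3) and the empty tuple do not.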
  if (py::len(obj) == 0) {
    return false;
  }
  auto first_obj = obj[0];
  if (py::isinstance<py::bool_>(first_obj)) {
    return CheckSequenceElementSame<py::bool_>(obj);
  } else if (py::isinstance<py::int_>(first_obj)) {
    return CheckSequenceElementSame<py::int_>(obj);
  } else if (py::isinstance<py::float_>(first_obj)) {
    return CheckSequenceElementSame<py::float_>(obj);
  } else if (py::isinstance<tensor::Tensor>(first_obj)) {
    return CheckSequenceElementSame<tensor::Tensor>(obj);
  }
  return false;
}

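// Convert a validated flat Python sequence into a single Tensor: scalar
// elements are converted one by one, then the whole tuple is packed by
// AnfAlgo::SequenceToTensor.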
tensor::TensorPtr SequenceToValue(const py::sequence &obj) {
  if (!CheckSequenceToMemory(obj)) {
    MS_LOG(EXCEPTION) << "Invalid py object.";
  }

  size_t obj_len = py::len(obj);
  std::vector<ValuePtr> values;
  for (size_t i = 0; i < obj_len; ++i) {
    auto element_obj = obj[i];
    if (py::isinstance<tensor::Tensor>(element_obj)) {
      values.emplace_back(element_obj.cast<tensor::TensorPtr>());
    } else {
      values.emplace_back(ScalarToValue(element_obj));
    }
  }
  return AnfAlgo::SequenceToTensor(std::make_shared<ValueTuple>(values));
}

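// A Python object can be written to raw memory if it is a Tensor, a scalar
// (bool/int/float), or a flat list/tuple that passes CheckSequenceToMemory.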
bool IsValidObj(const py::object &obj) {
  py::gil_scoped_acquire gil_acquire;
  return py::isinstance<tensor::Tensor>(obj) ||
         ((py::isinstance<py::list>(obj) || py::isinstance<py::tuple>(obj)) &&
          CheckSequenceToMemory(py::sequence(obj))) ||
         py::isinstance<py::bool_>(obj) || py::isinstance<py::int_>(obj) || py::isinstance<py::float_>(obj);
}

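// Return the data type id of a scalar abstract, or the element type id of a
// tensor abstract; any other abstract kind is an error.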
TypeId GetTypeIdByAbstract(const AbstractBasePtr &abstract) {
  MS_EXCEPTION_IF_NULL(abstract);
  if (abstract->isa<abstract::AbstractScalar>()) {
    const auto &type = abstract->BuildType();
    MS_EXCEPTION_IF_NULL(type);
    return type->type_id();
  } else if (abstract->isa<abstract::AbstractTensor>()) {
    const auto &tensor_abstract = abstract->cast<abstract::AbstractTensorPtr>();
    MS_EXCEPTION_IF_NULL(tensor_abstract);
    MS_EXCEPTION_IF_NULL(tensor_abstract->element());
    const auto &type = tensor_abstract->element()->BuildType();
    MS_EXCEPTION_IF_NULL(type);
    return type->type_id();
  } else {
    MS_LOG(EXCEPTION) << "Invalid abstract:" << abstract->ToString();
  }
}

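// An abstract can be written to raw memory if it is a scalar, a tensor, or a
// sequence whose elements are scalars/tensors sharing one type and shape.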
bool IsValidAbstract(const abstract::AbstractBasePtr &abstract) {
  MS_EXCEPTION_IF_NULL(abstract);
  if (abstract->isa<abstract::AbstractScalar>() || abstract->isa<abstract::AbstractTensor>()) {
    return true;
  }
  if (!abstract->isa<abstract::AbstractSequence>()) {
    return false;
  }
  const auto &seq_abstract = abstract->cast<abstract::AbstractSequencePtr>();
  MS_EXCEPTION_IF_NULL(seq_abstract);
  const auto &sub_abstracts = seq_abstract->elements();
  if (sub_abstracts.size() <= 1) {
    return true;
  }
  if (sub_abstracts[0] == nullptr ||
      ((!sub_abstracts[0]->isa<abstract::AbstractScalar>()) && (!sub_abstracts[0]->isa<abstract::AbstractTensor>()))) {
    return false;
  }

  auto get_shape_vector_by_abstract = [](const AbstractBasePtr &abstract) -> ShapeVector {
    MS_EXCEPTION_IF_NULL(abstract);
    if (abstract->isa<abstract::AbstractScalar>()) {
      return {};
    } else if (abstract->isa<abstract::AbstractTensor>()) {
      const auto &base_shape = abstract->BuildShape();
      MS_EXCEPTION_IF_NULL(base_shape);
      if (!base_shape->isa<abstract::Shape>()) {
        MS_LOG(EXCEPTION) << "Invalid shape:" << base_shape->ToString() << " in abstract:" << abstract->ToString();
      }
      const auto &shape = base_shape->cast<abstract::ShapePtr>();
      MS_EXCEPTION_IF_NULL(shape);
      return shape->shape();
    } else {
      MS_LOG(EXCEPTION) << "Invalid abstract:" << abstract->ToString();
    }
  };

  const auto &base_type_id = GetTypeIdByAbstract(sub_abstracts[0]);
  const auto &base_shape_vector = get_shape_vector_by_abstract(sub_abstracts[0]);
  for (size_t i = 1; i < sub_abstracts.size(); ++i) {
    MS_EXCEPTION_IF_NULL(sub_abstracts[i]);
    if (((!sub_abstracts[i]->isa<abstract::AbstractScalar>()) &&
         (!sub_abstracts[i]->isa<abstract::AbstractTensor>())) ||
        base_type_id != GetTypeIdByAbstract(sub_abstracts[i]) ||
        base_shape_vector != get_shape_vector_by_abstract(sub_abstracts[i])) {
      return false;
    }
  }
  return true;
}

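// Compute the byte size needed for the value described by the abstract.
// Sequences are assumed uniform (see IsValidAbstract), so their size is the
// element count times the size of the first element.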
size_t GetSizeForAbstract(const abstract::AbstractBasePtr &abstract) {
  MS_EXCEPTION_IF_NULL(abstract);
  if (abstract->isa<abstract::AbstractScalar>()) {
    return GetTypeByte(abstract->BuildType());
  } else if (abstract->isa<abstract::AbstractTensor>()) {
    const auto &tensor_abstract = abstract->cast<abstract::AbstractTensorPtr>();
    MS_EXCEPTION_IF_NULL(tensor_abstract);
    const auto &base_shape = tensor_abstract->BuildShape();
    MS_EXCEPTION_IF_NULL(base_shape);
    const auto &shape = base_shape->cast<abstract::ShapePtr>();
    MS_EXCEPTION_IF_NULL(shape);
    const auto &shape_vector = shape->shape();
    MS_EXCEPTION_IF_NULL(tensor_abstract->element());
    const auto &type = tensor_abstract->element()->BuildType();
    return std::accumulate(shape_vector.begin(), shape_vector.end(), GetTypeByte(type), std::multiplies<size_t>());
  }

  const auto &seq_abstract = abstract->cast<abstract::AbstractSequencePtr>();
  MS_EXCEPTION_IF_NULL(seq_abstract);
  const auto &sub_abstracts = seq_abstract->elements();
  if (sub_abstracts.empty()) {
    return 0;
  }
  return sub_abstracts.size() * GetSizeForAbstract(sub_abstracts[0]);
}
}  // namespace

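// Convert a supported Python object (Tensor, scalar, or flat sequence) into a
// Tensor; any other object is an error.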
tensor::TensorPtr GetValueByPyObj(const py::object &obj) {
  py::gil_scoped_acquire gil_acquire;
  if (py::isinstance<tensor::Tensor>(obj)) {
    return obj.cast<tensor::TensorPtr>();
  } else if (py::isinstance<py::list>(obj) || py::isinstance<py::tuple>(obj)) {
    return SequenceToValue(py::sequence(obj));
  } else if (py::isinstance<py::bool_>(obj) || py::isinstance<py::int_>(obj) || py::isinstance<py::float_>(obj)) {
    return ScalarToValue(obj);
  }
  MS_LOG(EXCEPTION) << "Invalid object:" << obj;
}

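// Infer the abstract (type and shape) for a PyExecute output object. Objects
// that cannot be converted fall back to a float64 tensor abstract of shape {1}.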
abstract::AbstractBasePtr GenerateAbstractFromPyObject(const py::object &obj) {
  // This function will be moved to a runtime compile pass later.
  py::gil_scoped_acquire gil_acquire;
  if (py::isinstance<tensor::Tensor>(obj) || IsStubTensor(obj)) {
    const auto &tensor = IsStubTensor(obj) ? ConvertStubTensor(obj) : obj.cast<tensor::TensorPtr>();
    MS_EXCEPTION_IF_NULL(tensor);
    MS_LOG(DEBUG) << "tensor:" << tensor->ToString() << " is stub tensor:" << IsStubTensor(obj);
    return tensor->ToAbstract();
  }

  if (py::isinstance<py::bool_>(obj)) {
    return MakeValue(py::cast<bool>(obj))->ToAbstract();
  } else if (py::isinstance<py::int_>(obj)) {
    return MakeValue(py::cast<int64_t>(obj))->ToAbstract();
  } else if (py::isinstance<py::float_>(obj)) {
    return MakeValue(py::cast<float>(obj))->ToAbstract();
  }

  // Full tuple support will be added later.
  if (py::isinstance<py::list>(obj) || py::isinstance<py::tuple>(obj)) {
    ValuePtr converted_res = nullptr;
    MS_EXCEPTION_IF_NULL(py_data_convert_handler);
    if (py_data_convert_handler(obj, &converted_res)) {
      auto ret_list = converted_res->ToAbstract();
      return fallback::GenerateAbstractSequence(ret_list->BuildShape(), ret_list->BuildType(), false);
    }
  }
  ShapeVector shape = {1};
  return std::make_shared<abstract::AbstractTensor>(TypeIdToType(TypeId::kNumberTypeFloat64), shape);
}

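// Materialize the PyExecute output held in the device address's user data:
// infer its abstract, reallocate the device memory to the inferred size, then
// copy the converted tensor into it. Unsupported objects are skipped and stay
// available through user data only.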
void UserDataToRawMemory(DeviceAddress *const device_address) {
  MS_EXCEPTION_IF_NULL(device_address);
  MS_EXCEPTION_IF_NULL(device_address->user_data());
  MS_LOG(DEBUG) << "Start sync data from device address:" << device_address
                << " user data:" << device_address->user_data();
  const auto &user_data_obj =
    device_address->user_data()->get<kernel::PyExecuteOutputUserData>(kernel::PyExecuteOutputUserData::key);
  MS_EXCEPTION_IF_NULL(user_data_obj);
  const auto &obj = user_data_obj->obj;
  if (!IsValidObj(obj)) {
    return;
  }
  const auto abstract = GenerateAbstractFromPyObject(obj);
  MS_EXCEPTION_IF_NULL(abstract);
  if (!IsValidAbstract(abstract)) {
    MS_LOG(DEBUG) << "Invalid abstract:" << abstract->ToString();
    return;
  }
  device_address->SetSize(GetSizeForAbstract(abstract));

  MS_LOG(DEBUG) << "Infer abstract:" << abstract->ToString() << " size:" << device_address->GetSize()
                << " device name:" << device_address->device_name() << " device id:" << device_address->device_id();

  const auto &device_context = device::DeviceContextManager::GetInstance().GetOrCreateDeviceContext(
    {device_address->device_name(), device_address->device_id()});
  MS_EXCEPTION_IF_NULL(device_context);
  MS_EXCEPTION_IF_NULL(device_context->device_res_manager_);
  if (device_address->GetPtr() != nullptr) {
    device_context->device_res_manager_->FreeMemory(device_address);
  }
  device_address->set_ptr(nullptr);
  if (!device_context->device_res_manager_->AllocateMemory(device_address)) {
    MS_LOG(ERROR) << "Device(id:" << device_context->device_context_key().device_id_
                  << ") memory isn't enough and alloc failed, alloc size: " << device_address->GetSize();
    return;
  }
  tensor::TensorPtr tensor = GetValueByPyObj(obj);
  TensorToRawMemory(tensor, device_address);
}
}  // namespace mindspore::pyexecute