/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "c_api/src/utils.h"
#include "abstract/ops/primitive_infer_map.h"
#include "c_api/src/helper.h"
#include "frontend/operator/ops_front_infer_function.h"
#include "backend/operator/ops_backend_infer_function.h"

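// Convert a ValueNode that holds a constant scalar into one holding the equivalent tensor,
// and refresh its abstract; non-scalar value nodes are left untouched.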
void ConvertConstScalarInputToTensor(const AnfNodePtr &input_node) {
  MS_EXCEPTION_IF_NULL(input_node);
  if (!input_node->isa<ValueNodeImpl>()) {
    return;
  }
  auto value_node = input_node->cast<ValueNodePtr>();
  MS_EXCEPTION_IF_NULL(value_node);
  auto value = value_node->value();
  MS_EXCEPTION_IF_NULL(value);
  if (!value->isa<ScalarImpl>()) {
    return;
  }
  TensorPtr tensor_ptr = ScalarToTensor(value->cast<ScalarPtr>());
  if (tensor_ptr == nullptr) {
    MS_LOG(WARNING) << "Create tensor of " << input_node->DebugString() << " failed";
    return;
  }
  value_node->set_value(tensor_ptr);
  value_node->set_abstract(tensor_ptr->ToAbstract());
}

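// Flatten a backend execution result (BaseRef) into a flat list of tensors: nested
// VectorRefs are expanded recursively, tensors are data-synced, and scalars are wrapped
// into tensors.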
std::vector<TensorPtr> ConvertOutputToTensor(const mindspore::BaseRef &output) {
  std::vector<TensorPtr> ref_outputs{};
  if (mindspore::utils::isa<mindspore::VectorRef>(output)) {
    auto vec_ref = mindspore::utils::cast<mindspore::VectorRef>(output);
    for (const auto &item : vec_ref) {
      // For multiple outputs, Ascend will return a VectorRef of VectorRef.
      const std::vector<TensorPtr> &item_out = ConvertOutputToTensor(item);
      (void)ref_outputs.insert(ref_outputs.end(), item_out.begin(), item_out.end());
    }
  } else if (mindspore::utils::isa<TensorPtr>(output)) {
    auto tensor = std::dynamic_pointer_cast<TensorImpl>(output.copy());
    tensor->data_sync();
    ref_outputs.push_back(tensor);
  } else if (mindspore::utils::isa<ScalarPtr>(output)) {
    auto value = mindspore::utils::cast<ScalarPtr>(output);
    auto tensor = ScalarToTensor(value);
    ref_outputs.push_back(tensor);
  } else {
    MS_LOG(ERROR) << "Convert output to tensor failed, unrecognized output type: " << output.ToString();
  }
  return ref_outputs;
}

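// Resolve each attribute handle back to its source ValuePtr and set all attributes on the
// primitive; names found in kOpAttrNameAdaptMap are additionally registered under their
// adapted names.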
STATUS OpSetAttrs(ResMgrHandle res_mgr, const PrimitivePtr &prim, const char *const *attr_names, ValueHandle attrs[],
                  size_t attr_num) {
  AttrMap attr_map{};
  for (size_t i = 0; i < attr_num; ++i) {
    if (attr_names[i] == nullptr) {
      MS_LOG(ERROR) << "Input array [attr_names] has nullptr element, index: " << i;
      return RET_NULL_PTR;
    }
    auto value = GetSrcPtr<ValuePtr>(res_mgr, attrs[i]);
    if (value == nullptr) {
      MS_LOG(ERROR) << "Get attribute's source pointer failed, attribute index: " << i;
      return RET_NULL_PTR;
    }
    std::string name(attr_names[i]);
    auto iter = kOpAttrNameAdaptMap.find(name);
    if (iter != kOpAttrNameAdaptMap.end()) {
      attr_map[iter->second] = value;
    }
    attr_map[name] = value;
  }
  (void)prim->SetAttrs(attr_map);
  return RET_OK;
}

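// Flatten a ValuePtr result into a flat list of tensors, expanding value sequences
// recursively and converting scalars into tensors.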
std::vector<TensorPtr> ConvertOutputToTensor(const ValuePtr &output) {
  std::vector<TensorPtr> tensor_outputs{};
  if (output->isa<ValueSequenceImpl>()) {
    auto value_sequence = output->cast<ValueSequencePtr>();
    for (const auto &item : value_sequence->value()) {
      // For multiple outputs, Ascend will return a tuple of ValuePtr.
      const std::vector<TensorPtr> &item_out = ConvertOutputToTensor(item);
      (void)tensor_outputs.insert(tensor_outputs.end(), item_out.begin(), item_out.end());
    }
  } else if (output->isa<TensorImpl>()) {
    auto tensor = output->cast<TensorPtr>();
    tensor->data_sync();
    tensor_outputs.push_back(tensor);
  } else if (output->isa<ScalarImpl>()) {
    auto tensor = ScalarToTensor(output->cast<ScalarPtr>());
    tensor_outputs.push_back(tensor);
  } else {
    MS_LOG(ERROR) << "Convert output to tensor failed, unrecognized output type: " << output->type_name();
  }
  return tensor_outputs;
}

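// Build a Shape object for each output from the raw shape arrays and their dimension counts.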
std::vector<BaseShapePtr> BuildShape(int64_t **out_shapes, size_t *out_dims, size_t out_num) {
  MS_EXCEPTION_IF_NULL(out_shapes);
  MS_EXCEPTION_IF_NULL(out_dims);
  std::vector<BaseShapePtr> shape_list;
  for (size_t i = 0; i < out_num; i++) {
    int64_t *shape = out_shapes[i];
    ShapeVector shape_vec(shape, shape + out_dims[i]);
    auto each_shape = std::make_shared<Shape>(shape_vec);
    (void)shape_list.emplace_back(each_shape);
  }
  return shape_list;
}

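// Map each C API data type enum to the corresponding MindSpore TypePtr.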
std::vector<TypePtr> BuildType(const DataTypeC *out_dtypes, size_t out_num) {
  MS_EXCEPTION_IF_NULL(out_dtypes);
  std::vector<TypePtr> type_list;
  for (size_t i = 0; i < out_num; i++) {
    auto cxx_type = mindspore::TypeId(out_dtypes[i]);
    auto type_val = mindspore::TypeIdToType(cxx_type);
    (void)type_list.emplace_back(type_val);
  }
  return type_list;
}

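// Build the output abstract from inferred shapes and types: a scalar or tensor abstract for
// a single output, or a tuple abstract for multiple outputs.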
AbstractBasePtr BuildAbstract(std::vector<BaseShapePtr> shapes, std::vector<TypePtr> types) {
  MS_EXCEPTION_IF_CHECK_FAIL(!shapes.empty(), "The list of shapes must not be empty!");
  MS_EXCEPTION_IF_CHECK_FAIL(!types.empty(), "The list of types must not be empty!");
  MS_EXCEPTION_IF_CHECK_FAIL(shapes.size() == types.size(), "The size of shapes and types must be equal!");
  if (shapes.size() == 1) {
    auto shape = shapes[0]->cast<ShapePtr>();
    MS_EXCEPTION_IF_NULL(shape);
    auto type = types[0];
    MS_EXCEPTION_IF_NULL(type);
    auto shape_vec = shape->shape();
    // If the shape is empty and the type is not a tensor type, return a scalar abstract.
    if (shape_vec.empty() && (!type->isa<mindspore::TensorType>())) {
      AbstractScalarPtr abs_scalar = std::make_shared<AbstractScalarImpl>(mindspore::kValueAny, type);
      return abs_scalar;
    }
    return MakeAbstractTensor(shape, type);
  } else {
    mindspore::abstract::AbstractBasePtrList ptr_list;
    for (size_t i = 0; i < shapes.size(); ++i) {
      auto shape = shapes[i];
      MS_EXCEPTION_IF_NULL(shape);
      auto type = types[i];
      MS_EXCEPTION_IF_NULL(type);
      auto tensor_abs = BuildAbstract({shape}, {type});
      (void)ptr_list.emplace_back(tensor_abs);
    }
    return std::make_shared<AbstractTupleImpl>(ptr_list);
  }
}

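// Create an abstract from a C-style shape array: a nullptr shape yields a scalar abstract
// (or a 0-dimensional tensor abstract for parameters), otherwise a tensor abstract with the
// given shape is returned.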
AbstractBasePtr GetAbstract(const TypePtr &type_ptr, const int64_t shape[], size_t shape_size, bool is_param) {
  if (shape == nullptr) {
    if (shape_size == 0) {
      if (is_param) {
        ShapeVector shape_vec{};
        return std::make_shared<AbstractTensorImpl>(type_ptr, shape_vec);
      }
      return std::make_shared<AbstractScalarImpl>(type_ptr);
    } else {
      MS_LOG(ERROR) << "Input [shape_size] must be 0 when [shape] is nullptr.";
      return nullptr;
    }
  }
  if (shape[0] == 0 && shape_size == 1) {
    ShapeVector shape_vec{};
    return std::make_shared<AbstractTensorImpl>(type_ptr, shape_vec);
  }
  ShapeVector shape_vec(shape, shape + shape_size);
  return std::make_shared<AbstractTensorImpl>(type_ptr, shape_vec);
}

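// Infer the output abstract of a primitive: try the unified FuncImpl inference first, then
// fall back to the frontend and backend infer implementations, and raise an exception if
// none is registered.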
AbstractBasePtr OpInferShapeAndType(const PrimitivePtr &prim, const mindspore::AbstractBasePtrList &args_abs_list) {
  MS_EXCEPTION_IF_NULL(prim);
  auto abstract_opt = mindspore::abstract::InferAbstractByFuncImpl(prim, args_abs_list);
  if (abstract_opt.has_value()) {
    return abstract_opt.value();
  }

  auto front_eval_impl = mindspore::abstract::GetFrontendPrimitiveInferImpl(prim);
  if (front_eval_impl.has_value()) {
    auto infer = front_eval_impl.value();
    MS_EXCEPTION_IF_CHECK_FAIL(infer.IsImplInferShapeAndType(), "There is no infer-abstract implementation!");
    auto abs = infer.InferShapeAndType(nullptr, prim, args_abs_list);
    return abs;
  }
  auto back_eval_impl = mindspore::abstract::GetBackendPrimitiveInferImpl(prim);
  if (back_eval_impl.has_value()) {
    auto infer = back_eval_impl.value();
    MS_EXCEPTION_IF_CHECK_FAIL(infer.IsImplInferShapeAndType(), "There is no infer-abstract implementation!");
    auto abs = infer.InferShapeAndType(nullptr, prim, args_abs_list);
    return abs;
  }
  MS_LOG(EXCEPTION) << "Get infer function failed, the operator has no infer-shape or infer-type function yet, "
                       "primitive name: "
                    << prim->name() << ", primitive type: " << prim->type_name();
}

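// Validate a CustomOpInfo: all mandatory fields must be set, and for both dtypes and shapes
// exactly one of the infer function or the static output description must be provided.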
STATUS CheckCustomOpInfo(const CustomOpInfo &info) {
  MS_ERROR_IF_FALSE_W_RET_N_LOG(info.func_name != nullptr, RET_ERROR, "The func_name of custom op must be specified!");
  MS_ERROR_IF_FALSE_W_RET_N_LOG(info.func_type != nullptr, RET_ERROR, "The func_type of custom op must be specified!");
  MS_ERROR_IF_FALSE_W_RET_N_LOG(info.target != nullptr, RET_ERROR, "The target of custom op must be specified!");
  MS_ERROR_IF_FALSE_W_RET_N_LOG(info.input_names != nullptr, RET_ERROR,
                                "The input_names of custom op must be specified!");
  MS_ERROR_IF_FALSE_W_RET_N_LOG(info.output_names != nullptr, RET_ERROR,
                                "The output_names of custom op must be specified!");
  MS_ERROR_IF_FALSE_W_RET_N_LOG(info.input_num > 0, RET_ERROR, "The input_num of custom op must be a positive value!");
  MS_ERROR_IF_FALSE_W_RET_N_LOG(info.output_num > 0, RET_ERROR,
                                "The output_num of custom op must be a positive value!");
  MS_ERROR_IF_TRUE_W_RET_N_LOG(info.attr_num < 0, RET_ERROR, "The attr_num of custom op must be non-negative!");
  MS_ERROR_IF_TRUE_W_RET_N_LOG(info.dtype_infer_func == nullptr && info.output_dtypes == nullptr, RET_ERROR,
                               "Either dtype infer function or output dtypes must be specified!");
  MS_ERROR_IF_TRUE_W_RET_N_LOG(info.dtype_infer_func != nullptr && info.output_dtypes != nullptr, RET_ERROR,
                               "Only one of dtype infer function and output dtypes should be specified!");
  MS_ERROR_IF_TRUE_W_RET_N_LOG(info.shape_infer_func == nullptr && info.output_shapes == nullptr, RET_ERROR,
                               "Either shape infer function or output shapes must be specified!");
  MS_ERROR_IF_TRUE_W_RET_N_LOG(info.shape_infer_func != nullptr && info.output_shapes != nullptr, RET_ERROR,
                               "Only one of shape infer function and output shapes should be specified!");
  MS_ERROR_IF_TRUE_W_RET_N_LOG(info.output_shapes != nullptr && info.output_dims == nullptr, RET_ERROR,
                               "Output dims must be specified if output_shapes are given!");
  MS_ERROR_IF_TRUE_W_RET_N_LOG(info.attr_num == 0 && (info.attr_names != nullptr || info.attr_values != nullptr),
                               RET_ERROR, "The attr_names and attr_values must be nullptr if attr_num is 0!");
  MS_ERROR_IF_TRUE_W_RET_N_LOG(info.attr_num != 0 && (info.attr_names == nullptr || info.attr_values == nullptr),
                               RET_ERROR, "The attr_names and attr_values must be specified if attr_num is non-zero!");
  MS_ERROR_IF_TRUE_W_RET_N_LOG(info.dtype_formats != nullptr && info.dtype_formats_num == 0, RET_ERROR,
                               "The dtype_formats_num of custom op must be non-zero if dtype_formats is specified!");
  MS_ERROR_IF_TRUE_W_RET_N_LOG(info.dtype_formats == nullptr && info.dtype_formats_num != 0, RET_ERROR,
                               "The dtype_formats_num of custom op must be zero if dtype_formats is not specified!");
  MS_ERROR_IF_TRUE_W_RET_N_LOG(std::string(info.func_name).find(".so:") == std::string::npos, RET_ERROR,
                               "The .so file path and function name must be provided in func_name!");
  return RET_OK;
}

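// Serialize a CustomOpInfo into the operator registration JSON, covering the dtype/format
// combinations, the input/output descriptions, and the imply type derived from func_type.
// Returns an empty json object on failure.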
nlohmann::json ConvertOpInfoToJson(const CustomOpInfo &info) {
  nlohmann::json obj;
  obj["attr"] = {};
  std::string target = info.target;
  obj["target"] = target;
  obj["op_name"] = "Custom" + std::string(info.func_name);
  obj["fusion_type"] = "OPAQUE";
  if (info.dtype_formats != nullptr) {
    std::vector<std::vector<std::string>> dtype_formats;
    for (size_t i = 0; i < info.dtype_formats_num; i++) {
      for (size_t j = 0; j < info.input_num + info.output_num; j++) {
        auto iter = kDTypeFmtEnumToStrMap.find(info.dtype_formats[i][j]);
        if (iter == kDTypeFmtEnumToStrMap.end()) {
          MS_LOG(ERROR) << "Unsupported DTypeFormat: " << info.dtype_formats[i][j];
          return {};
        }
        dtype_formats.push_back(iter->second);
      }
    }
    obj["dtype_format"] = {dtype_formats};
  }
  std::vector<nlohmann::json> js_inputs;
  for (size_t i = 0; i < info.input_num; i++) {
    nlohmann::json js_input;
    js_input["index"] = i;
    js_input["name"] = std::string(info.input_names[i]);
    js_input["paramType"] = "required";
    js_inputs.push_back(js_input);
  }
  obj["inputs"] = js_inputs;
  std::vector<nlohmann::json> js_outputs;
  for (size_t i = 0; i < info.output_num; i++) {
    nlohmann::json js_output;
    js_output["index"] = i;
    js_output["name"] = std::string(info.output_names[i]);
    js_output["paramType"] = "required";
    js_outputs.push_back(js_output);
  }
  obj["outputs"] = js_outputs;
  auto aot_imply_type = target == "Ascend" ? "BiSheng" : target;
  const std::map<std::string, std::string> func_type_to_imply_type = {
    {"hybrid", "AKG"},  {"akg", "AKG"},    {"tbe", "TBE"},         {"aicpu", "AICPU"},
    {"pyfunc", target}, {"julia", target}, {"aot", aot_imply_type}};
  auto iter = func_type_to_imply_type.find(std::string(info.func_type));
  if (iter == func_type_to_imply_type.end()) {
    MS_LOG(ERROR) << "Unsupported function type: " << std::string(info.func_type);
    return {};
  }
  auto imply_type = iter->second;
  obj["imply_type"] = imply_type;
  return obj;
}

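// Query the total physical memory of the host: via GlobalMemoryStatusEx on Windows and
// sysconf elsewhere.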
size_t GetMaxMallocSize() {
  size_t max_malloc_size = 0;
#if defined(_MSC_VER) || defined(_WIN32)
  MEMORYSTATUSEX status;
  status.dwLength = sizeof(status);
  GlobalMemoryStatusEx(&status);
  max_malloc_size = static_cast<size_t>(status.ullTotalPhys);
#else
  max_malloc_size = static_cast<size_t>(sysconf(_SC_PHYS_PAGES)) * static_cast<size_t>(sysconf(_SC_PAGESIZE));
#endif
  return max_malloc_size;
}