/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NPU_NPU_CONVERTER_UTILS_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NPU_NPU_CONVERTER_UTILS_H_
#include <string>
#include <memory>
#include <vector>
#ifdef ENABLE_ARM
#include <arm_neon.h>
#endif
#include "schema/ops_generated.h"
#include "include/graph/tensor.h"
#include "include/graph/op/array_defs.h"
#include "include/api/types.h"
#include "include/api/data_type.h"
#include "include/graph/op/all_ops.h"
#include "src/common/log_adapter.h"
#include "nnacl/op_base.h"

namespace mindspore::lite {
enum NCHW_SHAPE { NCHW_INVALID = -1, NCHW_N = 0, NCHW_C = 1, NCHW_H = 2, NCHW_W = 3 };
enum NHWC_SHAPE { NHWC_N = 0, NHWC_H = 1, NHWC_W = 2, NHWC_C = 3 };
inline const std::vector<int> NHWC2NCHW_PERM = {0, 3, 1, 2};
inline const std::vector<int> NCHW2NHWC_PERM = {0, 2, 3, 1};

enum NPU_ACTIVATION_MODE {
  ACTIVATION_INVALID = -1,
  SIGMOID = 0,
  RELU = 1,
  TANH = 2,
  CLIPPED_RELU = 3,
  ELU = 4,
  P_RELU = 5,
  ABS = 6,
  RELU1 = 7,
  SOFTSIGN = 8,
  SOFTPLUS = 9,
  HARD_SIGMOID = 10,
  THRESHOLD_RELU = 11,
  SELU = 12,
  LINEAR = 13,
  RELU6 = 14,
  GELU = 15,
};

enum PAD {
  PAD_UP = 0,
  PAD_DOWN = 1,
  PAD_LEFT = 2,
  PAD_RIGHT = 3,
};

enum NPU_PAD_MODE {
  PAD_VALID = 5,
  PAD_SAME = 6,
};

#ifdef ENABLE_ARM
void Float32ToFloat16(const float *__restrict input, float16_t *__restrict output, int number);

void Float16ToFloat32(const float16_t *__restrict input, float *__restrict output, int number);
#endif

std::shared_ptr<ge::Tensor> ConverterToNPUTensor(mindspore::MSTensor src, bool is_expand_4d = false);

hiai::op::Data *ConverterToNPUData(const mindspore::MSTensor &src, const std::string &name);

ge::Format ConverterToNPUFormat(schema::Format format);

ge::DataType ConverterToNPUDataType(DataType type_id);

ge::Shape ConverterToNPUShape(const std::vector<int64_t> &src_shape, bool is_expand_4d = false);

int ConverterToNPUEltwiseMode(schema::EltwiseMode mode);

int ConverterToNPUActivationMode(schema::ActivationType type);

int TransFormAxis(int axis);

template <typename T>
hiai::op::Const *GetNPUConst(const uint8_t *const_data, const std::vector<int64_t> &shape,
                             const ge::DataType data_type, std::string name = "const", bool is_expand_4d = false) {
  MS_CHECK_TRUE_MSG(const_data != nullptr, nullptr, "Const data can not be nullptr.");
  int element_num = 1;
  if (!shape.empty()) {
    for (size_t i = 0; i < shape.size(); i++) {
      MS_CHECK_GT(shape.at(i), 0, nullptr);
      MS_CHECK_INT_MUL_NOT_OVERFLOW(element_num, shape.at(i), nullptr);
      element_num *= shape.at(i);
    }
  }
  ge::TensorDesc const_tensor_desc(ConverterToNPUShape(shape, is_expand_4d), ge::FORMAT_NCHW, data_type);
  ge::TensorPtr const_tensor = std::make_shared<hiai::Tensor>(const_tensor_desc);
  const_tensor->SetData(const_data, element_num * sizeof(T));
  auto const_op = new (std::nothrow) hiai::op::Const(name);
  if (const_op == nullptr) {
    MS_LOG(ERROR) << "New Const op failed.";
    return const_op;
  }
  const_op->set_attr_value(const_tensor);
  return const_op;
}
}  // namespace mindspore::lite
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NPU_NPU_CONVERTER_UTILS_H_
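// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, kept in a comment so it is not compiled):
// one plausible way a delegate op could wrap host-side weights into an NPU
// Const node via GetNPUConst<T>. The shape, the tensor name "conv_weight",
// and the ge::DT_FLOAT data type are assumptions made for this example, not
// values mandated by the header.
//
//   std::vector<float> weights(16 * 3 * 3 * 3, 0.0f);
//   std::vector<int64_t> shape = {16, 3, 3, 3};
//   hiai::op::Const *weight_const = mindspore::lite::GetNPUConst<float>(
//     reinterpret_cast<const uint8_t *>(weights.data()), shape, ge::DT_FLOAT, "conv_weight");
//   if (weight_const == nullptr) {
//     // Creation failed (null data, overflow, or allocation failure);
//     // the caller is responsible for reporting the error.
//   }
// ---------------------------------------------------------------------------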