• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2024 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "napi/native_api.h"
17 #include <hilog/log.h>
18 #include <rawfile/raw_file_manager.h>
19 #include <iostream>
20 #include <cstdlib>
21 #include <mindspore/types.h>
22 #include <mindspore/model.h>
23 #include <mindspore/context.h>
24 #include <mindspore/status.h>
25 #include <mindspore/tensor.h>
26 #include <sstream>
27 
28 #define LOGI(...) ((void)OH_LOG_Print(LOG_APP, LOG_INFO, LOG_DOMAIN, "[MSLiteNapi]", __VA_ARGS__))
29 #define LOGD(...) ((void)OH_LOG_Print(LOG_APP, LOG_DEBUG, LOG_DOMAIN, "[MSLiteNapi]", __VA_ARGS__))
30 #define LOGW(...) ((void)OH_LOG_Print(LOG_APP, LOG_WARN, LOG_DOMAIN, "[MSLiteNapi]", __VA_ARGS__))
31 #define LOGE(...) ((void)OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_DOMAIN, "[MSLiteNapi]", __VA_ARGS__))
32 
33 constexpr int K_NUM_PRINT_OF_OUT_DATA = 20;
34 
FillInputTensor(OH_AI_TensorHandle input,std::vector<float> input_data)35 int FillInputTensor(OH_AI_TensorHandle input, std::vector<float> input_data)
36 {
37     if (OH_AI_TensorGetDataType(input) == OH_AI_DATATYPE_NUMBERTYPE_FLOAT32) {
38         float *data = (float *)OH_AI_TensorGetMutableData(input);
39         for (size_t i = 0; i < OH_AI_TensorGetElementNum(input); i++) {
40             data[i] = input_data[i];
41         }
42         return OH_AI_STATUS_SUCCESS;
43     } else {
44         return OH_AI_STATUS_LITE_ERROR;
45     }
46 }
47 
ReadModelFile(NativeResourceManager * nativeResourceManager,const std::string & modelName,size_t * modelSize)48 void *ReadModelFile(NativeResourceManager *nativeResourceManager, const std::string &modelName, size_t *modelSize)
49 {
50     auto rawFile = OH_ResourceManager_OpenRawFile(nativeResourceManager, modelName.c_str());
51     if (rawFile == nullptr) {
52         LOGE("MS_LITE_ERR: Open model file failed");
53         return nullptr;
54     }
55     long fileSize = OH_ResourceManager_GetRawFileSize(rawFile);
56     if (fileSize <= 0) {
57         LOGE("MS_LITE_ERR: FileSize not correct");
58     }
59     void *modelBuffer = malloc(fileSize);
60     if (modelBuffer == nullptr) {
61         LOGE("MS_LITE_ERR: OH_ResourceManager_ReadRawFile failed");
62     }
63     int ret = OH_ResourceManager_ReadRawFile(rawFile, modelBuffer, fileSize);
64     if (ret == 0) {
65         LOGI("MS_LITE_LOG: OH_ResourceManager_ReadRawFile failed");
66         OH_ResourceManager_CloseRawFile(rawFile);
67         return nullptr;
68     }
69     OH_ResourceManager_CloseRawFile(rawFile);
70     *modelSize = fileSize;
71     return modelBuffer;
72 }
73 
// Releases a model buffer allocated by ReadModelFile and clears the caller's
// pointer so a stale value cannot be double-freed.
void DestroyModelBuffer(void **buffer)
{
    if (buffer != nullptr) {
        free(*buffer);
        *buffer = nullptr;
    }
}
82 
CreateMSLiteModel(void * modelBuffer,size_t modelSize)83 OH_AI_ModelHandle CreateMSLiteModel(void *modelBuffer, size_t modelSize)
84 {
85     // Set executing context for model.
86     auto context = OH_AI_ContextCreate();
87     if (context == nullptr) {
88         DestroyModelBuffer(&modelBuffer);
89         LOGE("MS_LITE_ERR: Create MSLite context failed.\n");
90         return nullptr;
91     }
92     auto cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
93     OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, true);
94     OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
95 
96     // Create model
97     auto model = OH_AI_ModelCreate();
98     if (model == nullptr) {
99         DestroyModelBuffer(&modelBuffer);
100         LOGE("MS_LITE_ERR: Allocate MSLite Model failed.\n");
101         return nullptr;
102     }
103 
104     // Build model object
105     auto build_ret = OH_AI_ModelBuild(model, modelBuffer, modelSize, OH_AI_MODELTYPE_MINDIR, context);
106     DestroyModelBuffer(&modelBuffer);
107     if (build_ret != OH_AI_STATUS_SUCCESS) {
108         OH_AI_ModelDestroy(&model);
109         LOGE("MS_LITE_ERR: Build MSLite model failed.\n");
110         return nullptr;
111     }
112     LOGI("MS_LITE_LOG: Build MSLite model success.\n");
113     return model;
114 }
115 
RunMSLiteModel(OH_AI_ModelHandle model,std::vector<float> input_data)116 int RunMSLiteModel(OH_AI_ModelHandle model, std::vector<float> input_data)
117 {
118     // Set input data for model.
119     auto inputs = OH_AI_ModelGetInputs(model);
120     auto ret = FillInputTensor(inputs.handle_list[0], input_data);
121     if (ret != OH_AI_STATUS_SUCCESS) {
122         LOGE("MS_LITE_ERR: RunMSLiteModel set input error.\n");
123         return OH_AI_STATUS_LITE_ERROR;
124     }
125 
126     // Get model output.
127     auto outputs = OH_AI_ModelGetOutputs(model);
128 
129     // Predict model.
130     auto predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
131     if (predict_ret != OH_AI_STATUS_SUCCESS) {
132         OH_AI_ModelDestroy(&model);
133         LOGE("MS_LITE_ERR: MSLite Predict error.\n");
134         return OH_AI_STATUS_LITE_ERROR;
135     }
136     LOGI("MS_LITE_LOG: Run MSLite model Predict success.\n");
137 
138     // Print output tensor data.
139     LOGI("MS_LITE_LOG: Get model outputs:\n");
140     for (size_t i = 0; i < outputs.handle_num; i++) {
141         auto tensor = outputs.handle_list[i];
142         LOGI("MS_LITE_LOG: - Tensor %{public}d name is: %{public}s.\n", static_cast<int>(i),
143              OH_AI_TensorGetName(tensor));
144         LOGI("MS_LITE_LOG: - Tensor %{public}d size is: %{public}d.\n", static_cast<int>(i),
145              (int)OH_AI_TensorGetDataSize(tensor));
146         LOGI("MS_LITE_LOG: - Tensor data is:\n");
147         auto out_data = reinterpret_cast<const float *>(OH_AI_TensorGetData(tensor));
148         std::stringstream outStr;
149         for (int i = 0; (i < OH_AI_TensorGetElementNum(tensor)) && (i <= K_NUM_PRINT_OF_OUT_DATA); i++) {
150             outStr << out_data[i] << " ";
151         }
152         LOGI("MS_LITE_LOG: %{public}s", outStr.str().c_str());
153     }
154     return OH_AI_STATUS_SUCCESS;
155 }
156 
RunDemo(napi_env env,napi_callback_info info)157 static napi_value RunDemo(napi_env env, napi_callback_info info)
158 {
159     // run demo
160     napi_value error_ret;
161     napi_create_int32(env, -1, &error_ret);
162     // 传入数据处理
163     size_t argc = 2;
164     napi_value argv[2] = {nullptr};
165     napi_get_cb_info(env, info, &argc, argv, nullptr, nullptr);
166     bool isArray = false;
167     napi_is_array(env, argv[0], &isArray);
168     uint32_t length = 0;
169     // 获取数组的长度
170     napi_get_array_length(env, argv[0], &length);
171     std::vector<float> input_data;
172     double param = 0;
173     for (int i = 0; i < length; i++) {
174         napi_value value;
175         napi_get_element(env, argv[0], i, &value);
176         napi_get_value_double(env, value, &param);
177         input_data.push_back(static_cast<float>(param));
178     }
179     std::stringstream outstr;
180     for (int i = 0; i < K_NUM_PRINT_OF_OUT_DATA; i++) {
181         outstr << input_data[i] << " ";
182     }
183     // Read model file
184     const std::string modelName = "mobilenetv2.ms";
185     size_t modelSize;
186     auto resourcesManager = OH_ResourceManager_InitNativeResourceManager(env, argv[1]);
187     auto modelBuffer = ReadModelFile(resourcesManager, modelName, &modelSize);
188     if (modelBuffer == nullptr) {
189         return error_ret;
190     }
191     auto model = CreateMSLiteModel(modelBuffer, modelSize);
192     int ret = RunMSLiteModel(model, input_data);
193     if (ret != OH_AI_STATUS_SUCCESS) {
194         OH_AI_ModelDestroy(&model);
195         return error_ret;
196     }
197     napi_value out_data;
198     napi_create_array(env, &out_data);
199     auto outputs = OH_AI_ModelGetOutputs(model);
200     OH_AI_TensorHandle output_0 = outputs.handle_list[0];
201     float *output0Data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(output_0));
202     for (size_t i = 0; i < OH_AI_TensorGetElementNum(output_0); i++) {
203         napi_value element;
204         napi_create_double(env, static_cast<double>(output0Data[i]), &element);
205         napi_set_element(env, out_data, i, element);
206     }
207     OH_AI_ModelDestroy(&model);
208     return out_data;
209 }
210 
211 EXTERN_C_START
Init(napi_env env,napi_value exports)212 static napi_value Init(napi_env env, napi_value exports)
213 {
214     napi_property_descriptor desc[] = {{"runDemo", nullptr, RunDemo, nullptr, nullptr, nullptr, napi_default, nullptr}};
215     napi_define_properties(env, exports, sizeof(desc) / sizeof(desc[0]), desc);
216     return exports;
217 }
218 EXTERN_C_END
219 
220 static napi_module demoModule = {
221     .nm_version = 1,
222     .nm_flags = 0,
223     .nm_filename = nullptr,
224     .nm_register_func = Init,
225     .nm_modname = "entry",
226     .nm_priv = ((void *)0),
227     .reserved = {0},
228 };
229 
RegisterEntryModule(void)230 extern "C" __attribute__((constructor)) void RegisterEntryModule(void) { napi_module_register(&demoModule); }
231