/**
 * Copyright 2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef MINDSPORE_INCLUDE_JS_API_MSLITE_MODEL_NAPI_H
17 #define MINDSPORE_INCLUDE_JS_API_MSLITE_MODEL_NAPI_H
18 
#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <vector>
#include "include/api/model.h"
#include "include/api/context.h"
#include "include/api/serialization.h"
#include "include/api/cell.h"
#include "common_napi.h"
#include "mslite_model_callback_napi.h"
#include "napi/native_api.h"
#include "napi/native_node_api.h"
#include "include/js_api/common_napi.h"
30 
31 namespace mindspore {
32 static const std::map<std::string, TensorFormat> tensorFormatMap = {
33     {"DEFAULT_FORMAT", TENSOR_DEFAULT_FORMAT},
34     {"NCHW", TENSOR_NCHW},
35     {"NHWC", TENSOR_NHWC},
36     {"NHWC4", TENSOR_NHWC4},
37     {"HWKC", TENSOR_HWKC},
38     {"HWCK", TENSOR_HWCK},
39     {"KCHW", TENSOR_KCHW}
40 };
41 static const std::map<std::string, TensorDataType> tensorDataTypeMap = {
42     {"TYPE_UNKNOWN", TENSOR_UNKNOWN},
43     {"NUMBER_TYPE_INT8", TENSOR_INT8},
44     {"NUMBER_TYPE_INT16", TENSOR_INT16},
45     {"NUMBER_TYPE_INT32", TENSOR_INT32},
46     {"NUMBER_TYPE_INT64", TENSOR_INT64},
47     {"NUMBER_TYPE_UINT8", TENSOR_UINT8},
48     {"NUMBER_TYPE_UINT16", TENSOR_UINT16},
49     {"NUMBER_TYPE_UINT32", TENSOR_UINT32},
50     {"NUMBER_TYPE_UINT64", TENSOR_UINT64},
51     {"NUMBER_TYPE_FLOAT16", TENSOR_FLOAT16},
52     {"NUMBER_TYPE_FLOAT32", TENSOR_FLOAT32},
53     {"NUMBER_TYPE_FLOAT64", TENSOR_FLOAT64}
54 };
55 static const std::map<std::string, ContextThreadAffinityMode> contextThreadAffinityModeMap = {
56     {"NO_AFFINITIES", CONTEXT_AFFINITY_MODE},
57     {"BIG_CORES_FIRST", CONTEXT_BIG_CORES_FIRST},
58     {"LITTLE_CORES_FIRST", CONTEXT_LITTLE_CORES_FIRST},
59 };
60 
61 static const std::map<std::string, ContextQuantizationType> contextQuantizationTypeMap = {
62   {"NO_QUANT", NO_QUANT},
63   {"WEIGHT_QUANT", WEIGHT_QUANT},
64   {"FULL_QUANT", FULL_QUANT},
65 };
66 
67 static const std::map<std::string, ContextOptimizationLevel> contextOptimizationLevelTypeMap = {
68   {"O0", O0},
69   {"O2", O2},
70   {"O3", O3},
71   {"AUTO", AUTO},
72 };
73 
74 static const std::map<std::string, ContextPerformanceMode> contextPerformanceModeTypeMap = {
75   {"PERFORMANCE_NONE", PERFORMANCE_NONE},
76   {"PERFORMANCE_LOW", PERFORMANCE_LOW},
77   {"PERFORMANCE_MEDIUM", PERFORMANCE_MEDIUM},
78   {"PERFORMANCE_HIGH", PERFORMANCE_HIGH},
79   {"PERFORMANCE_EXTREME", PERFORMANCE_EXTREME}
80 };
81 
82 static const std::map<std::string, ContextPriority> contextPriorityTypeMap = {
83   {"PRIORITY_NONE", PRIORITY_NONE},
84   {"PRIORITY_LOW", PRIORITY_LOW},
85   {"PRIORITY_MEDIUM", PRIORITY_MEDIUM},
86   {"PRIORITY_HIGH", PRIORITY_HIGH},
87 };
88 
89 static const std::map<std::string, ContextNnrtDeviceType> contextNnrtDeviceTypeTypeMap = {
90   {"NNRTDEVICE_OTHERS", NNRTDEVICE_OTHERS},
91   {"NNRTDEVICE_CPU", NNRTDEVICE_CPU},
92   {"NNRTDEVICE_GPU", NNRTDEVICE_GPU},
93   {"NNRTDEVICE_ACCELERATOR", NNRTDEVICE_ACCELERATOR},
94 };
95 
96 class MSLiteModelNapi {
97  public:
98   MSLiteModelNapi();
99   ~MSLiteModelNapi();
100 
101   static napi_value Init(napi_env env, napi_value exports);
102   std::shared_ptr<mindspore::Model> native_model_ = nullptr;
103 
104  private:
105   struct MSLiteModelAsyncContext {
106     napi_async_work work;
107     napi_deferred deferred = nullptr;
108     napi_ref callbackRef = nullptr;
109     int32_t status = SUCCESS;
110     MSLiteModelNapi *lite_model = nullptr;
111     ModelInfo model_info;
112     ContextInfo context;
113 
MSLiteModelAsyncContextMSLiteModelAsyncContext114     MSLiteModelAsyncContext() {
115       // setting context default value
116       context.target.push_back("cpu");
117       context.cpu_device.thread_num = 2;
118       context.cpu_device.thread_affinity_mode = 0;
119       context.cpu_device.precision_mode = "enforce_fp32";
120     }
121   };
122   static napi_value Constructor(napi_env env, napi_callback_info info);
123   static void Finalize(napi_env env, void *nativeObject, void *finalize);
124   static napi_value LoadMSLiteModelFromFile(napi_env env, napi_callback_info info);
125   static napi_value LoadMSLiteModelFromBuffer(napi_env env, napi_callback_info info);
126   static napi_value LoadMSLiteModelFromFd(napi_env env, napi_callback_info info);
127   static napi_value LoadMSLiteTrainModelFromFile(napi_env env, napi_callback_info info);
128   static napi_value LoadMSLiteTrainModelFromBuffer(napi_env env, napi_callback_info info);
129   static napi_value LoadMSLiteTrainModelFromFd(napi_env env, napi_callback_info info);
130   static napi_value GetInputs(napi_env env, napi_callback_info info);
131   static napi_value Resize(napi_env env, napi_callback_info info);
132   static napi_value PredictAsync(napi_env env, napi_callback_info info);
133   static napi_value RunStep(napi_env env, napi_callback_info info);
134   static napi_value GetWeights(napi_env env, napi_callback_info info);
135   static napi_value UpdateWeights(napi_env env, napi_callback_info info);
136   static napi_value SetupVirtualBatch(napi_env env, napi_callback_info info);
137   static napi_value ExportModel(napi_env env, napi_callback_info info);
138   static napi_value ExportWeightsCollaborateWithMicro(napi_env env, napi_callback_info info);
139   static napi_value GetTrainMode(napi_env env, napi_callback_info info);
140   static napi_value SetTrainMode(napi_env env, napi_callback_info info);
141   static napi_value GetLearningRate(napi_env env, napi_callback_info info);
142   static napi_value SetLearningRate(napi_env env, napi_callback_info info);
143   static int32_t ParseModelInfo(napi_env env, napi_value root, ModelInfo &model_info);
144   static int32_t ParseContextInfo(napi_env env, napi_value root, ContextInfo &info);
145   static int32_t ParseTrainCfgInfo(napi_env env, napi_value root, TrainConfig &cfg);
146   static void GetMSLiteModelAsyncCallbackComplete(napi_env env, napi_status status, void *data);
147   static void PredictAsyncCallbackComplete(napi_env env, napi_status status, void *data);
148   static napi_value CreateMSLiteModelWrapper(napi_env env, MSLiteModelAsyncContext *async_context);
149   static void CommonCallbackRoutine(napi_env env, MSLiteModelAsyncContext *&asyncContext, const napi_value &valueParam);
150   static std::shared_ptr<mindspore::Model> CreateModel(ModelInfo *model_info_ptr, ContextInfo *contex_ptr);
151   static std::shared_ptr<mindspore::Model> CreateTrainModel(ModelInfo *model_info_ptr, ContextInfo *contex_ptr);
152   static int32_t GetCpuDeviceInfo(napi_env env, napi_value args, ContextInfo &context);
153   static int32_t GetNNRTDeviceInfo(napi_env env, napi_value args, ContextInfo &context);
154   static int32_t GetDeviceInfoContext(ContextInfo *context_info_ptr,
155                                       std::vector<std::shared_ptr<DeviceInfoContext>> &device_infos);
156   static int32_t SetTensorData(napi_env env, napi_value thisVar, napi_value argv,
157                                MSLiteModelAsyncContext *async_context);
158   static napi_status AddNamedProperty(napi_env env, napi_value object, const std::string name, int32_t enumValue);
159   static napi_value GetAllNnrtDeviceDescs(napi_env env, napi_callback_info info);
160   static napi_value CreateFormatObject(napi_env env);
161   static napi_value CreateDataTypeObject(napi_env env);
162   static napi_value CreateThreadAffinityModeObject(napi_env env);
163   static napi_value CreateQuantizationTypeObject(napi_env env);
164   static napi_value CreateOptimizationLevelObject(napi_env env);
165   static napi_value CreatePerformanceModeObject(napi_env env);
166   static napi_value CreatePriorityObject(napi_env env);
167   static napi_value CreateNnrtDeviceTypeObject(napi_env env);
168 
169 
170   static thread_local napi_ref constructor_;
171   napi_env env_ = nullptr;
172   static napi_ref tensorFormat_;
173   static napi_ref tensorDataType_;
174   static napi_ref contextThreadAffinityMode_;
175   static napi_ref contextQuantizationType_;
176   static napi_ref contextOptimizationLevel_;
177   static napi_ref contextPerformanceMode_;
178   static napi_ref contextPriority_;
179   static napi_ref contextNnrtDeviceType_;
180 
181   static ModelInfo *model_info_;
182   static ContextInfo *context_;
183   static std::mutex create_mutex_;
184 };
185 }  // namespace mindspore
186 #endif  // MINDSPORE_INCLUDE_JS_API_MSLITE_MODEL_NAPI_H