From f34ebb471d558dda42c76143d886aad445ae473e Mon Sep 17 00:00:00 2001
From: Zhu Guodong <zhuguodong0001@163.com>
Date: Tue, 18 Apr 2023 16:18:26 +0800
Subject: [PATCH] auto-apply
 0007-support-third-party-model-in-nnrt-delegate.patch

---
 include/api/context.h                         | 10 +++
 include/c_api/types_c.h                       |  2 +-
 .../plugin/device/cpu/kernel/nnacl/op_base.h  |  1 +
 mindspore/lite/BUILD.gn                       |  1 +
 mindspore/lite/include/context.h              |  5 ++
 mindspore/lite/include/lite_types.h           |  1 +
 mindspore/lite/mindir/src/utils.cc            |  2 +-
 mindspore/lite/src/CMakeLists.txt             |  6 +-
 mindspore/lite/src/common/context_util.cc     | 14 +++-
 .../common/ops/populate/custom_populate.cc    | 11 +++
 .../lite/src/runtime/cxx_api/converters.cc    | 10 +++
 .../lite/src/runtime/cxx_api/converters.h     |  1 +
 .../src/runtime/delegate/nnrt/CMakeLists.txt  | 27 ++-----
 .../delegate/nnrt/checker/primitive_check.cc  |  2 +
 .../runtime/delegate/nnrt/nnrt_delegate.cc    | 13 ++--
 .../delegate/nnrt/nnrt_model_kernel.cc        |  2 +-
 .../src/runtime/delegate/nnrt/nnrt_stub.cc    | 78 +++++++++++++++++++
 mindspore/lite/src/runtime/infer_manager.cc   |  3 +-
 mindspore/lite/src/runtime/inner_context.cc   | 12 ++-
 .../lite/src/runtime/kernel/cpu/BUILD.gn      |  1 +
 .../runtime/kernel/cpu/base/custom_base.cc    | 46 +++++++++++
 .../src/runtime/kernel/cpu/base/custom_base.h | 43 ++++++++++
 mindspore/lite/src/runtime/lite_session.cc    | 22 ++++++
 mindspore/lite/src/runtime/lite_session.h     |  1 +
 mindspore/lite/src/runtime/scheduler.cc       | 15 +++-
 .../lite/tools/benchmark/benchmark_base.cc    |  2 +-
 .../lite/tools/benchmark/benchmark_base.h     |  2 +-
 .../lite/tools/benchmark/benchmark_c_api.cc   |  4 +
 .../tools/benchmark/benchmark_unified_api.cc  |  5 ++
 29 files changed, 303 insertions(+), 39 deletions(-)
 create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/nnrt_stub.cc
 create mode 100644 mindspore/lite/src/runtime/kernel/cpu/base/custom_base.cc
 create mode 100644 mindspore/lite/src/runtime/kernel/cpu/base/custom_base.h

diff --git a/include/api/context.h b/include/api/context.h
index 93ed2706..d88b9d44 100644
--- a/include/api/context.h
+++ b/include/api/context.h
@@ -31,6 +31,8 @@ enum DeviceType {
   kAscend,
   kAscend910,
   kAscend310,
+  // ohos-only device range: [60, 80)
+  kNNRt = 60,
   // add new type here
   kInvalidDeviceType = 100,
 };
@@ -510,5 +512,13 @@ void AscendDeviceInfo::SetBufferOptimizeMode(const std::string &buffer_optimize_
   SetBufferOptimizeMode(StringToChar(buffer_optimize_mode));
 }
 std::string AscendDeviceInfo::GetBufferOptimizeMode() const { return CharToString(GetBufferOptimizeModeChar()); }
+
+class MS_API NNRTDeviceInfo : public DeviceInfoContext {
+ public:
+  /// \brief Get the type of this DeviceInfoContext.
+  ///
+  /// \return Type of this DeviceInfoContext.
+  enum DeviceType GetDeviceType() const override { return DeviceType::kNNRt; }
+};
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_CONTEXT_H
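
With NNRTDeviceInfo exposed in the public header, selecting the NNRT backend from the C++ API is a one-line addition to the device list. A minimal sketch of the caller side (Model::Build and MutableDeviceInfos() are the stock MindSpore Lite C++ API rather than part of this patch; loading the model buffer is elided):

    #include "include/api/context.h"
    #include "include/api/model.h"

    // Route inference through NNRT; the runtime re-adds CPU as the fallback.
    auto context = std::make_shared<mindspore::Context>();
    context->MutableDeviceInfos().push_back(std::make_shared<mindspore::NNRTDeviceInfo>());

    mindspore::Model model;
    // model_buf/model_size: a MindIR-Lite flatbuffer supplied by the caller.
    auto status = model.Build(model_buf, model_size, mindspore::kMindIR, context);

Because NNRTDeviceInfo adds no fields of its own yet, selection is purely by device type.
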
diff --git a/include/c_api/types_c.h b/include/c_api/types_c.h
index dba54ffa..fdf91f5a 100644
--- a/include/c_api/types_c.h
+++ b/include/c_api/types_c.h
@@ -40,7 +40,7 @@ typedef enum OH_AI_DeviceType {
   OH_AI_DEVICETYPE_KIRIN_NPU,
   // add new type here
   // ohos-only device range: [60, 80)
-  OH_AI_DEVICETYPE__NNRT = 60,
+  OH_AI_DEVICETYPE_NNRT = 60,
   OH_AI_DEVICETYPE_INVALID = 100,
 } OH_AI_DeviceType;

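The rename fixes a user-visible typo (a double underscore) in the C API enum, so existing callers of OH_AI_DEVICETYPE__NNRT must move to the corrected constant. A minimal sketch (OH_AI_ContextCreate is the usual OHOS C API entry point, assumed here; the two device-info calls are the same ones benchmark_c_api.cc uses at the end of this patch):

    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    OH_AI_DeviceInfoHandle nnrt_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
    OH_AI_ContextAddDeviceInfo(context, nnrt_info);
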
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/op_base.h b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/op_base.h
index 26221249..5876bdf6 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/op_base.h
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/op_base.h
@@ -519,6 +519,7 @@ enum PrimType {
   PrimType_Inner_Identity = 10002,
   PrimType_Inner_ShapeFusion = 10003,
   PrimType_Inner_GraphKernel = 10004,
+  PrimType_Inner_ThirdPartyModel = 10005,
   PrimType_InnerOpMax,
   PrimType_InnerOpMin = PrimType_Inner_ToFormat
 };
diff --git a/mindspore/lite/BUILD.gn b/mindspore/lite/BUILD.gn
index a472283a..d761b69c 100644
--- a/mindspore/lite/BUILD.gn
+++ b/mindspore/lite/BUILD.gn
@@ -197,6 +197,7 @@ cpu_kernel_sources = [
   "src/runtime/kernel/cpu/base/constant_of_shape.cc",
   "src/runtime/kernel/cpu/base/convolution_base.cc",
   "src/runtime/kernel/cpu/base/crop_base.cc",
+  "src/runtime/kernel/cpu/base/custom_base.cc",
   "src/runtime/kernel/cpu/base/detection_post_process_base.cc",
   "src/runtime/kernel/cpu/base/format_transpose.cc",
   "src/runtime/kernel/cpu/base/gather_base.cc",
diff --git a/mindspore/lite/include/context.h b/mindspore/lite/include/context.h
index 915f2f66..22bd24df 100644
--- a/mindspore/lite/include/context.h
+++ b/mindspore/lite/include/context.h
@@ -50,12 +50,17 @@ typedef struct AscendDeviceInfo {
   std::string batch_size_;
   std::string image_size_;
 } AscendDeviceInfo;
+
+typedef struct NNRtDeviceInfo {
+} NNRtDeviceInfo;
+
 /// \brief DeviceInfo defined for backend's configuration information.
 struct DeviceInfo {
   CpuDeviceInfo cpu_device_info_;
   GpuDeviceInfo gpu_device_info_;
   NpuDeviceInfo npu_device_info_;
   AscendDeviceInfo ascend_device_info_;
+  NNRtDeviceInfo nnrt_device_info_;
 };

 /// \brief DeviceContext defined for holding backend's configuration information.
diff --git a/mindspore/lite/include/lite_types.h b/mindspore/lite/include/lite_types.h
index 5fa59923..d1f70b68 100644
--- a/mindspore/lite/include/lite_types.h
+++ b/mindspore/lite/include/lite_types.h
@@ -41,6 +41,7 @@ typedef enum {
   DT_GPU,    /**< GPU device type */
   DT_NPU,    /**< NPU device type */
   DT_ASCEND, /**< ASCEND device type */
+  DT_NNRT,   /**< NNRT device type */
   DT_END     /**< NO device type */
 } DeviceType;

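In the legacy lite API, DT_NNRT rides on the existing DeviceContext/DeviceInfo structs, and since NNRtDeviceInfo is an empty struct for now, an NNRT entry carries no backend-specific settings. A hedged sketch of forming such an entry, mirroring what ContextUtils::AddNNRtDevice does later in this patch:

    lite::DeviceInfo device_info = {0};  // nnrt_device_info_ has no fields yet
    context->device_list_.push_back({lite::DT_NNRT, device_info});  // context: a lite::Context *
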
diff --git a/mindspore/lite/mindir/src/utils.cc b/mindspore/lite/mindir/src/utils.cc
index ca5f7f4b..6a02d6bb 100644
--- a/mindspore/lite/mindir/src/utils.cc
+++ b/mindspore/lite/mindir/src/utils.cc
@@ -21,7 +21,7 @@ namespace lite {

 // ********** PrimitiveBase **********
 NodeType MindIR_Primitive_GetType(PrimitivePtr primitive) {
-  auto prim = flatbuffers::GetMutableRoot<schema::Primitive>(primitive);
+  auto prim = static_cast<schema::Primitive *>(primitive);
   auto type = prim->value_type();
   return static_cast<NodeType>(type);
 }
diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt
index 3ba3c923..16ae2e63 100644
--- a/mindspore/lite/src/CMakeLists.txt
+++ b/mindspore/lite/src/CMakeLists.txt
@@ -428,6 +428,11 @@ add_subdirectory(runtime/kernel/cpu)
 add_library(lite_src_mid OBJECT ${LITE_SRC})
 add_dependencies(lite_src_mid fbs_src)

+if(SUPPORT_NNRT)
+    add_subdirectory(runtime/delegate/nnrt)
+    target_link_libraries(lite_src_mid nnrt_mid)
+endif()
+
 if(MSLITE_ENABLE_ACL AND NOT MSLITE_ENABLE_CLOUD_FUSION_INFERENCE)
     include_directories(${TOP_DIR}/graphengine/inc/external)
     add_subdirectory(extendrt/kernel/ascend)
@@ -493,7 +498,6 @@ if(MSLITE_ENABLE_MINDRT)
 endif()

 if (SUPPORT_NNRT)
-    add_subdirectory(runtime/delegate/nnrt)
     target_link_libraries(mindspore-lite nnrt_mid)
     target_link_libraries(mindspore-lite_static nnrt_mid)
 endif()
diff --git a/mindspore/lite/src/common/context_util.cc b/mindspore/lite/src/common/context_util.cc
index c446fc6e..ac8534f5 100644
--- a/mindspore/lite/src/common/context_util.cc
+++ b/mindspore/lite/src/common/context_util.cc
@@ -106,6 +106,17 @@ std::shared_ptr<mindspore::AscendDeviceInfo> AscendDeviceInfoFromAscendDeviceCon
   ascend_info->SetDynamicImageSize(ascend_context.device_info_.ascend_device_info_.image_size_);
   return ascend_info;
 }
+
+std::shared_ptr<mindspore::NNRTDeviceInfo> NNRtDeviceInfoFromNNRtDeviceContext(
+  const lite::DeviceContext &nnrt_context) {
+  if (nnrt_context.device_type_ != DT_NNRT) {
+    MS_LOG(ERROR) << "Function input parameter is not NNRt context.";
+    return nullptr;
+  }
+  auto nnrt_info = std::make_shared<mindspore::NNRTDeviceInfo>();
+  MS_CHECK_TRUE_RET(nnrt_info != nullptr, nullptr);
+  return nnrt_info;
+}
 }  // namespace

 mindspore::Context *MSContextFromContext(const lite::Context *context) {
@@ -127,7 +138,8 @@ mindspore::Context *MSContextFromContext(const lite::Context *context) {
     transfer_funcs = {{DT_CPU, CPUDeviceInfoFromCPUDeviceContext},
                       {DT_GPU, GPUDeviceInfoFromGPUDeviceContext},
                       {DT_NPU, NPUDeviceInfoFromNPUDeviceContext},
-                      {DT_ASCEND, AscendDeviceInfoFromAscendDeviceContext}};
+                      {DT_ASCEND, AscendDeviceInfoFromAscendDeviceContext},
+                      {DT_NNRT, NNRtDeviceInfoFromNNRtDeviceContext}};
   for (auto &device_context : context->device_list_) {
     auto device_type = device_context.device_type_;
     if (transfer_funcs.find(device_type) == transfer_funcs.end()) {
diff --git a/mindspore/lite/src/common/ops/populate/custom_populate.cc b/mindspore/lite/src/common/ops/populate/custom_populate.cc
index b0b21047..f1506ece 100644
--- a/mindspore/lite/src/common/ops/populate/custom_populate.cc
+++ b/mindspore/lite/src/common/ops/populate/custom_populate.cc
@@ -51,6 +51,17 @@ OpParameter *PopulateCustomParameter(const void *prim) {
     // Just use the attr_data pointer to save the prim directly, the inner value is parsed as necessary.
     param->attr_data[0] = static_cast<char *>(const_cast<void *>(prim));
     return reinterpret_cast<OpParameter *>(param);
+  } else if (type == "ThirdPartyModel") {
+    auto *param = static_cast<CustomParameter *>(malloc(sizeof(CustomParameter)));
+    if (param == nullptr) {
+      MS_LOG(ERROR) << "malloc CustomParameter failed.";
+      return nullptr;
+    }
+    memset(param, 0, sizeof(CustomParameter));
+    param->op_parameter_.type_ = PrimType_Inner_ThirdPartyModel;
+    // Just use the attr_data pointer to save the prim directly, the inner value is parsed as necessary.
+    param->attr_data[0] = static_cast<char *>(const_cast<void *>(prim));
+    return reinterpret_cast<OpParameter *>(param);
   } else {
     MS_LOG(ERROR) << "Unsupported custom type: " << type;
   }
diff --git a/mindspore/lite/src/runtime/cxx_api/converters.cc b/mindspore/lite/src/runtime/cxx_api/converters.cc
index 02f1c9ec..23a02778 100644
--- a/mindspore/lite/src/runtime/cxx_api/converters.cc
+++ b/mindspore/lite/src/runtime/cxx_api/converters.cc
@@ -72,6 +72,12 @@ Status ContextUtils::AddAscendDevice(lite::InnerContext *inner_context, DeviceIn
   return kSuccess;
 }

+Status ContextUtils::AddNNRtDevice(lite::InnerContext *inner_context) {
+  lite::DeviceInfo device_info = {0};
+  inner_context->device_list_.push_back({lite::DT_NNRT, device_info});
+  return kSuccess;
+}
+
 lite::InnerContext *ContextUtils::Convert(Context *context) {
   auto inner_context = std::make_unique<lite::InnerContext>();
   if ((context == nullptr) || (inner_context == nullptr)) {
@@ -115,6 +121,8 @@ lite::InnerContext *ContextUtils::Convert(Context *context) {
       ret = AddNpuDevice(npu_context->GetFrequency(), inner_context.get());
     } else if (device->GetDeviceType() == kAscend) {
       ret = AddAscendDevice(inner_context.get(), device.get());
+    } else if (device->GetDeviceType() == kNNRt) {
+      ret = AddNNRtDevice(inner_context.get());
     }
     if (ret != kSuccess) {
       MS_LOG(ERROR) << "Add device failed!";
@@ -153,6 +161,8 @@ lite::InnerContext *ContextUtils::Convert(const ContextC *context_c) {
                          device_info_c->provider_device, device_info_c->allocator, inner_context.get());
     } else if (device_info_c->device_type == OH_AI_DEVICETYPE_KIRIN_NPU) {
       ret = AddNpuDevice(device_info_c->frequency, inner_context.get());
+    } else if (device_info_c->device_type == OH_AI_DEVICETYPE_NNRT) {
+      ret = AddNNRtDevice(inner_context.get());
     }
     if (ret != kSuccess) {
       MS_LOG(ERROR) << "Add device failed!";
diff --git a/mindspore/lite/src/runtime/cxx_api/converters.h b/mindspore/lite/src/runtime/cxx_api/converters.h
index 7eb2df24..11338875 100644
--- a/mindspore/lite/src/runtime/cxx_api/converters.h
+++ b/mindspore/lite/src/runtime/cxx_api/converters.h
@@ -45,6 +45,7 @@ class ContextUtils {
                              lite::InnerContext *inner_context);
   static Status AddNpuDevice(int frequency, lite::InnerContext *inner_context);
   static Status AddAscendDevice(lite::InnerContext *inner_context, DeviceInfoContext *device);
+  static Status AddNNRtDevice(lite::InnerContext *inner_context);
   static bool IsAffinityModeValid(int affinity_mode) {
     return affinity_mode >= lite::NO_BIND && affinity_mode <= lite::MID_CPU;
   }
diff --git a/mindspore/lite/src/runtime/delegate/nnrt/CMakeLists.txt b/mindspore/lite/src/runtime/delegate/nnrt/CMakeLists.txt
index 70aa63f3..625459e2 100644
--- a/mindspore/lite/src/runtime/delegate/nnrt/CMakeLists.txt
+++ b/mindspore/lite/src/runtime/delegate/nnrt/CMakeLists.txt
@@ -1,30 +1,13 @@
 include_directories(${DDK_PATH})
 include_directories($(CCSRC_DIR)/plugin/device/cpu/kernel)
+include_directories(${CMAKE_SOURCE_DIR}/../../../../../../foundation/ai/neural_network_runtime/)

 include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
-#include_directories(/home/tony/wty/workspace/ohos/third_party/mindspore/mindspore/lite/mindir/include/inner)
-#include_directories(/home/tony/wty/workspace/ohos/third_party/mindspore/mindspore/lite/mindir/include)
+
 file(GLOB_RECURSE NNRT_SRC
         ${CMAKE_CURRENT_SOURCE_DIR}/*.cc
 )
-
-#add_library(hiai SHARED IMPORTED)
-#set_target_properties(hiai PROPERTIES IMPORTED_LOCATION
-#        ${DDK_LIB_PATH}/libhiai.so)
-#add_library(hiai_ir SHARED IMPORTED)
-#set_target_properties(hiai_ir PROPERTIES IMPORTED_LOCATION
-#        ${DDK_LIB_PATH}/libhiai_ir.so)
-#add_library(hiai_ir_build SHARED IMPORTED)
-#set_target_properties(hiai_ir_build PROPERTIES IMPORTED_LOCATION
-#        ${DDK_LIB_PATH}/libhiai_ir_build.so)
-#add_library(npu_kernel_mid OBJECT ${NPU_RUNTIME_SRC})
-#add_dependencies(npu_kernel_mid fbs_src)
-#target_link_libraries(
-#        npu_kernel_mid
-#        hiai
-#        hiai_ir
-#        hiai_ir_build
-#)
-
 file(GLOB convert_source checker/*.cc)
-add_library(nnr_mid OBJECT ${NNRT_SRC} ${convert_source} )
\ No newline at end of file
+
+add_library(nnrt_mid OBJECT ${NNRT_SRC} ${convert_source})
+target_include_directories(nnrt_mid PUBLIC ${CMAKE_SOURCE_DIR}/../../../../../../foundation/ai/neural_network_runtime/)
\ No newline at end of file
diff --git a/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.cc b/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.cc
index a647796c..c2b35393 100644
--- a/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.cc
+++ b/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.cc
@@ -109,6 +109,8 @@ Status CheckPrimitiveSupported(const schema::Primitive *primitive) {
         return mindspore::kSuccess;
       case schema::PrimitiveType_Unsqueeze:
         return mindspore::kSuccess;
+      case schema::PrimitiveType_Custom:
+        return mindspore::kSuccess;
       default: {
         MS_LOG(WARNING) << "No primitive type :" << (int)(type);
         return mindspore::kLiteSuccessExit;
diff --git a/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.cc b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.cc
index 34897331..67d4e6c4 100644
--- a/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.cc
+++ b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.cc
@@ -19,8 +19,11 @@
 #include "interfaces/kits/c/neural_network_runtime.h"
 #include "interfaces/innerkits/c/neural_network_runtime_inner.h"
 #include "nnrt_model_kernel.h"
+#include "schema/model_generated.h"
+#include "flatbuffers/flatbuffers.h"

 mindspore::Status mindspore::NNRTDelegate::Build(DelegateModel<schema::Primitive> *model) {
+  MS_LOG(DEBUG) << "Start to build NNRT model.";
   if (this->nnrt_lite_graph == nullptr) {
     MS_LOG(ERROR) << "nnrt_lite_graph is nullptr.";
     return mindspore::kLiteError;
@@ -43,7 +46,7 @@ mindspore::Status mindspore::NNRTDelegate::Build(DelegateModel<schema::Primitive
     OH_NNModel_Destroy(&oh_nnmodel);
     return mindspore::kLiteError;
   }
-  MS_LOG(INFO) << "NNRTDelegate creates NNModel success.";
+  MS_LOG(DEBUG) << "NNRTDelegate creates NNModel success.";

   OH_NNCompilation *oh_nn_compilation = nullptr;
   oh_nn_compilation = OH_NNCompilation_Construct(oh_nnmodel);
@@ -53,7 +56,7 @@ mindspore::Status mindspore::NNRTDelegate::Build(DelegateModel<schema::Primitive
     OH_NNModel_Destroy(&oh_nnmodel);
     return mindspore::kLiteError;
   }
-  MS_LOG(INFO) << "NNRTDelegate creates NNCompilation success.";
+  MS_LOG(DEBUG) << "NNRTDelegate creates NNCompilation success.";

   const size_t *allDevicesID = nullptr;
   uint32_t device_count = 0;
@@ -71,7 +74,7 @@ mindspore::Status mindspore::NNRTDelegate::Build(DelegateModel<schema::Primitive
     // OH_NNModel_Destroy(&oh_nnmodel);
     return mindspore::kSuccess;
   }
-  MS_LOG(INFO) << "NNRTDelegate GetAllDevicesID success.";
+  MS_LOG(DEBUG) << "NNRTDelegate GetAllDevicesID success. device_count: " << device_count;

   // check if model ops are supported
   const bool *issupported = nullptr;
@@ -98,7 +101,7 @@ mindspore::Status mindspore::NNRTDelegate::Build(DelegateModel<schema::Primitive
     OH_NNModel_Destroy(&oh_nnmodel);
     return mindspore::kSuccess;
   }
-  MS_LOG(INFO) << "NNRtDelegate supports all op in this model.";
+  MS_LOG(DEBUG) << "NNRtDelegate supports all op in this model.";

   ret_code = OH_NNCompilation_SetDevice(oh_nn_compilation, allDevicesID[0]);

@@ -141,7 +144,7 @@ mindspore::Status mindspore::NNRTDelegate::Build(DelegateModel<schema::Primitive
   KernelIter end = model->EndKernelIterator();
   model->Replace(from, end, nnr_model_kernel);

-  MS_LOG(INFO) << "NNRTDelegate build  success.";
+  MS_LOG(DEBUG) << "NNRTDelegate build success.";
   return mindspore::kSuccess;
 }

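Because the Build() hunks above only show fragments, here is a condensed sketch of the call sequence they drive through the NNRT C API (names and signatures as stubbed in nnrt_stub.cc below; error handling, logging, and cleanup elided, and the executor-construction tail is an assumption since it is not visible in these hunks):

    OH_NNModel *nn_model = OH_NNModel_Construct();
    OH_NNModel_BuildFromLiteGraph(nn_model, nnrt_lite_graph);  // the shallow-copied lite graph

    OH_NNCompilation *compilation = OH_NNCompilation_Construct(nn_model);

    const size_t *all_device_ids = nullptr;
    uint32_t device_count = 0;
    OH_NNDevice_GetAllDevicesID(&all_device_ids, &device_count);

    const bool *is_supported = nullptr;
    uint32_t op_count = 0;
    OH_NNModel_GetAvailableOperations(nn_model, all_device_ids[0], &is_supported, &op_count);

    OH_NNCompilation_SetDevice(compilation, all_device_ids[0]);  // first reported device
    OH_NNCompilation_Build(compilation);
    OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation);
    // The executor ends up wrapped in an NNRTModelKernel, which replaces the
    // delegated kernel range via model->Replace(from, end, nnr_model_kernel).

Note that when no device is found or not every op is supported, Build() returns kSuccess without replacing any kernels (the early returns above), so the model silently keeps running on CPU.
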
diff --git a/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.cc b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.cc
index 5acf2e9a..b91522b0 100644
--- a/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.cc
+++ b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.cc
@@ -97,7 +97,7 @@ OH_NN_DataType mindspore::NNRTModelKernel::ConvertDataType(mindspore::DataType d
 }
 int mindspore::NNRTModelKernel::PrepareInputs() {
   auto input_tensors = this->inputs();
-  for (int i = 0; i < input_tensors.size(); i++) {
+  for (size_t i = 0; i < input_tensors.size(); i++) {
     auto tensor = input_tensors[i];
     auto tensor_shape = tensor.Shape();
     auto tmp_quant_param = tensor.QuantParams();
diff --git a/mindspore/lite/src/runtime/delegate/nnrt/nnrt_stub.cc b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_stub.cc
new file mode 100644
index 00000000..886ac304
--- /dev/null
+++ b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_stub.cc
@@ -0,0 +1,78 @@
+/**
+* Copyright 2023 Huawei Technologies Co., Ltd
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "interfaces/kits/c/neural_network_runtime.h"
+#include "interfaces/innerkits/c/neural_network_runtime_inner.h"
+
+OH_NNModel *OH_NNModel_Construct(void) {
+  return NULL;
+}
+
+OH_NN_ReturnCode OH_NNExecutor_Run(OH_NNExecutor *executor) {
+  return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation) {
+  return OH_NN_SUCCESS;
+}
+
+void OH_NNCompilation_Destroy(OH_NNCompilation **compilation) {}
+
+OH_NNExecutor *OH_NNExecutor_Construct(OH_NNCompilation *compilation) {
+  return NULL;
+}
+
+void OH_NNExecutor_Destroy(OH_NNExecutor **executor) {}
+
+OH_NNCompilation *OH_NNCompilation_Construct(const OH_NNModel *model) {
+  return NULL;
+}
+
+OH_NN_ReturnCode OH_NNDevice_GetAllDevicesID(const size_t **allDevicesID, uint32_t *deviceCount) {
+  return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode OH_NNExecutor_SetOutput(OH_NNExecutor *executor,
+                                         uint32_t outputIndex,
+                                         void *dataBuffer,
+                                         size_t length) {
+  return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode OH_NNCompilation_SetDevice(OH_NNCompilation *compilation, size_t deviceID) {
+  return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode OH_NNExecutor_SetInput(OH_NNExecutor *executor,
+                                        uint32_t inputIndex,
+                                        const OH_NN_Tensor *tensor,
+                                        const void *dataBuffer,
+                                        size_t length) {
+  return OH_NN_SUCCESS;
+}
+
+void OH_NNModel_Destroy(OH_NNModel **model) {}
+
+OH_NN_ReturnCode OH_NNModel_GetAvailableOperations(OH_NNModel *model,
+                                                   size_t deviceID,
+                                                   const bool **isSupported,
+                                                   uint32_t *opCount) {
+  return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode OH_NNModel_BuildFromLiteGraph(OH_NNModel *model, const void *liteGraph) {
+  return OH_NN_SUCCESS;
+}
\ No newline at end of file
diff --git a/mindspore/lite/src/runtime/infer_manager.cc b/mindspore/lite/src/runtime/infer_manager.cc
index 4d6794b1..bd3ff802 100644
--- a/mindspore/lite/src/runtime/infer_manager.cc
+++ b/mindspore/lite/src/runtime/infer_manager.cc
@@ -139,7 +139,8 @@ int KernelInferShape(const std::vector<lite::Tensor *> &inputs, const std::vecto
   std::vector<TensorC *> in_tensors;
   std::vector<TensorC *> out_tensors;
   if (parameter->type_ == schema::PrimitiveType_PartialFusion || parameter->type_ == schema::PrimitiveType_Switch ||
-      parameter->type_ == schema::PrimitiveType_Call || parameter->type_ == schema::PrimitiveType_SwitchLayer) {
+      parameter->type_ == schema::PrimitiveType_Call || parameter->type_ == schema::PrimitiveType_SwitchLayer ||
+      parameter->type_ == PrimType_Inner_ThirdPartyModel) {
     MS_LOG(INFO) << "no need infer shape.";
     return RET_OK;
   }
diff --git a/mindspore/lite/src/runtime/inner_context.cc b/mindspore/lite/src/runtime/inner_context.cc
index 40557f90..5b70cd21 100644
--- a/mindspore/lite/src/runtime/inner_context.cc
+++ b/mindspore/lite/src/runtime/inner_context.cc
@@ -86,11 +86,14 @@ void InnerContext::SetContextDevice(const Context *context) {
   bool isUserSetGPU = context->device_list_.end() !=
                       std::find_if(this->device_list_.begin(), this->device_list_.end(),
                                    [](const DeviceContext &device) { return device.device_type_ == DT_GPU; });
-  if (isUserSetGPU == false && isUserSetNPU == false) {
+  bool isUserSetNNRt = context->device_list_.end() !=
+                       std::find_if(this->device_list_.begin(), this->device_list_.end(),
+                                    [](const DeviceContext &device) { return device.device_type_ == DT_NNRT; });
+  if ((isUserSetGPU == false) && (isUserSetNPU == false) && (isUserSetNNRt == false)) {
     return;
   }

-  /* add GPU/NPU first */
+  /* add GPU/NPU/NNRT first */
   for (auto &device_ctx : context->device_list_) {
     if (device_ctx.device_type_ != DT_CPU) {
       this->device_list_.push_back(device_ctx);
@@ -100,7 +103,7 @@ void InnerContext::SetContextDevice(const Context *context) {
   /* add CPU */
   for (auto &device_ctx : context->device_list_) {
     if (device_ctx.device_type_ == DT_CPU) {
-      if (isUserSetNPU || (isUserSetGPU && enable_parallel_ == false)) {
+      if (isUserSetNPU || isUserSetNNRt || (isUserSetGPU && enable_parallel_ == false)) {
         auto cpu_ctx = device_ctx;
         cpu_ctx.device_info_.cpu_device_info_.cpu_bind_mode_ = NO_BIND;
         this->device_list_.push_back(cpu_ctx);
@@ -181,6 +184,9 @@ int InnerContext::Init() {
   if (IsDeviceTypeEnabled(DT_GPU)) {
     MS_LOG(DEBUG) << "GPU enabled.";
   }
+  if (IsDeviceTypeEnabled(DT_NNRT)) {
+    MS_LOG(DEBUG) << "NNRT enabled.";
+  }

   InitExperimentalExecEnv();
   return RET_OK;
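The net effect of the SetContextDevice() change is that an NNRT device now gets the same scheduling priority as GPU/NPU, and its presence forces the CPU fallback to NO_BIND. A hedged before/after illustration of the internal device list:

    user list:   [ {DT_CPU, bind=MID_CPU}, {DT_NNRT} ]
    inner list:  [ {DT_NNRT}, {DT_CPU, bind=NO_BIND} ]

Non-CPU entries are copied first; the CPU entry is then re-added with cpu_bind_mode_ forced to NO_BIND whenever an NPU or NNRT device was requested.
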
diff --git a/mindspore/lite/src/runtime/kernel/cpu/BUILD.gn b/mindspore/lite/src/runtime/kernel/cpu/BUILD.gn
index 96083c68..fa20755f 100644
--- a/mindspore/lite/src/runtime/kernel/cpu/BUILD.gn
+++ b/mindspore/lite/src/runtime/kernel/cpu/BUILD.gn
@@ -10,6 +10,7 @@ cpu_kernel_sources = [
     "base/constant_of_shape.cc",
     "base/convolution_base.cc",
    "base/crop_base.cc",
+    "base/custom_base.cc",
     "base/detection_post_process_base.cc",
     "base/format_transpose.cc",
     "base/gather_base.cc",
diff --git a/mindspore/lite/src/runtime/kernel/cpu/base/custom_base.cc b/mindspore/lite/src/runtime/kernel/cpu/base/custom_base.cc
new file mode 100644
index 00000000..03477072
--- /dev/null
+++ b/mindspore/lite/src/runtime/kernel/cpu/base/custom_base.cc
@@ -0,0 +1,46 @@
+/**
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/runtime/kernel/cpu/base/custom_base.h"
+#include <algorithm>
+#include <utility>
+#include <vector>
+#include "src/runtime/kernel_registry.h"
+#include "nnacl/op_base.h"
+
+using mindspore::kernel::KERNEL_ARCH;
+using mindspore::lite::KernelRegistrar;
+using mindspore::lite::RET_ERROR;
+using mindspore::lite::RET_OK;
+using mindspore::schema::PrimitiveType_Custom;
+
+namespace mindspore::kernel {
+int CustomBaseCPUKernel::Prepare() {
+  return RET_OK;
+}
+
+int CustomBaseCPUKernel::ReSize() {
+  return RET_OK;
+}
+
+int CustomBaseCPUKernel::Run() {
+  return RET_OK;
+}
+
+REG_KERNEL(kCPU, kNumberTypeInt32, PrimType_Inner_ThirdPartyModel, LiteKernelCreator<CustomBaseCPUKernel>)
+REG_KERNEL(kCPU, kNumberTypeFloat32, PrimType_Inner_ThirdPartyModel, LiteKernelCreator<CustomBaseCPUKernel>)
+REG_KERNEL(kCPU, kNumberTypeBool, PrimType_Inner_ThirdPartyModel, LiteKernelCreator<CustomBaseCPUKernel>)
+}  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/cpu/base/custom_base.h b/mindspore/lite/src/runtime/kernel/cpu/base/custom_base.h
new file mode 100644
index 00000000..3b021669
--- /dev/null
+++ b/mindspore/lite/src/runtime/kernel/cpu/base/custom_base.h
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_CUSTOM_BASE_H_
+#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_CUSTOM_BASE_H_
+
+#include <vector>
+#include "src/runtime/lite_kernel.h"
+#include "nnacl/custom_parameter.h"
+
+namespace mindspore::kernel {
+class CustomBaseCPUKernel : public LiteKernel {
+ public:
+  CustomBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                      const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {
+    custom_param_ = reinterpret_cast<CustomParameter *>(op_parameter_);
+  }
+  ~CustomBaseCPUKernel() override = default;
+
+  int Prepare() override;
+  int ReSize() override;
+  int Run() override;
+
+ private:
+  CustomParameter *custom_param_ = nullptr;
+};
+}  // namespace mindspore::kernel
+
+#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_CUSTOM_BASE_H_
diff --git a/mindspore/lite/src/runtime/lite_session.cc b/mindspore/lite/src/runtime/lite_session.cc
index 4a9939fd..eb83f444 100644
--- a/mindspore/lite/src/runtime/lite_session.cc
+++ b/mindspore/lite/src/runtime/lite_session.cc
@@ -54,6 +54,9 @@
 #endif
 #include "src/runtime/runtime_convert.h"
 #include "extendrt/mindir_loader/model_loader.h"
+#ifdef SUPPORT_NNRT
+#include "src/runtime/delegate/nnrt/nnrt_delegate.h"
+#endif

 using AbstractBaseModel = mindspore::infer::AbstractBaseModel;

@@ -829,6 +832,19 @@ int LiteSession::CreateNPUDelegate() {
   return RET_OK;
 }

+int LiteSession::CreateNNRTDelegate() {
+#ifdef SUPPORT_NNRT
+  delegate_ = std::make_shared<NNRTDelegate>();
+  if (delegate_ == nullptr) {
+    MS_LOG(ERROR) << "New NNRT delegate failed";
+    return RET_ERROR;
+  }
+  delegate_device_type_ = DT_NNRT;
+  this->context_->delegate = delegate_;
+#endif
+  return RET_OK;
+}
+
 int LiteSession::DelegateInit() {
 #ifndef DELEGATE_CLIP
   if (context_->delegate != nullptr) {
@@ -848,6 +864,12 @@ int LiteSession::DelegateInit() {
         return ret;
       }
     }
+    if (context_->IsDeviceTypeEnabled(DT_NNRT)) {
+      auto ret = CreateNNRTDelegate();
+      if (ret != RET_OK) {
+        return ret;
+      }
+    }
   }

   if (delegate_ != nullptr) {
diff --git a/mindspore/lite/src/runtime/lite_session.h b/mindspore/lite/src/runtime/lite_session.h
index dc93d583..255e90b5 100644
--- a/mindspore/lite/src/runtime/lite_session.h
+++ b/mindspore/lite/src/runtime/lite_session.h
@@ -150,6 +150,7 @@ class LiteSession {
   int ContextInit(InnerContext *context);
   int CreateTensorRTDelegate();
   int CreateNPUDelegate();
+  int CreateNNRTDelegate();
   int DelegateInit();
   int InitGPURuntime();

diff --git a/mindspore/lite/src/runtime/scheduler.cc b/mindspore/lite/src/runtime/scheduler.cc
index 4a024850..b2b6f6a9 100644
--- a/mindspore/lite/src/runtime/scheduler.cc
+++ b/mindspore/lite/src/runtime/scheduler.cc
@@ -53,7 +53,9 @@
 #include "include/registry/register_kernel_interface.h"
 #include "extendrt/mindir_loader/abstract_base_model.h"
 #include "src/runtime/pack_weight_manager.h"
-
+#ifdef SUPPORT_NNRT
+#include "src/runtime/delegate/nnrt/nnrt_delegate.h"
+#endif
 using AbstractBaseModel = mindspore::infer::AbstractBaseModel;

 namespace mindspore::lite {
@@ -423,6 +425,7 @@ bool Scheduler::CheckRunNCXPass() {
 }

 int Scheduler::Schedule(std::vector<kernel::KernelExec *> *dst_kernels) {
+  MS_LOG(DEBUG) << "Start schedule.";
   int check_input_ret = CheckInputParam(dst_kernels);
   if (check_input_ret != RET_OK) {
     MS_LOG(ERROR) << "CheckInputParam failed! ret: " << check_input_ret;
@@ -459,11 +462,13 @@ int Scheduler::Schedule(std::vector<kernel::KernelExec *> *dst_kernels) {
   }
   shape_fusion_pass_->FreeOutputTensorDataOfFusedShape();

+  MS_LOG(DEBUG) << "Start initializing delegate kernels.";
   ret = InitDelegateKernels(dst_kernels);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Repalce delegate kernels failed.";
     return ret;
   }
+  MS_LOG(DEBUG) << "Finished initializing delegate kernels.";

   ret = CheckCpuValid(dst_kernels);
   if (ret != RET_OK) {
@@ -555,6 +560,14 @@ int Scheduler::ReplaceDelegateKernels(std::vector<kernel::KernelExec *> *dst_ker
     MS_LOG(ERROR) << "New delegate model failed.";
     return RET_NULL_PTR;
   }
+
+#ifdef SUPPORT_NNRT
+  if (context_->IsDeviceTypeEnabled(DT_NNRT)) {
+    auto delegate = static_cast<NNRTDelegate *>(delegate_.get());
+    delegate->ShallowCopyLiteGraph(this->src_model_->graph_);
+  }
+#endif
+
   auto ret = delegate_->Build(model);
   if (ret != mindspore::kSuccess) {
     delete model;
diff --git a/mindspore/lite/tools/benchmark/benchmark_base.cc b/mindspore/lite/tools/benchmark/benchmark_base.cc
index 1c161fc4..b9d83296 100644
--- a/mindspore/lite/tools/benchmark/benchmark_base.cc
+++ b/mindspore/lite/tools/benchmark/benchmark_base.cc
@@ -304,7 +304,7 @@ int BenchmarkBase::CheckThreadNumValid() {

 int BenchmarkBase::CheckDeviceTypeValid() {
   if (flags_->device_ != "CPU" && flags_->device_ != "GPU" && flags_->device_ != "NPU" &&
-      flags_->device_ != "Ascend310" && flags_->device_ != "Ascend310P") {
+      flags_->device_ != "Ascend310" && flags_->device_ != "Ascend310P" && flags_->device_ != "NNRT") {
     MS_LOG(ERROR) << "Device type:" << flags_->device_ << " is not supported.";
     std::cerr << "Device type:" << flags_->device_ << " is not supported." << std::endl;
     return RET_ERROR;
diff --git a/mindspore/lite/tools/benchmark/benchmark_base.h b/mindspore/lite/tools/benchmark/benchmark_base.h
index d7f523e8..e24e1fe9 100644
--- a/mindspore/lite/tools/benchmark/benchmark_base.h
+++ b/mindspore/lite/tools/benchmark/benchmark_base.h
@@ -121,7 +121,7 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
     AddFlag(&BenchmarkFlags::model_type_, "modelType", "Input model type. MindIR | MindIR_Lite", "MindIR");
     AddFlag(&BenchmarkFlags::in_data_file_, "inDataFile", "Input data file, if not set, use random input", "");
     AddFlag(&BenchmarkFlags::config_file_, "configFile", "Config file", "");
-    AddFlag(&BenchmarkFlags::device_, "device", "CPU | GPU | NPU | Ascend310 | Ascend310P", "CPU");
+    AddFlag(&BenchmarkFlags::device_, "device", "CPU | GPU | NPU | Ascend310 | Ascend310P | NNRT", "CPU");
     AddFlag(&BenchmarkFlags::cpu_bind_mode_, "cpuBindMode", "Input 0 for NO_BIND, 1 for HIGHER_CPU, 2 for MID_CPU.", 1);
     // MarkPerformance
     AddFlag(&BenchmarkFlags::loop_count_, "loopCount", "Run loop count", 10);
diff --git a/mindspore/lite/tools/benchmark/benchmark_c_api.cc b/mindspore/lite/tools/benchmark/benchmark_c_api.cc
index 252e65c6..cb0c56b0 100644
--- a/mindspore/lite/tools/benchmark/benchmark_c_api.cc
+++ b/mindspore/lite/tools/benchmark/benchmark_c_api.cc
@@ -125,6 +125,10 @@ int BenchmarkCApi::InitContext() {
     OH_AI_DeviceInfoSetFrequency(npu_device_info, kFrequencyDefault);
     OH_AI_ContextAddDeviceInfo(context_, npu_device_info);
   }
+  if (flags_->device_ == "NNRT") {
+    OH_AI_DeviceInfoHandle nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
+    OH_AI_ContextAddDeviceInfo(context_, nnrt_device_info);
+  }
   OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
   OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, flags_->enable_fp16_);
   OH_AI_ContextAddDeviceInfo(context_, cpu_device_info);
diff --git a/mindspore/lite/tools/benchmark/benchmark_unified_api.cc b/mindspore/lite/tools/benchmark/benchmark_unified_api.cc
index 91f1fa73..75dc7b01 100644
--- a/mindspore/lite/tools/benchmark/benchmark_unified_api.cc
+++ b/mindspore/lite/tools/benchmark/benchmark_unified_api.cc
@@ -444,6 +444,11 @@ int BenchmarkUnifiedApi::InitMSContext(const std::shared_ptr<mindspore::Context>
     // device_list.push_back(ascend_device_info);
   }

+  if (flags_->device_ == "NNRT") {
+    std::shared_ptr<NNRTDeviceInfo> nnrt_device_info = std::make_shared<NNRTDeviceInfo>();
+    device_list.push_back(nnrt_device_info);
+  }
+
   // CPU priority is behind GPU and NPU
   std::shared_ptr<CPUDeviceInfo> device_info = std::make_shared<CPUDeviceInfo>();
   device_info->SetEnableFP16(flags_->enable_fp16_);
-- 
2.34.1
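
With the benchmark hooks above in place, the delegate path can be smoke-tested end to end. A typical invocation (--device and --loopCount are the flags shown above; --modelFile is the benchmark tool's standard model flag, assumed here):

    ./benchmark --modelFile=model.ms --device=NNRT --loopCount=10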