• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1From 5907d68ca0fe549b5fed290f05978db5c2034865 Mon Sep 17 00:00:00 2001
2From: Zhu Guodong <zhuguodong0001@163.com>
3Date: Mon, 6 Mar 2023 16:05:44 +0800
4Subject: [PATCH 3/4] implement mindir module and support nnrt delegate
5
6---
7 mindspore/lite/mindir/BUILD.gn                |   52 +
8 mindspore/lite/mindir/CMakeLists.txt          |   31 +
9 mindspore/lite/mindir/include/mindir.h        |  423 ++
10 .../lite/mindir/include/mindir_lite_graph.h   |   57 +
11 .../lite/mindir/include/mindir_primitive.h    |   15 +
12 mindspore/lite/mindir/include/mindir_tensor.h |   45 +
13 mindspore/lite/mindir/include/mindir_types.h  |  210 +
14 .../lite/mindir/inner_headers/lite_graph.h    |   27 +
15 .../inner_headers/mindir_memory_manager.h     |   33 +
16 mindspore/lite/mindir/inner_headers/utils.h   |   28 +
17 mindspore/lite/mindir/src/mindir.cc           | 4258 +++++++++++++++++
18 .../lite/mindir/src/mindir_memory_manager.cc  |  122 +
19 .../lite/mindir/src/mindir_nnrt_lite_graph.cc |   87 +
20 .../src/mindir_nnrt_lite_graph_to_model.cc    | 1496 ++++++
21 mindspore/lite/mindir/src/mindir_tensor.cc    |  389 ++
22 mindspore/lite/mindir/src/utils.cc            |   96 +
23 mindspore/lite/mindir/tests/BUILD.gn          |   35 +
24 mindspore/lite/mindir/tests/mindir_test.cc    |   51 +
25 .../src/runtime/delegate/nnrt/CMakeLists.txt  |   30 +
26 .../delegate/nnrt/checker/primitive_check.cc  |  187 +
27 .../delegate/nnrt/checker/primitive_check.h   |   12 +
28 .../runtime/delegate/nnrt/nnrt_delegate.cc    |  360 ++
29 .../src/runtime/delegate/nnrt/nnrt_delegate.h |   52 +
30 .../delegate/nnrt/nnrt_model_kernel.cc        |  175 +
31 .../runtime/delegate/nnrt/nnrt_model_kernel.h |   57 +
32 25 files changed, 8328 insertions(+)
33 create mode 100644 mindspore/lite/mindir/BUILD.gn
34 create mode 100644 mindspore/lite/mindir/CMakeLists.txt
35 create mode 100644 mindspore/lite/mindir/include/mindir.h
36 create mode 100644 mindspore/lite/mindir/include/mindir_lite_graph.h
37 create mode 100644 mindspore/lite/mindir/include/mindir_primitive.h
38 create mode 100644 mindspore/lite/mindir/include/mindir_tensor.h
39 create mode 100644 mindspore/lite/mindir/include/mindir_types.h
40 create mode 100644 mindspore/lite/mindir/inner_headers/lite_graph.h
41 create mode 100644 mindspore/lite/mindir/inner_headers/mindir_memory_manager.h
42 create mode 100644 mindspore/lite/mindir/inner_headers/utils.h
43 create mode 100644 mindspore/lite/mindir/src/mindir.cc
44 create mode 100644 mindspore/lite/mindir/src/mindir_memory_manager.cc
45 create mode 100644 mindspore/lite/mindir/src/mindir_nnrt_lite_graph.cc
46 create mode 100644 mindspore/lite/mindir/src/mindir_nnrt_lite_graph_to_model.cc
47 create mode 100644 mindspore/lite/mindir/src/mindir_tensor.cc
48 create mode 100644 mindspore/lite/mindir/src/utils.cc
49 create mode 100644 mindspore/lite/mindir/tests/BUILD.gn
50 create mode 100644 mindspore/lite/mindir/tests/mindir_test.cc
51 create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/CMakeLists.txt
52 create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.cc
53 create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.h
54 create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.cc
55 create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.h
56 create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.cc
57 create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.h
58
59diff --git a/mindspore/lite/mindir/BUILD.gn b/mindspore/lite/mindir/BUILD.gn
60new file mode 100644
61index 00000000..2ef8225d
62--- /dev/null
63+++ b/mindspore/lite/mindir/BUILD.gn
64@@ -0,0 +1,52 @@
65+# Copyright 2022 Huawei Technologies Co., Ltd
66+#
67+# Licensed under the Apache License, Version 2.0 (the "License");
68+# you may not use this file except in compliance with the License.
69+# You may obtain a copy of the License at
70+#
71+# http://www.apache.org/licenses/LICENSE-2.0
72+#
73+# Unless required by applicable law or agreed to in writing, software
74+# distributed under the License is distributed on an "AS IS" BASIS,
75+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
76+# See the License for the specific language governing permissions and
77+# limitations under the License.
78+# ============================================================================
79+import("//build/ohos.gni")
80+
81+# ohos_group("mindir_test") {
82+#   deps = [
83+#     "tests:mindir_test",
84+#   ]
85+# }
86+ohos_shared_library("mindir_lib") {
87+  include_dirs = [
88+    "../",
89+    "../../core",
90+    "include",
91+    "inner_headers",
92+    "//third_party/flatbuffers/include",
93+  ]
94+  sources = [
95+    "../src/common/log.cc",
96+    "src/mindir.cc",
97+    "src/mindir_memory_manager.cc",
98+    "src/mindir_nnrt_lite_graph.cc",
99+    "src/mindir_nnrt_lite_graph_to_model.cc",
100+    "src/mindir_tensor.cc",
101+    "src/utils.cc",
102+  ]
103+  external_deps = [
104+    "c_utils:utils",
105+    "drivers_interface_nnrt:libnnrt_proxy_1.0",
106+    "hdf_core:libhdi",
107+    "hilog_native:libhilog",
108+    "ipc:ipc_core",
109+  ]
110+  configs = ["../:disable_android"]
111+  defines = [ "MS_COMPILE_OHOS" ]
112+  deps = [ "//drivers/interface/nnrt/v1_0:nnrt_idl_headers" ]
113+  output_name = "mindir"
114+  innerapi_tags = [ "platformsdk_indirect"]
115+  part_name = "mindspore"
116+}
117diff --git a/mindspore/lite/mindir/CMakeLists.txt b/mindspore/lite/mindir/CMakeLists.txt
118new file mode 100644
119index 00000000..42b89711
120--- /dev/null
121+++ b/mindspore/lite/mindir/CMakeLists.txt
122@@ -0,0 +1,31 @@
123+#set(CMAKE_TOOLCHAIN_FILE /heaven/wty/devtools/ohos_sdk/native/build/cmake/ohos.toolchain.cmake)
124+set(OHOS_ARCH arm64-v8a)
125+set(OHOS_STL c++_static)
126+set(OHOS_PLATFORM rk3568)
127+set(CMAKE_CXX_COMPILER /usr/bin/g++)
128+project(mindir)
129+cmake_minimum_required(VERSION 3.18)
130+
131+file(GLOB source src/*.cc)
132+file(GLOB convert_source src/converter/*.cpp)
133+set(mindir_source ../src/common/log.cc)
134+include_directories(include)
135+include_directories(inner_headers)
136+include_directories(../)
137+include_directories(../../../../../out/rk3568/gen/drivers/interface)
138+include_directories(../../../../../third_party/flatbuffers/include)
139+include_directories(../../core)
140+
141+include_directories(../../foundation/communication/ipc/interfaces/innerkits/ipc_core/include)
142+include_directories(../../../../../utils/native/base/include)
143+add_compile_definitions(MINDIR_INTERFACE)
144+add_library(mindir SHARED ${source} ${convert_source} ${mindir_source})
145+target_link_libraries(mindir ../../../../../out/rk3568/hdf/drivers_interface_nnrt/libnnrt_proxy_1.0.z.so
146+        ../../../../../out/rk3568/commonlibrary/c_utils/libutils.z.so
147+        hilog_ndk.z
148+        ../../../../../out/rk3568/communication/ipc/libipc_core.z.so
149+        )
150+file(GLOB test_sources tests/*.cc)
151+
152+add_executable(mindir_test ${test_sources})
153+target_link_libraries(mindir_test mindir)
154diff --git a/mindspore/lite/mindir/include/mindir.h b/mindspore/lite/mindir/include/mindir.h
155new file mode 100644
156index 00000000..73cd6898
157--- /dev/null
158+++ b/mindspore/lite/mindir/include/mindir.h
159@@ -0,0 +1,423 @@
160+/**
161+ * Copyright 2021 Huawei Technologies Co., Ltd
162+ *
163+ * Licensed under the Apache License, Version 2.0 (the "License");
164+ * you may not use this file except in compliance with the License.
165+ * You may obtain a copy of the License at
166+ *
167+ * http://www.apache.org/licenses/LICENSE-2.0
168+ *
169+ * Unless required by applicable law or agreed to in writing, software
170+ * distributed under the License is distributed on an "AS IS" BASIS,
171+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
172+ * See the License for the specific language governing permissions and
173+ * limitations under the License.
174+ */
175+#ifndef MINDSPORE_LITE_MINDIR_H
176+#define MINDSPORE_LITE_MINDIR_H
177+#include "mindir_types.h"
178+#include "mindir_lite_graph.h"
179+#include "mindir_tensor.h"
180+#include "mindir_primitive.h"
181+namespace OHOS {
182+namespace HDI {
183+namespace Nnrt {
184+namespace V1_0 {
185+struct Model;
186+}  // namespace V1_0
187+}  // namespace Nnrt
188+}  // namespace HDI
189+}  // namespace OHOS
190+
191+namespace mindspore {
192+namespace lite {
193+
194+// ********** Model **********
195+OHOS::HDI::Nnrt::V1_0::Model *MindIR_LiteGraph_To_Model(const LiteGraph *lite_graph,
196+                                                        const OHOS::HDI::Nnrt::V1_0::SharedBuffer &buffer);
197+void MindIR_Model_Destroy(OHOS::HDI::Nnrt::V1_0::Model **model);
198+
199+// ********** Activation **********
200+PrimitivePtr MindIR_Activation_CreatePrimitive(ActivationType activation_type, float alpha, float min_val,
201+                                               float max_val, bool approximate);
202+ActivationType MindIR_Activation_GetActivationType(ConstPrimitivePtr primitive);
203+void MindIR_Activation_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type);
204+float MindIR_Activation_GetAlpha(ConstPrimitivePtr primitive);
205+void MindIR_Activation_SetAlpha(PrimitivePtr *primitive, float alpha);
206+float MindIR_Activation_GetMinVal(ConstPrimitivePtr primitive);
207+void MindIR_Activation_SetMinVal(PrimitivePtr *primitive, float min_val);
208+float MindIR_Activation_GetMaxVal(ConstPrimitivePtr primitive);
209+void MindIR_Activation_SetMaxVal(PrimitivePtr *primitive, float max_val);
210+bool MindIR_Activation_GetApproximate(ConstPrimitivePtr primitive);
211+void MindIR_Activation_SetApproximate(PrimitivePtr *primitive, bool approximate);
212+
213+// ********** AddFusion **********
214+PrimitivePtr MindIR_AddFusion_CreatePrimitive(ActivationType activation_type);
215+ActivationType MindIR_AddFusion_GetActivationType(ConstPrimitivePtr primitive);
216+void MindIR_AddFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type);
217+
218+// ********** ArgMaxFusion **********
219+PrimitivePtr MindIR_ArgMaxFusion_CreatePrimitive(int64_t axis, int64_t top_k, bool keep_dims, bool out_max_value);
220+int64_t MindIR_ArgMaxFusion_GetAxis(ConstPrimitivePtr primitive);
221+void MindIR_ArgMaxFusion_SetAxis(PrimitivePtr *primitive, int64_t axis);
222+int64_t MindIR_ArgMaxFusion_GetTopK(ConstPrimitivePtr primitive);
223+void MindIR_ArgMaxFusion_SetTopK(PrimitivePtr *primitive, int64_t top_k);
224+bool MindIR_ArgMaxFusion_GetKeepDims(ConstPrimitivePtr primitive);
225+void MindIR_ArgMaxFusion_SetKeepDims(PrimitivePtr *primitive, bool keep_dims);
226+bool MindIR_ArgMaxFusion_GetOutMaxValue(ConstPrimitivePtr primitive);
227+void MindIR_ArgMaxFusion_SetOutMaxValue(PrimitivePtr *primitive, bool out_max_value);
228+
229+// ********** AvgPoolFusion **********
230+PrimitivePtr MindIR_AvgPoolFusion_CreatePrimitive(const std::vector<int64_t> &kernel_size,
231+                                                  const std::vector<int64_t> &strides, const std::vector<int64_t> &pad,
232+                                                  PadMode pad_mode, RoundMode round_mode, Format format, bool global,
233+                                                  ActivationType activation_type);
234+std::vector<int64_t> MindIR_AvgPoolFusion_GetKernelSize(ConstPrimitivePtr primitive);
235+void MindIR_AvgPoolFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector<int64_t> &kernel_size);
236+std::vector<int64_t> MindIR_AvgPoolFusion_GetStrides(ConstPrimitivePtr primitive);
237+void MindIR_AvgPoolFusion_SetStrides(PrimitivePtr *primitive, const std::vector<int64_t> &strides);
238+std::vector<int64_t> MindIR_AvgPoolFusion_GetPad(ConstPrimitivePtr primitive);
239+void MindIR_AvgPoolFusion_SetPad(PrimitivePtr *primitive, const std::vector<int64_t> &pad);
240+PadMode MindIR_AvgPoolFusion_GetPadMode(ConstPrimitivePtr primitive);
241+void MindIR_AvgPoolFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode);
242+RoundMode MindIR_AvgPoolFusion_GetRoundMode(ConstPrimitivePtr primitive);
243+void MindIR_AvgPoolFusion_SetRoundMode(PrimitivePtr *primitive, RoundMode round_mode);
244+Format MindIR_AvgPoolFusion_GetFormat(ConstPrimitivePtr primitive);
245+void MindIR_AvgPoolFusion_SetFormat(PrimitivePtr *primitive, Format format);
246+bool MindIR_AvgPoolFusion_GetGlobal(ConstPrimitivePtr primitive);
247+void MindIR_AvgPoolFusion_SetGlobal(PrimitivePtr *primitive, bool global);
248+ActivationType MindIR_AvgPoolFusion_GetActivationType(ConstPrimitivePtr primitive);
249+void MindIR_AvgPoolFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type);
250+
251+// ********** BatchToSpaceND **********
252+PrimitivePtr MindIR_BatchToSpaceND_CreatePrimitive(const std::vector<int64_t> &block_shape,
253+                                                   const std::vector<std::vector<int64_t>> &crops);
254+std::vector<int64_t> MindIR_BatchToSpaceND_GetBlockShape(ConstPrimitivePtr primitive);
255+void MindIR_BatchToSpaceND_SetBlockShape(PrimitivePtr *primitive, const std::vector<int64_t> &block_shape);
256+std::vector<std::vector<int64_t>> MindIR_BatchToSpaceND_GetCrops(ConstPrimitivePtr primitive);
257+void MindIR_BatchToSpaceND_SetCrops(PrimitivePtr *primitive, const std::vector<std::vector<int64_t>> &crops);
258+
259+// ********** BiasAdd **********
260+PrimitivePtr MindIR_BiasAdd_CreatePrimitive();
261+
262+// ********** Cast **********
263+PrimitivePtr MindIR_Cast_CreatePrimitive();
264+
265+// ********** Concat **********
266+PrimitivePtr MindIR_Concat_CreatePrimitive(int64_t axis);
267+int64_t MindIR_Concat_GetAxis(ConstPrimitivePtr primitive);
268+void MindIR_Concat_SetAxis(PrimitivePtr *primitive, int64_t axis);
269+
270+// ********** Conv2DFusion **********
271+PrimitivePtr MindIR_Conv2DFusion_CreatePrimitive(const std::vector<int64_t> &kernel_size,
272+                                                 const std::vector<int64_t> &stride,
273+                                                 const std::vector<int64_t> &dilation, PadMode pad_mode,
274+                                                 const std::vector<int64_t> &pad_list, int64_t group,
275+                                                 int64_t in_channel, int64_t out_channel,
276+                                                 ActivationType activation_type);
277+std::vector<int64_t> MindIR_Conv2DFusion_GetKernelSize(ConstPrimitivePtr primitive);
278+void MindIR_Conv2DFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector<int64_t> &kernel_size);
279+std::vector<int64_t> MindIR_Conv2DFusion_GetStride(ConstPrimitivePtr primitive);
280+void MindIR_Conv2DFusion_SetStride(PrimitivePtr *primitive, const std::vector<int64_t> &stride);
281+std::vector<int64_t> MindIR_Conv2DFusion_GetDilation(ConstPrimitivePtr primitive);
282+void MindIR_Conv2DFusion_SetDilation(PrimitivePtr *primitive, const std::vector<int64_t> &dilation);
283+PadMode MindIR_Conv2DFusion_GetPadMode(ConstPrimitivePtr primitive);
284+void MindIR_Conv2DFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode);
285+std::vector<int64_t> MindIR_Conv2DFusion_GetPadList(ConstPrimitivePtr primitive);
286+void MindIR_Conv2DFusion_SetPadList(PrimitivePtr *primitive, const std::vector<int64_t> &pad_list);
287+int64_t MindIR_Conv2DFusion_GetGroup(ConstPrimitivePtr primitive);
288+void MindIR_Conv2DFusion_SetGroup(PrimitivePtr *primitive, int64_t group);
289+int64_t MindIR_Conv2DFusion_GetInChannel(ConstPrimitivePtr primitive);
290+void MindIR_Conv2DFusion_SetInChannel(PrimitivePtr *primitive, int64_t in_channel);
291+int64_t MindIR_Conv2DFusion_GetOutChannel(ConstPrimitivePtr primitive);
292+void MindIR_Conv2DFusion_SetOutChannel(PrimitivePtr *primitive, int64_t out_channel);
293+ActivationType MindIR_Conv2DFusion_GetActivationType(ConstPrimitivePtr primitive);
294+void MindIR_Conv2DFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type);
295+
296+// ********** Conv2dTransposeFusion **********
297+PrimitivePtr MindIR_Conv2dTransposeFusion_CreatePrimitive(
298+  const std::vector<int64_t> &kernel_size, const std::vector<int64_t> &stride, const std::vector<int64_t> &dilation,
299+  PadMode pad_mode, const std::vector<int64_t> &pad_list, int64_t group, int64_t in_channel, int64_t out_channel,
300+  ActivationType activation_type, const std::vector<int64_t> &output_paddings);
301+std::vector<int64_t> MindIR_Conv2dTransposeFusion_GetKernelSize(ConstPrimitivePtr primitive);
302+void MindIR_Conv2dTransposeFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector<int64_t> &kernel_size);
303+std::vector<int64_t> MindIR_Conv2dTransposeFusion_GetStride(ConstPrimitivePtr primitive);
304+void MindIR_Conv2dTransposeFusion_SetStride(PrimitivePtr *primitive, const std::vector<int64_t> &stride);
305+std::vector<int64_t> MindIR_Conv2dTransposeFusion_GetDilation(ConstPrimitivePtr primitive);
306+void MindIR_Conv2dTransposeFusion_SetDilation(PrimitivePtr *primitive, const std::vector<int64_t> &dilation);
307+PadMode MindIR_Conv2dTransposeFusion_GetPadMode(ConstPrimitivePtr primitive);
308+void MindIR_Conv2dTransposeFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode);
309+std::vector<int64_t> MindIR_Conv2dTransposeFusion_GetPadList(ConstPrimitivePtr primitive);
310+void MindIR_Conv2dTransposeFusion_SetPadList(PrimitivePtr *primitive, const std::vector<int64_t> &pad_list);
311+int64_t MindIR_Conv2dTransposeFusion_GetGroup(ConstPrimitivePtr primitive);
312+void MindIR_Conv2dTransposeFusion_SetGroup(PrimitivePtr *primitive, int64_t group);
313+int64_t MindIR_Conv2dTransposeFusion_GetInChannel(ConstPrimitivePtr primitive);
314+void MindIR_Conv2dTransposeFusion_SetInChannel(PrimitivePtr *primitive, int64_t in_channel);
315+int64_t MindIR_Conv2dTransposeFusion_GetOutChannel(ConstPrimitivePtr primitive);
316+void MindIR_Conv2dTransposeFusion_SetOutChannel(PrimitivePtr *primitive, int64_t out_channel);
317+ActivationType MindIR_Conv2dTransposeFusion_GetActivationType(ConstPrimitivePtr primitive);
318+void MindIR_Conv2dTransposeFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type);
319+std::vector<int64_t> MindIR_Conv2dTransposeFusion_GetOutputPaddings(ConstPrimitivePtr primitive);
320+void MindIR_Conv2dTransposeFusion_SetOutputPaddings(PrimitivePtr *primitive,
321+                                                    const std::vector<int64_t> &output_paddings);
322+
323+// ********** DivFusion **********
324+PrimitivePtr MindIR_DivFusion_CreatePrimitive(ActivationType activation_type);
325+ActivationType MindIR_DivFusion_GetActivationType(ConstPrimitivePtr primitive);
326+void MindIR_DivFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type);
327+
328+// ********** Eltwise **********
329+PrimitivePtr MindIR_Eltwise_CreatePrimitive(EltwiseMode mode);
330+EltwiseMode MindIR_Eltwise_GetMode(ConstPrimitivePtr primitive);
331+void MindIR_Eltwise_SetMode(PrimitivePtr *primitive, EltwiseMode mode);
332+
333+// ********** ExpandDims **********
334+PrimitivePtr MindIR_ExpandDims_CreatePrimitive();
335+
336+// ********** Fill **********
337+PrimitivePtr MindIR_Fill_CreatePrimitive();
338+
339+// ********** FullConnection **********
340+PrimitivePtr MindIR_FullConnection_CreatePrimitive(bool has_bias, bool use_axis, int64_t axis,
341+                                                   ActivationType activation_type);
342+bool MindIR_FullConnection_GetHasBias(ConstPrimitivePtr primitive);
343+void MindIR_FullConnection_SetHasBias(PrimitivePtr *primitive, bool has_bias);
344+bool MindIR_FullConnection_GetUseAxis(ConstPrimitivePtr primitive);
345+void MindIR_FullConnection_SetUseAxis(PrimitivePtr *primitive, bool use_axis);
346+int64_t MindIR_FullConnection_GetAxis(ConstPrimitivePtr primitive);
347+void MindIR_FullConnection_SetAxis(PrimitivePtr *primitive, int64_t axis);
348+ActivationType MindIR_FullConnection_GetActivationType(ConstPrimitivePtr primitive);
349+void MindIR_FullConnection_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type);
350+
351+// ********** FusedBatchNorm **********
352+PrimitivePtr MindIR_FusedBatchNorm_CreatePrimitive(float epsilon);
353+float MindIR_FusedBatchNorm_GetEpsilon(ConstPrimitivePtr primitive);
354+void MindIR_FusedBatchNorm_SetEpsilon(PrimitivePtr *primitive, float epsilon);
355+
356+// ********** Gather **********
357+PrimitivePtr MindIR_Gather_CreatePrimitive();
358+
359+// ********** LayerNormFusion **********
360+PrimitivePtr MindIR_LayerNormFusion_CreatePrimitive(int64_t begin_norm_axis, float epsilon, bool elementwise_affine,
361+                                                    int64_t begin_params_axis);
362+int64_t MindIR_LayerNormFusion_GetBeginNormAxis(ConstPrimitivePtr primitive);
363+void MindIR_LayerNormFusion_SetBeginNormAxis(PrimitivePtr *primitive, int64_t begin_norm_axis);
364+float MindIR_LayerNormFusion_GetEpsilon(ConstPrimitivePtr primitive);
365+void MindIR_LayerNormFusion_SetEpsilon(PrimitivePtr *primitive, float epsilon);
366+bool MindIR_LayerNormFusion_GetElementwiseAffine(ConstPrimitivePtr primitive);
367+void MindIR_LayerNormFusion_SetElementwiseAffine(PrimitivePtr *primitive, bool elementwise_affine);
368+int64_t MindIR_LayerNormFusion_GetBeginParamsAxis(ConstPrimitivePtr primitive);
369+void MindIR_LayerNormFusion_SetBeginParamsAxis(PrimitivePtr *primitive, int64_t begin_params_axis);
370+
371+// ********** LessEqual **********
372+PrimitivePtr MindIR_LessEqual_CreatePrimitive();
373+
374+// ********** MatMulFusion **********
375+PrimitivePtr MindIR_MatMulFusion_CreatePrimitive(bool transpose_a, bool transpose_b, ActivationType activation_type);
376+bool MindIR_MatMulFusion_GetTransposeA(ConstPrimitivePtr primitive);
377+void MindIR_MatMulFusion_SetTransposeA(PrimitivePtr *primitive, bool transpose_a);
378+bool MindIR_MatMulFusion_GetTransposeB(ConstPrimitivePtr primitive);
379+void MindIR_MatMulFusion_SetTransposeB(PrimitivePtr *primitive, bool transpose_b);
380+ActivationType MindIR_MatMulFusion_GetActivationType(ConstPrimitivePtr primitive);
381+void MindIR_MatMulFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type);
382+
383+// ********** Maximum **********
384+PrimitivePtr MindIR_Maximum_CreatePrimitive();
385+
386+// ********** MaxPoolFusion **********
387+PrimitivePtr MindIR_MaxPoolFusion_CreatePrimitive(const std::vector<int64_t> &kernel_size,
388+                                                  const std::vector<int64_t> &strides, const std::vector<int64_t> &pad,
389+                                                  PadMode pad_mode, Format format, bool global,
390+                                                  ActivationType activation_type);
391+std::vector<int64_t> MindIR_MaxPoolFusion_GetKernelSize(ConstPrimitivePtr primitive);
392+void MindIR_MaxPoolFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector<int64_t> &kernel_size);
393+std::vector<int64_t> MindIR_MaxPoolFusion_GetStrides(ConstPrimitivePtr primitive);
394+void MindIR_MaxPoolFusion_SetStrides(PrimitivePtr *primitive, const std::vector<int64_t> &strides);
395+std::vector<int64_t> MindIR_MaxPoolFusion_GetPad(ConstPrimitivePtr primitive);
396+void MindIR_MaxPoolFusion_SetPad(PrimitivePtr *primitive, const std::vector<int64_t> &pad);
397+PadMode MindIR_MaxPoolFusion_GetPadMode(ConstPrimitivePtr primitive);
398+void MindIR_MaxPoolFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode);
399+Format MindIR_MaxPoolFusion_GetFormat(ConstPrimitivePtr primitive);
400+void MindIR_MaxPoolFusion_SetFormat(PrimitivePtr *primitive, Format format);
401+bool MindIR_MaxPoolFusion_GetGlobal(ConstPrimitivePtr primitive);
402+void MindIR_MaxPoolFusion_SetGlobal(PrimitivePtr *primitive, bool global);
403+ActivationType MindIR_MaxPoolFusion_GetActivationType(ConstPrimitivePtr primitive);
404+void MindIR_MaxPoolFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type);
405+
406+// ********** MulFusion **********
407+PrimitivePtr MindIR_MulFusion_CreatePrimitive(ActivationType activation_type);
408+ActivationType MindIR_MulFusion_GetActivationType(ConstPrimitivePtr primitive);
409+void MindIR_MulFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type);
410+
411+// ********** OneHot **********
412+PrimitivePtr MindIR_OneHot_CreatePrimitive(int64_t axis);
413+int64_t MindIR_OneHot_GetAxis(ConstPrimitivePtr primitive);
414+void MindIR_OneHot_SetAxis(PrimitivePtr *primitive, int64_t axis);
415+
416+// ********** PadFusion **********
417+PrimitivePtr MindIR_PadFusion_CreatePrimitive(const std::vector<std::vector<int64_t>> &paddings,
418+                                              PaddingMode padding_mode, float constant_value);
419+std::vector<std::vector<int64_t>> MindIR_PadFusion_GetPaddings(ConstPrimitivePtr primitive);
420+void MindIR_PadFusion_SetPaddings(PrimitivePtr *primitive, const std::vector<std::vector<int64_t>> &paddings);
421+PaddingMode MindIR_PadFusion_GetPaddingMode(ConstPrimitivePtr primitive);
422+void MindIR_PadFusion_SetPaddingMode(PrimitivePtr *primitive, PaddingMode padding_mode);
423+float MindIR_PadFusion_GetConstantValue(ConstPrimitivePtr primitive);
424+void MindIR_PadFusion_SetConstantValue(PrimitivePtr *primitive, float constant_value);
425+
426+// ********** PowFusion **********
427+PrimitivePtr MindIR_PowFusion_CreatePrimitive(float scale, float shift);
428+float MindIR_PowFusion_GetScale(ConstPrimitivePtr primitive);
429+void MindIR_PowFusion_SetScale(PrimitivePtr *primitive, float scale);
430+float MindIR_PowFusion_GetShift(ConstPrimitivePtr primitive);
431+void MindIR_PowFusion_SetShift(PrimitivePtr *primitive, float shift);
432+
433+// ********** PReLUFusion **********
434+PrimitivePtr MindIR_PReLUFusion_CreatePrimitive(bool channel_shared);
435+bool MindIR_PReLUFusion_GetChannelShared(ConstPrimitivePtr primitive);
436+void MindIR_PReLUFusion_SetChannelShared(PrimitivePtr *primitive, bool channel_shared);
437+
438+// ********** QuantDTypeCast **********
439+PrimitivePtr MindIR_QuantDTypeCast_CreatePrimitive(int64_t src_t, int64_t dst_t);
440+int64_t MindIR_QuantDTypeCast_GetSrcT(ConstPrimitivePtr primitive);
441+void MindIR_QuantDTypeCast_SetSrcT(PrimitivePtr *primitive, int64_t src_t);
442+int64_t MindIR_QuantDTypeCast_GetDstT(ConstPrimitivePtr primitive);
443+void MindIR_QuantDTypeCast_SetDstT(PrimitivePtr *primitive, int64_t dst_t);
444+
445+// ********** ReduceFusion **********
446+PrimitivePtr MindIR_ReduceFusion_CreatePrimitive(bool keep_dims, ReduceMode mode, bool reduce_to_end, float coeff);
447+bool MindIR_ReduceFusion_GetKeepDims(ConstPrimitivePtr primitive);
448+void MindIR_ReduceFusion_SetKeepDims(PrimitivePtr *primitive, bool keep_dims);
449+ReduceMode MindIR_ReduceFusion_GetMode(ConstPrimitivePtr primitive);
450+void MindIR_ReduceFusion_SetMode(PrimitivePtr *primitive, ReduceMode mode);
451+bool MindIR_ReduceFusion_GetReduceToEnd(ConstPrimitivePtr primitive);
452+void MindIR_ReduceFusion_SetReduceToEnd(PrimitivePtr *primitive, bool reduce_to_end);
453+float MindIR_ReduceFusion_GetCoeff(ConstPrimitivePtr primitive);
454+void MindIR_ReduceFusion_SetCoeff(PrimitivePtr *primitive, float coeff);
455+
456+// ********** Reshape **********
457+PrimitivePtr MindIR_Reshape_CreatePrimitive();
458+
459+// ********** Resize **********
460+PrimitivePtr MindIR_Resize_CreatePrimitive(ResizeMethod method, int64_t new_height, int64_t new_width,
461+                                           bool preserve_aspect_ratio,
462+                                           CoordinateTransformMode coordinate_transform_mode, float cubic_coeff,
463+                                           int64_t exclude_outside, float extrapolation_value,
464+                                           NearestMode nearest_mode);
465+ResizeMethod MindIR_Resize_GetMethod(ConstPrimitivePtr primitive);
466+void MindIR_Resize_SetMethod(PrimitivePtr *primitive, ResizeMethod method);
467+int64_t MindIR_Resize_GetNewHeight(ConstPrimitivePtr primitive);
468+void MindIR_Resize_SetNewHeight(PrimitivePtr *primitive, int64_t new_height);
469+int64_t MindIR_Resize_GetNewWidth(ConstPrimitivePtr primitive);
470+void MindIR_Resize_SetNewWidth(PrimitivePtr *primitive, int64_t new_width);
471+bool MindIR_Resize_GetPreserveAspectRatio(ConstPrimitivePtr primitive);
472+void MindIR_Resize_SetPreserveAspectRatio(PrimitivePtr *primitive, bool preserve_aspect_ratio);
473+CoordinateTransformMode MindIR_Resize_GetCoordinateTransformMode(ConstPrimitivePtr primitive);
474+void MindIR_Resize_SetCoordinateTransformMode(PrimitivePtr *primitive,
475+                                              CoordinateTransformMode coordinate_transform_mode);
476+float MindIR_Resize_GetCubicCoeff(ConstPrimitivePtr primitive);
477+void MindIR_Resize_SetCubicCoeff(PrimitivePtr *primitive, float cubic_coeff);
478+int64_t MindIR_Resize_GetExcludeOutside(ConstPrimitivePtr primitive);
479+void MindIR_Resize_SetExcludeOutside(PrimitivePtr *primitive, int64_t exclude_outside);
480+float MindIR_Resize_GetExtrapolationValue(ConstPrimitivePtr primitive);
481+void MindIR_Resize_SetExtrapolationValue(PrimitivePtr *primitive, float extrapolation_value);
482+NearestMode MindIR_Resize_GetNearestMode(ConstPrimitivePtr primitive);
483+void MindIR_Resize_SetNearestMode(PrimitivePtr *primitive, NearestMode nearest_mode);
484+
485+// ********** Rsqrt **********
486+PrimitivePtr MindIR_Rsqrt_CreatePrimitive();
487+
488+// ********** ScaleFusion **********
489+PrimitivePtr MindIR_ScaleFusion_CreatePrimitive(int64_t axis, ActivationType activation_type);
490+int64_t MindIR_ScaleFusion_GetAxis(ConstPrimitivePtr primitive);
491+void MindIR_ScaleFusion_SetAxis(PrimitivePtr *primitive, int64_t axis);
492+ActivationType MindIR_ScaleFusion_GetActivationType(ConstPrimitivePtr primitive);
493+void MindIR_ScaleFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type);
494+
495+// ********** Shape **********
496+PrimitivePtr MindIR_Shape_CreatePrimitive();
497+
498+// ********** SliceFusion **********
499+PrimitivePtr MindIR_SliceFusion_CreatePrimitive(const std::vector<int64_t> &axes);
500+std::vector<int64_t> MindIR_SliceFusion_GetAxes(ConstPrimitivePtr primitive);
501+void MindIR_SliceFusion_SetAxes(PrimitivePtr *primitive, const std::vector<int64_t> &axes);
502+
503+// ********** Softmax **********
504+PrimitivePtr MindIR_Softmax_CreatePrimitive(const std::vector<int64_t> &axis);
505+std::vector<int64_t> MindIR_Softmax_GetAxis(ConstPrimitivePtr primitive);
506+void MindIR_Softmax_SetAxis(PrimitivePtr *primitive, const std::vector<int64_t> &axis);
507+
508+// ********** SpaceToBatchND **********
509+PrimitivePtr MindIR_SpaceToBatchND_CreatePrimitive(const std::vector<int64_t> &block_shape,
510+                                                   const std::vector<std::vector<int64_t>> &paddings);
511+std::vector<int64_t> MindIR_SpaceToBatchND_GetBlockShape(ConstPrimitivePtr primitive);
512+void MindIR_SpaceToBatchND_SetBlockShape(PrimitivePtr *primitive, const std::vector<int64_t> &block_shape);
513+std::vector<std::vector<int64_t>> MindIR_SpaceToBatchND_GetPaddings(ConstPrimitivePtr primitive);
514+void MindIR_SpaceToBatchND_SetPaddings(PrimitivePtr *primitive, const std::vector<std::vector<int64_t>> &paddings);
515+
516+// ********** Split **********
517+PrimitivePtr MindIR_Split_CreatePrimitive(int64_t output_num, const std::vector<int64_t> &size_splits, int64_t axis);
518+int64_t MindIR_Split_GetOutputNum(ConstPrimitivePtr primitive);
519+void MindIR_Split_SetOutputNum(PrimitivePtr *primitive, int64_t output_num);
520+std::vector<int64_t> MindIR_Split_GetSizeSplits(ConstPrimitivePtr primitive);
521+void MindIR_Split_SetSizeSplits(PrimitivePtr *primitive, const std::vector<int64_t> &size_splits);
522+int64_t MindIR_Split_GetAxis(ConstPrimitivePtr primitive);
523+void MindIR_Split_SetAxis(PrimitivePtr *primitive, int64_t axis);
524+
525+// ********** Sqrt **********
526+PrimitivePtr MindIR_Sqrt_CreatePrimitive();
527+
528+// ********** SquaredDifference **********
529+PrimitivePtr MindIR_SquaredDifference_CreatePrimitive();
530+
531+// ********** Squeeze **********
532+PrimitivePtr MindIR_Squeeze_CreatePrimitive(const std::vector<int64_t> &axis);
533+std::vector<int64_t> MindIR_Squeeze_GetAxis(ConstPrimitivePtr primitive);
534+void MindIR_Squeeze_SetAxis(PrimitivePtr *primitive, const std::vector<int64_t> &axis);
535+
536+// ********** Stack **********
537+PrimitivePtr MindIR_Stack_CreatePrimitive(int64_t axis);
538+int64_t MindIR_Stack_GetAxis(ConstPrimitivePtr primitive);
539+void MindIR_Stack_SetAxis(PrimitivePtr *primitive, int64_t axis);
540+
541+// ********** StridedSlice **********
542+PrimitivePtr MindIR_StridedSlice_CreatePrimitive(int64_t begin_mask, int64_t end_mask, int64_t ellipsis_mask,
543+                                                 int64_t new_axis_mask, int64_t shrink_axis_mask);
544+int64_t MindIR_StridedSlice_GetBeginMask(ConstPrimitivePtr primitive);
545+void MindIR_StridedSlice_SetBeginMask(PrimitivePtr *primitive, int64_t begin_mask);
546+int64_t MindIR_StridedSlice_GetEndMask(ConstPrimitivePtr primitive);
547+void MindIR_StridedSlice_SetEndMask(PrimitivePtr *primitive, int64_t end_mask);
548+int64_t MindIR_StridedSlice_GetEllipsisMask(ConstPrimitivePtr primitive);
549+void MindIR_StridedSlice_SetEllipsisMask(PrimitivePtr *primitive, int64_t ellipsis_mask);
550+int64_t MindIR_StridedSlice_GetNewAxisMask(ConstPrimitivePtr primitive);
551+void MindIR_StridedSlice_SetNewAxisMask(PrimitivePtr *primitive, int64_t new_axis_mask);
552+int64_t MindIR_StridedSlice_GetShrinkAxisMask(ConstPrimitivePtr primitive);
553+void MindIR_StridedSlice_SetShrinkAxisMask(PrimitivePtr *primitive, int64_t shrink_axis_mask);
554+
555+// ********** SubFusion **********
556+PrimitivePtr MindIR_SubFusion_CreatePrimitive(ActivationType activation_type);
557+ActivationType MindIR_SubFusion_GetActivationType(ConstPrimitivePtr primitive);
558+void MindIR_SubFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type);
559+
560+// ********** TileFusion **********
561+PrimitivePtr MindIR_TileFusion_CreatePrimitive(const std::vector<int64_t> &dims);
562+std::vector<int64_t> MindIR_TileFusion_GetDims(ConstPrimitivePtr primitive);
563+void MindIR_TileFusion_SetDims(PrimitivePtr *primitive, const std::vector<int64_t> &dims);
564+
565+// ********** TopKFusion **********
566+PrimitivePtr MindIR_TopKFusion_CreatePrimitive(bool sorted, int64_t axis);
567+bool MindIR_TopKFusion_GetSorted(ConstPrimitivePtr primitive);
568+void MindIR_TopKFusion_SetSorted(PrimitivePtr *primitive, bool sorted);
569+int64_t MindIR_TopKFusion_GetAxis(ConstPrimitivePtr primitive);
570+void MindIR_TopKFusion_SetAxis(PrimitivePtr *primitive, int64_t axis);
571+
572+// ********** Transpose **********
573+PrimitivePtr MindIR_Transpose_CreatePrimitive();
574+
575+// ********** Unsqueeze **********
576+PrimitivePtr MindIR_Unsqueeze_CreatePrimitive(const std::vector<int64_t> &axis);
577+std::vector<int64_t> MindIR_Unsqueeze_GetAxis(ConstPrimitivePtr primitive);
578+void MindIR_Unsqueeze_SetAxis(PrimitivePtr *primitive, const std::vector<int64_t> &axis);
579+
580+}  // namespace lite
581+}  // namespace mindspore
582+#endif
583diff --git a/mindspore/lite/mindir/include/mindir_lite_graph.h b/mindspore/lite/mindir/include/mindir_lite_graph.h
584new file mode 100644
585index 00000000..24684914
586--- /dev/null
587+++ b/mindspore/lite/mindir/include/mindir_lite_graph.h
588@@ -0,0 +1,57 @@
589+#ifndef LITE_NNRT_NNRT_LITE_GRAPH_H
590+#define LITE_NNRT_NNRT_LITE_GRAPH_H
591+#include <memory>
592+#include <string>
593+#include <vector>
594+namespace mindspore {
595+namespace lite {
596+
597+typedef void *PrimitivePtr;
598+typedef const void *ConstPrimitivePtr;
599+
600+typedef void *TensorPtr;
601+typedef const void *ConstTensorPtr;
602+
603+struct LiteGraph {
604+  struct Node {
605+    std::string name_;
606+    std::string op_type_;  // hnn no use
607+    int node_type_;        // hnn no use
608+    PrimitivePtr primitive_ = nullptr;
609+    std::shared_ptr<void> base_operator_ = nullptr;  // hnn no use
610+    std::vector<uint32_t> input_indices_;
611+    std::vector<uint32_t> output_indices_;
612+    int quant_type_;
613+    int device_type_ = -1;  // hnn no use
614+  };
615+
616+  struct SubGraph {
617+    std::string name_;
618+    std::vector<uint32_t> input_indices_;
619+    std::vector<uint32_t> output_indices_;
620+    std::vector<uint32_t> node_indices_;
621+    std::vector<uint32_t> tensor_indices_;  // hnn no use
622+  };
623+
624+  std::string name_;
625+  std::string version_;  // hnn no use
626+  std::vector<uint32_t> input_indices_;
627+  std::vector<uint32_t> output_indices_;
628+  std::vector<TensorPtr> all_tensors_;
629+  std::vector<Node *> all_nodes_;
630+  std::vector<SubGraph *> sub_graphs_;
631+#ifdef ENABLE_MODEL_OBF
632+  std::vector<uint32_t> all_prims_type_;      // hnn no use
633+  std::vector<uint32_t> all_nodes_stat_;      // hnn no use
634+  bool model_obfuscated_ = false;             // hnn no use
635+  std::vector<unsigned char *> deobf_prims_;  // hnn no use
636+#endif
637+};
638+
639+void MindIR_LiteGraph_Destroy(LiteGraph **lite_graph);
640+size_t MindIR_LiteGraph_GetConstTensorSize(const LiteGraph *lite_graph);
641+
642+}  // namespace lite
643+}  // namespace mindspore
644+
645+#endif  // LITE_NNRT_NNRT_LITE_GRAPH_H
646diff --git a/mindspore/lite/mindir/include/mindir_primitive.h b/mindspore/lite/mindir/include/mindir_primitive.h
647new file mode 100644
648index 00000000..b67c608a
649--- /dev/null
650+++ b/mindspore/lite/mindir/include/mindir_primitive.h
651@@ -0,0 +1,15 @@
652+#ifndef MINDIR_MINDIR_PRIMITIVE_H
653+#define MINDIR_MINDIR_PRIMITIVE_H
654+#include "mindir_lite_graph.h"
655+#include "mindir_types.h"
656+
657+namespace mindspore {
658+namespace lite {
659+
660+// ********** PrimitiveBase **********
661+NodeType MindIR_Primitive_GetType(PrimitivePtr primitive);
662+void MindIR_Primitive_Destroy(PrimitivePtr *primitive);
663+
664+}  // namespace lite
665+}  // namespace mindspore
666+#endif  // MINDIR_MINDIR_PRIMITIVE_H
667diff --git a/mindspore/lite/mindir/include/mindir_tensor.h b/mindspore/lite/mindir/include/mindir_tensor.h
668new file mode 100644
669index 00000000..ce1b24dc
670--- /dev/null
671+++ b/mindspore/lite/mindir/include/mindir_tensor.h
672@@ -0,0 +1,45 @@
673+#ifndef LITE_TENSOR_H
674+#define LITE_TENSOR_H
675+#include "mindir_lite_graph.h"
676+#include "mindir_types.h"
677+
678+namespace OHOS {
679+namespace HDI {
680+namespace Nnrt {
681+namespace V1_0 {
682+struct SharedBuffer;
683+}  // namespace V1_0
684+}  // namespace Nnrt
685+}  // namespace HDI
686+}  // namespace OHOS
687+
688+namespace mindspore {
689+namespace lite {
690+
691+// ********** Tensor **********
692+TensorPtr MindIR_Tensor_Create();
693+TensorPtr MindIR_Tensor_Create(const std::string &name, DataType data_type, const std::vector<int32_t> &dims,
694+                               Format format, const std::vector<uint8_t> &data,
695+                               const std::vector<QuantParam> &quant_params);
696+std::string MindIR_Tensor_GetName(ConstTensorPtr tensor);
697+void MindIR_Tensor_SetName(TensorPtr *tensor, const std::string &name);
698+DataType MindIR_Tensor_GetDataType(ConstTensorPtr tensor);
699+void MindIR_Tensor_SetDataType(TensorPtr *tensor, DataType data_type);
700+std::vector<int32_t> MindIR_Tensor_GetDims(ConstTensorPtr tensor);
701+void MindIR_Tensor_SetDims(TensorPtr *tensor, const std::vector<int32_t> &dims);
702+Format MindIR_Tensor_GetFormat(ConstTensorPtr tensor);
703+void MindIR_Tensor_SetFormat(TensorPtr *tensor, Format format);
704+OHOS::HDI::Nnrt::V1_0::SharedBuffer MindIR_Tensor_GetData(ConstTensorPtr tensor,
705+                                                          const OHOS::HDI::Nnrt::V1_0::SharedBuffer &buffer,
706+                                                          uint8_t *mmap_ptr, unsigned int offset);
707+void MindIR_Tensor_SetData(TensorPtr *tensor, const std::vector<uint8_t> &data);
708+std::vector<uint8_t> MindIR_Tensor_GetData(ConstTensorPtr tensor);
709+std::vector<QuantParam> MindIR_Tensor_GetQuantParams(ConstTensorPtr tensor);
710+void MindIR_Tensor_SetQuantParams(TensorPtr *tensor, const std::vector<QuantParam> &quant_params);
711+
712+void MindIR_Tensor_Destroy(TensorPtr *tensor);
713+
714+}  // namespace lite
715+}  // namespace mindspore
716+
717+#endif  // LITE_TENSOR_H
718diff --git a/mindspore/lite/mindir/include/mindir_types.h b/mindspore/lite/mindir/include/mindir_types.h
719new file mode 100644
720index 00000000..8f2a9c70
721--- /dev/null
722+++ b/mindspore/lite/mindir/include/mindir_types.h
723@@ -0,0 +1,210 @@
724+/**
725+ * Copyright 2021 Huawei Technologies Co., Ltd
726+ *
727+ * Licensed under the Apache License, Version 2.0 (the "License");
728+ * you may not use this file except in compliance with the License.
729+ * You may obtain a copy of the License at
730+ *
731+ * http://www.apache.org/licenses/LICENSE-2.0
732+ *
733+ * Unless required by applicable law or agreed to in writing, software
734+ * distributed under the License is distributed on an "AS IS" BASIS,
735+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
736+ * See the License for the specific language governing permissions and
737+ * limitations under the License.
738+ */
739+#ifndef MINDSPORE_LITE_TYPES_H
740+#define MINDSPORE_LITE_TYPES_H
741+#include <string>
742+namespace mindspore {
743+namespace lite {
744+
745+struct QuantParam {
746+  int32_t zeroPoint;
747+  double scale;
748+  int numBits;
749+};
750+
751+enum DataType : int8_t {
752+  DATA_TYPE_UNKNOWN = 0,
753+  DATA_TYPE_BOOL = 30,
754+  DATA_TYPE_INT8 = 32,
755+  DATA_TYPE_INT16 = 33,
756+  DATA_TYPE_INT32 = 34,
757+  DATA_TYPE_INT64 = 35,
758+  DATA_TYPE_UINT8 = 37,
759+  DATA_TYPE_UINT16 = 38,
760+  DATA_TYPE_UINT32 = 39,
761+  DATA_TYPE_UINT64 = 40,
762+  DATA_TYPE_FLOAT16 = 42,
763+  DATA_TYPE_FLOAT32 = 43,
764+  DATA_TYPE_FLOAT64 = 44,
765+};
766+
767+enum Format : int8_t {
768+  FORMAT_NCHW = 0,
769+  FORMAT_NHWC = 1,
770+};
771+
772+enum QuantType : int8_t {
773+  QUANT_TYPE_NONE,
774+  QUANT_TYPE_ALL,
775+};
776+
777+enum NodeType : uint32_t {
778+  NODE_TYPE_NONE = 0,
779+  NODE_TYPE_ACTIVATION = 2,
780+  NODE_TYPE_ADD_FUSION = 5,
781+  NODE_TYPE_ARGMAX_FUSION = 11,
782+  NODE_TYPE_AVG_POOL_FUSION = 17,
783+  NODE_TYPE_BATCH_TO_SPACE_ND = 22,
784+  NODE_TYPE_BIAS_ADD = 23,
785+  NODE_TYPE_CAST = 28,
786+  NODE_TYPE_CONCAT = 31,
787+  NODE_TYPE_CONV2D_FUSION = 35,
788+  NODE_TYPE_CONV2D_TRANSPOSE_FUSION = 36,
789+  NODE_TYPE_DIV_FUSION = 47,
790+  NODE_TYPE_ELTWISE = 52,
791+  NODE_TYPE_EXPAND_DIMS = 56,
792+  NODE_TYPE_FILL = 66,
793+  NODE_TYPE_FULL_CONNECTION = 67,
794+  NODE_TYPE_FUSED_BATCH_NORM = 68,
795+  NODE_TYPE_GATHER = 69,
796+  NODE_TYPE_LAYER_NORM_FUSION = 75,
797+  NODE_TYPE_LESS_EQUAL = 78,
798+  NODE_TYPE_MATMUL_FUSION = 89,
799+  NODE_TYPE_MAXIMUM = 90,
800+  NODE_TYPE_MAX_POOL_FUSION = 92,
801+  NODE_TYPE_MUL_FUSION = 99,
802+  NODE_TYPE_ONE_HOT = 105,
803+  NODE_TYPE_PAD_FUSION = 107,
804+  NODE_TYPE_POW_FUSION = 110,
805+  NODE_TYPE_PRELU_FUSION = 112,
806+  NODE_TYPE_QUANT_DTYPE_CAST = 113,
807+  NODE_TYPE_REDUCE_FUSION = 118,
808+  NODE_TYPE_RESHAPE = 119,
809+  NODE_TYPE_RESIZE = 120,
810+  NODE_TYPE_RSQRT = 126,
811+  NODE_TYPE_SCALE_FUSION = 127,
812+  NODE_TYPE_SHAPE = 130,
813+  NODE_TYPE_SLICE_FUSION = 135,
814+  NODE_TYPE_SOFTMAX = 138,
815+  NODE_TYPE_SPACE_TO_BATCH_ND = 141,
816+  NODE_TYPE_SPLIT = 145,
817+  NODE_TYPE_SQRT = 146,
818+  NODE_TYPE_SQUEEZE = 147,
819+  NODE_TYPE_SQUARED_DIFFERENCE = 149,
820+  NODE_TYPE_STACK = 150,
821+  NODE_TYPE_STRIDED_SLICE = 151,
822+  NODE_TYPE_SUB_FUSION = 152,
823+  NODE_TYPE_TILE_FUSION = 160,
824+  NODE_TYPE_TOPK_FUSION = 161,
825+  NODE_TYPE_TRANSPOSE = 162,
826+  NODE_TYPE_UNSQUEEZE = 165,
827+};
828+
829+enum ResizeMethod : int8_t {
830+  RESIZE_METHOD_UNKNOWN = -1,
831+  RESIZE_METHOD_LINEAR = 0,
832+  RESIZE_METHOD_NEAREST = 1,
833+  RESIZE_METHOD_CUBIC = 2,
834+};
835+
836+enum CoordinateTransformMode : int8_t {
837+  COORDINATE_TRANSFORM_MODE_ASYMMETRIC = 0,
838+  COORDINATE_TRANSFORM_MODE_ALIGN_CORNERS = 1,
839+  COORDINATE_TRANSFORM_MODE_HALF_PIXEL = 2,
840+};
841+
842+enum NearestMode : int8_t {
843+  NEAREST_MODE_NORMAL = 0,
844+  NEAREST_MODE_ROUND_HALF_DOWN = 1,
845+  NEAREST_MODE_ROUND_HALF_UP = 2,
846+  NEAREST_MODE_FLOOR = 3,
847+  NEAREST_MODE_CEIL = 4,
848+};
849+
850+enum ActivationType : int8_t {
851+  ACTIVATION_TYPE_NO_ACTIVATION = 0,
852+  ACTIVATION_TYPE_RELU = 1,
853+  ACTIVATION_TYPE_SIGMOID = 2,
854+  ACTIVATION_TYPE_RELU6 = 3,
855+  ACTIVATION_TYPE_ELU = 4,
856+  ACTIVATION_TYPE_LEAKY_RELU = 5,
857+  ACTIVATION_TYPE_ABS = 6,
858+  ACTIVATION_TYPE_RELU1 = 7,
859+  ACTIVATION_TYPE_SOFTSIGN = 8,
860+  ACTIVATION_TYPE_SOFTPLUS = 9,
861+  ACTIVATION_TYPE_TANH = 10,
862+  ACTIVATION_TYPE_SELU = 11,
863+  ACTIVATION_TYPE_HSWISH = 12,
864+  ACTIVATION_TYPE_HSIGMOID = 13,
865+  ACTIVATION_TYPE_THRESHOLDRELU = 14,
866+  ACTIVATION_TYPE_LINEAR = 15,
867+  ACTIVATION_TYPE_HARD_TANH = 16,
868+  ACTIVATION_TYPE_SIGN = 17,
869+  ACTIVATION_TYPE_SWISH = 18,
870+  ACTIVATION_TYPE_GELU = 19,
871+  ACTIVATION_TYPE_UNKNOWN = 20,
872+};
873+
874+enum ReduceMode : int8_t {
875+  REDUCE_MODE_MEAN = 0,
876+  REDUCE_MODE_MAX = 1,
877+  REDUCE_MODE_MIN = 2,
878+  REDUCE_MODE_PROD = 3,
879+  REDUCE_MODE_SUM = 4,
880+  REDUCE_MODE_SUM_SQUARE = 5,
881+  REDUCE_MODE_ASUM = 6,
882+  REDUCE_MODE_ALL = 7,
883+};
884+
885+enum PoolMode : int8_t {
886+  POOL_MODE_MAX_POOLING = 0,
887+  POOL_MODE_MEAN_POOLING = 1,
888+};
889+
890+enum EltwiseMode : int8_t {
891+  ELTWISE_MODE_PROD = 0,
892+  ELTWISE_MODE_SUM = 1,
893+  ELTWISE_MODE_MAXIMUM = 2,
894+  ELTWISE_MODE_UNKNOWN = 3,
895+};
896+
897+enum PadMode : int8_t {
898+  PAD_MODE_PAD = 0,
899+  PAD_MODE_SAME = 1,
900+  PAD_MODE_VALID = 2,
901+};
902+
903+enum RoundMode : int8_t {
904+  ROUND_MODE_FLOOR = 0,
905+  ROUND_MODE_CEIL = 1,
906+};
907+
908+enum PaddingMode : int8_t {
909+  PADDING_MODE_CONSTANT = 0,
910+  PADDING_MODE_REFLECT = 1,
911+  PADDING_MODE_SYMMETRIC = 2,
912+  PADDING_MODE_RESERVED = 3,
913+};
914+
915+enum LshProjectionType : int8_t {
916+  UNKNOWN = 0,
917+  SPARSE = 1,
918+  DENSE = 2,
919+};
920+
921+enum Reduction : int8_t {
922+  REDUCTION_SUM = 0,
923+  MEAN = 1,
924+  NONE = 2,
925+};
926+
927+struct Attribute {
928+  std::string name;
929+  uint32_t data;
930+};
931+}  // namespace lite
932+}  // namespace mindspore
933+#endif  // MINDSPORE_LITE_TYPES_H
934diff --git a/mindspore/lite/mindir/inner_headers/lite_graph.h b/mindspore/lite/mindir/inner_headers/lite_graph.h
935new file mode 100644
936index 00000000..f2599cc9
937--- /dev/null
938+++ b/mindspore/lite/mindir/inner_headers/lite_graph.h
939@@ -0,0 +1,27 @@
940+/**
941+ * Copyright 2021 Huawei Technologies Co., Ltd
942+ *
943+ * Licensed under the Apache License, Version 2.0 (the "License");
944+ * you may not use this file except in compliance with the License.
945+ * You may obtain a copy of the License at
946+ *
947+ * http://www.apache.org/licenses/LICENSE-2.0
948+ *
949+ * Unless required by applicable law or agreed to in writing, software
950+ * distributed under the License is distributed on an "AS IS" BASIS,
951+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
952+ * See the License for the specific language governing permissions and
953+ * limitations under the License.
954+ */
955+#ifndef MIDIR_LITE_LITE_GRAPH_H
956+#define MIDIR_LITE_LITE_GRAPH_H
957+#include <vector>
958+
959+namespace mindspore {
960+namespace lite {
961+
962+std::vector<int8_t> Convert(NodeType type, PrimitivePtr primitive);
963+
964+}  // namespace lite
965+}  // namespace mindspore
966+#endif  // MIDIR_LITE_LITE_GRAPH_H
967diff --git a/mindspore/lite/mindir/inner_headers/mindir_memory_manager.h b/mindspore/lite/mindir/inner_headers/mindir_memory_manager.h
968new file mode 100644
969index 00000000..29ef0b31
970--- /dev/null
971+++ b/mindspore/lite/mindir/inner_headers/mindir_memory_manager.h
972@@ -0,0 +1,33 @@
973+#ifndef LITE_MINDIR_MEMORY_MANAGER_H
974+#define LITE_MINDIR_MEMORY_MANAGER_H
975+#include <memory>
976+#include <vector>
977+#include <map>
978+#include <mutex>
979+#include "include/errorcode.h"
980+#include "schema/model_generated.h"
981+#include "mindir_lite_graph.h"
982+// using namespace OHOS::HDI::Nnrt::V1_0;
983+
984+namespace mindspore {
985+namespace lite {
986+class MindIRMemoryManager {
987+ public:
988+  static MindIRMemoryManager *GetInstance();
989+  ~MindIRMemoryManager() = default;
990+  void *CreateTensorFromBuilder(flatbuffers::FlatBufferBuilder &fbb_new, schema::Tensor *tensor);
991+  void DeleteTensor(schema::Tensor *tensor);
992+  void *CreatePrimitiveFromBuilder(flatbuffers::FlatBufferBuilder &fbb_new, schema::Primitive *primitive);
993+  void DeletePrimitive(schema::Primitive *primitive);
994+  void ClearAllMemory();
995+
996+ private:
997+  MindIRMemoryManager() = default;
998+  static void *CopyFbbToNewMemory(flatbuffers::FlatBufferBuilder &fbb_new);
999+  std::map<schema::Primitive *, PrimitivePtr> primitive_map;
1000+  std::map<schema::Tensor *, TensorPtr> tensor_map;
1001+  std::mutex mutex;
1002+};
1003+}  // namespace lite
1004+}  // namespace mindspore
1005+#endif  // LITE_MINDIR_MEMORY_MANAGER_H
1006diff --git a/mindspore/lite/mindir/inner_headers/utils.h b/mindspore/lite/mindir/inner_headers/utils.h
1007new file mode 100644
1008index 00000000..0e6eb35d
1009--- /dev/null
1010+++ b/mindspore/lite/mindir/inner_headers/utils.h
1011@@ -0,0 +1,28 @@
1012+#ifndef MIDIR_LITE_UTILS_H
1013+#define MIDIR_LITE_UTILS_H
1014+#include "mindir_types.h"
1015+#include "mindir_lite_graph.h"
1016+#include "schema/model_generated.h"
1017+namespace mindspore {
1018+namespace lite {
1019+
1020+// ********** PrimitiveBase **********
1021+PrimitivePtr MindIR_CreatePrimitiveFromBuilder(flatbuffers::FlatBufferBuilder &fbb);
1022+
1023+flatbuffers::Offset<schema::Vec2D> CreateVec2D(flatbuffers::FlatBufferBuilder &fbb,
1024+                                               const std::vector<std::vector<int64_t>> &data);
1025+flatbuffers::Offset<schema::Vec2D> CreateVec2D(flatbuffers::FlatBufferBuilder &fbb,
1026+                                               const mindspore::schema::Vec2D *data);
1027+
1028+mindspore::schema::PrimitiveType MindIR_GetPrimitiveType(PrimitivePtr prim);
1029+
1030+flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<schema::QuantParam>>> ConvertQuantParams(
1031+  flatbuffers::FlatBufferBuilder &fbb, const std::vector<QuantParam> &quant_params);
1032+
1033+flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<schema::QuantParam>>> ConvertQuantParams(
1034+  flatbuffers::FlatBufferBuilder &fbb,
1035+  const flatbuffers::Vector<flatbuffers::Offset<mindspore::schema::QuantParam>> *quant_params);
1036+
1037+}  // namespace lite
1038+}  // namespace mindspore
1039+#endif  // MIDIR_LITE_UTILS_H
1040diff --git a/mindspore/lite/mindir/src/mindir.cc b/mindspore/lite/mindir/src/mindir.cc
1041new file mode 100644
1042index 00000000..c2a1cd3f
1043--- /dev/null
1044+++ b/mindspore/lite/mindir/src/mindir.cc
1045@@ -0,0 +1,4258 @@
1046+/**
1047+ * Copyright 2021 Huawei Technologies Co., Ltd
1048+ *
1049+ * Licensed under the Apache License, Version 2.0 (the "License");
1050+ * you may not use this file except in compliance with the License.
1051+ * You may obtain a copy of the License at
1052+ *
1053+ * http://www.apache.org/licenses/LICENSE-2.0
1054+ *
1055+ * Unless required by applicable law or agreed to in writing, software
1056+ * distributed under the License is distributed on an "AS IS" BASIS,
1057+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1058+ * See the License for the specific language governing permissions and
1059+ * limitations under the License.
1060+ */
1061+#include "mindir.h"
1062+#include "utils.h"
1063+#include "schema/model_generated.h"
1064+#include "mindir_memory_manager.h"
1065+// TODO: write an example exercising MindIRMemoryManager
1066+namespace mindspore {
1067+namespace lite {
1068+
1069+// ********** Activation **********
1070+PrimitivePtr MindIR_Activation_CreatePrimitive(ActivationType activation_type, float alpha, float min_val,
1071+                                               float max_val, bool approximate) {
1072+  flatbuffers::FlatBufferBuilder fbb;
1073+  auto ops_offset = schema::CreateActivation(fbb, static_cast<schema::ActivationType>(activation_type), alpha, min_val,
1074+                                             max_val, approximate);
1075+  auto prim_offset =
1076+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ACTIVATION), ops_offset.o);
1077+  fbb.Finish(prim_offset);
1078+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
1079+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1080+  return ret_value;
1081+}
1082+ActivationType MindIR_Activation_GetActivationType(ConstPrimitivePtr primitive) {
1083+  if (primitive != nullptr) {
1084+    auto prim = static_cast<const schema::Primitive *>(primitive);
1085+    auto value = prim->value_as_Activation();
1086+    if (prim != nullptr && value != nullptr) {
1087+      return static_cast<ActivationType>(value->activation_type());
1088+    } else {
1089+      ActivationType en = static_cast<ActivationType>(0);
1090+      return en;
1091+    }
1092+  } else {
1093+    ActivationType en = static_cast<ActivationType>(0);
1094+    return en;
1095+  }
1096+}
1097+
1098+void MindIR_Activation_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) {
1099+  if (primitive != nullptr && *primitive != nullptr) {
1100+    auto prim = static_cast<schema::Primitive *>(*primitive);
1101+    auto value = prim->value_as_Activation();
1102+    if (prim != nullptr && value != nullptr) {
1103+      flatbuffers::FlatBufferBuilder fbb;
1104+      auto ops_offset =
1105+        schema::CreateActivation(fbb, static_cast<schema::ActivationType>(activation_type), value->alpha(),
1106+                                 value->min_val(), value->max_val(), value->approximate());
1107+      auto prim_offset =
1108+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ACTIVATION), ops_offset.o);
1109+      fbb.Finish(prim_offset);
1110+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1111+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1112+      *primitive = ret_value;
1113+    }
1114+  }
1115+}
1116+float MindIR_Activation_GetAlpha(ConstPrimitivePtr primitive) {
1117+  if (primitive != nullptr) {
1118+    auto prim = static_cast<const schema::Primitive *>(primitive);
1119+    auto value = prim->value_as_Activation();
1120+    if (prim != nullptr && value != nullptr) {
1121+      return value->alpha();
1122+    } else {
1123+      return .0;
1124+    }
1125+  } else {
1126+    return .0;
1127+  }
1128+}
1129+
1130+void MindIR_Activation_SetAlpha(PrimitivePtr *primitive, float alpha) {
1131+  if (primitive != nullptr && *primitive != nullptr) {
1132+    auto prim = static_cast<schema::Primitive *>(*primitive);
1133+    auto value = prim->value_as_Activation();
1134+    if (prim != nullptr && value != nullptr) {
1135+      flatbuffers::FlatBufferBuilder fbb;
1136+      auto ops_offset = schema::CreateActivation(fbb, static_cast<schema::ActivationType>(value->activation_type()),
1137+                                                 alpha, value->min_val(), value->max_val(), value->approximate());
1138+      auto prim_offset =
1139+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ACTIVATION), ops_offset.o);
1140+      fbb.Finish(prim_offset);
1141+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1142+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1143+      *primitive = ret_value;
1144+    }
1145+  }
1146+}
1147+float MindIR_Activation_GetMinVal(ConstPrimitivePtr primitive) {
1148+  if (primitive != nullptr) {
1149+    auto prim = static_cast<const schema::Primitive *>(primitive);
1150+    auto value = prim->value_as_Activation();
1151+    if (prim != nullptr && value != nullptr) {
1152+      return value->min_val();
1153+    } else {
1154+      return .0;
1155+    }
1156+  } else {
1157+    return .0;
1158+  }
1159+}
1160+
1161+void MindIR_Activation_SetMinVal(PrimitivePtr *primitive, float min_val) {
1162+  if (primitive != nullptr && *primitive != nullptr) {
1163+    auto prim = static_cast<schema::Primitive *>(*primitive);
1164+    auto value = prim->value_as_Activation();
1165+    if (prim != nullptr && value != nullptr) {
1166+      flatbuffers::FlatBufferBuilder fbb;
1167+      auto ops_offset = schema::CreateActivation(fbb, static_cast<schema::ActivationType>(value->activation_type()),
1168+                                                 value->alpha(), min_val, value->max_val(), value->approximate());
1169+      auto prim_offset =
1170+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ACTIVATION), ops_offset.o);
1171+      fbb.Finish(prim_offset);
1172+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1173+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1174+      *primitive = ret_value;
1175+    }
1176+  }
1177+}
1178+float MindIR_Activation_GetMaxVal(ConstPrimitivePtr primitive) {
1179+  if (primitive != nullptr) {
1180+    auto prim = static_cast<const schema::Primitive *>(primitive);
1181+    auto value = prim->value_as_Activation();
1182+    if (prim != nullptr && value != nullptr) {
1183+      return value->max_val();
1184+    } else {
1185+      return .0;
1186+    }
1187+  } else {
1188+    return .0;
1189+  }
1190+}
1191+
1192+void MindIR_Activation_SetMaxVal(PrimitivePtr *primitive, float max_val) {
1193+  if (primitive != nullptr && *primitive != nullptr) {
1194+    auto prim = static_cast<schema::Primitive *>(*primitive);
1195+    auto value = prim->value_as_Activation();
1196+    if (prim != nullptr && value != nullptr) {
1197+      flatbuffers::FlatBufferBuilder fbb;
1198+      auto ops_offset = schema::CreateActivation(fbb, static_cast<schema::ActivationType>(value->activation_type()),
1199+                                                 value->alpha(), value->min_val(), max_val, value->approximate());
1200+      auto prim_offset =
1201+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ACTIVATION), ops_offset.o);
1202+      fbb.Finish(prim_offset);
1203+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1204+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1205+      *primitive = ret_value;
1206+    }
1207+  }
1208+}
1209+bool MindIR_Activation_GetApproximate(ConstPrimitivePtr primitive) {
1210+  if (primitive != nullptr) {
1211+    auto prim = static_cast<const schema::Primitive *>(primitive);
1212+    auto value = prim->value_as_Activation();
1213+    if (prim != nullptr && value != nullptr) {
1214+      return value->approximate();
1215+    } else {
1216+      return false;
1217+    }
1218+  } else {
1219+    return false;
1220+  }
1221+}
1222+
1223+void MindIR_Activation_SetApproximate(PrimitivePtr *primitive, bool approximate) {
1224+  if (primitive != nullptr && *primitive != nullptr) {
1225+    auto prim = static_cast<schema::Primitive *>(*primitive);
1226+    auto value = prim->value_as_Activation();
1227+    if (prim != nullptr && value != nullptr) {
1228+      flatbuffers::FlatBufferBuilder fbb;
1229+      auto ops_offset = schema::CreateActivation(fbb, static_cast<schema::ActivationType>(value->activation_type()),
1230+                                                 value->alpha(), value->min_val(), value->max_val(), approximate);
1231+      auto prim_offset =
1232+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ACTIVATION), ops_offset.o);
1233+      fbb.Finish(prim_offset);
1234+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1235+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1236+      *primitive = ret_value;
1237+    }
1238+  }
1239+}
1240+
1241+// ********** AddFusion **********
1242+PrimitivePtr MindIR_AddFusion_CreatePrimitive(ActivationType activation_type) {
1243+  flatbuffers::FlatBufferBuilder fbb;
1244+  auto ops_offset = schema::CreateAddFusion(fbb, static_cast<schema::ActivationType>(activation_type));
1245+  auto prim_offset =
1246+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ADD_FUSION), ops_offset.o);
1247+  fbb.Finish(prim_offset);
1248+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
1249+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1250+  return ret_value;
1251+}
1252+ActivationType MindIR_AddFusion_GetActivationType(ConstPrimitivePtr primitive) {
1253+  if (primitive != nullptr) {
1254+    auto prim = static_cast<const schema::Primitive *>(primitive);
1255+    auto value = prim->value_as_AddFusion();
1256+    if (prim != nullptr && value != nullptr) {
1257+      return static_cast<ActivationType>(value->activation_type());
1258+    } else {
1259+      ActivationType en = static_cast<ActivationType>(0);
1260+      return en;
1261+    }
1262+  } else {
1263+    ActivationType en = static_cast<ActivationType>(0);
1264+    return en;
1265+  }
1266+}
1267+
1268+void MindIR_AddFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) {
1269+  if (primitive != nullptr && *primitive != nullptr) {
1270+    auto prim = static_cast<schema::Primitive *>(*primitive);
1271+    auto value = prim->value_as_AddFusion();
1272+    if (prim != nullptr && value != nullptr) {
1273+      flatbuffers::FlatBufferBuilder fbb;
1274+      auto ops_offset = schema::CreateAddFusion(fbb, static_cast<schema::ActivationType>(activation_type));
1275+      auto prim_offset =
1276+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ADD_FUSION), ops_offset.o);
1277+      fbb.Finish(prim_offset);
1278+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1279+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1280+      *primitive = ret_value;
1281+    }
1282+  }
1283+}
1284+
1285+// ********** ArgMaxFusion **********
1286+PrimitivePtr MindIR_ArgMaxFusion_CreatePrimitive(int64_t axis, int64_t top_k, bool keep_dims, bool out_max_value) {
1287+  flatbuffers::FlatBufferBuilder fbb;
1288+  auto ops_offset = schema::CreateArgMaxFusion(fbb, axis, top_k, keep_dims, out_max_value);
1289+  auto prim_offset =
1290+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ARGMAX_FUSION), ops_offset.o);
1291+  fbb.Finish(prim_offset);
1292+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
1293+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1294+  return ret_value;
1295+}
1296+int64_t MindIR_ArgMaxFusion_GetAxis(ConstPrimitivePtr primitive) {
1297+  if (primitive != nullptr) {
1298+    auto prim = static_cast<const schema::Primitive *>(primitive);
1299+    auto value = prim->value_as_ArgMaxFusion();
1300+    if (prim != nullptr && value != nullptr) {
1301+      return value->axis();
1302+    } else {
1303+      return 0;
1304+    }
1305+  } else {
1306+    return 0;
1307+  }
1308+}
1309+
1310+void MindIR_ArgMaxFusion_SetAxis(PrimitivePtr *primitive, int64_t axis) {
1311+  if (primitive != nullptr && *primitive != nullptr) {
1312+    auto prim = static_cast<schema::Primitive *>(*primitive);
1313+    auto value = prim->value_as_ArgMaxFusion();
1314+    if (prim != nullptr && value != nullptr) {
1315+      flatbuffers::FlatBufferBuilder fbb;
1316+      auto ops_offset =
1317+        schema::CreateArgMaxFusion(fbb, axis, value->top_k(), value->keep_dims(), value->out_max_value());
1318+      auto prim_offset =
1319+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ARGMAX_FUSION), ops_offset.o);
1320+      fbb.Finish(prim_offset);
1321+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1322+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1323+      *primitive = ret_value;
1324+    }
1325+  }
1326+}
1327+int64_t MindIR_ArgMaxFusion_GetTopK(ConstPrimitivePtr primitive) {
1328+  if (primitive != nullptr) {
1329+    auto prim = static_cast<const schema::Primitive *>(primitive);
1330+    auto value = prim->value_as_ArgMaxFusion();
1331+    if (prim != nullptr && value != nullptr) {
1332+      return value->top_k();
1333+    } else {
1334+      return 0;
1335+    }
1336+  } else {
1337+    return 0;
1338+  }
1339+}
1340+
1341+void MindIR_ArgMaxFusion_SetTopK(PrimitivePtr *primitive, int64_t top_k) {
1342+  if (primitive != nullptr && *primitive != nullptr) {
1343+    auto prim = static_cast<schema::Primitive *>(*primitive);
1344+    auto value = prim->value_as_ArgMaxFusion();
1345+    if (prim != nullptr && value != nullptr) {
1346+      flatbuffers::FlatBufferBuilder fbb;
1347+      auto ops_offset =
1348+        schema::CreateArgMaxFusion(fbb, value->axis(), top_k, value->keep_dims(), value->out_max_value());
1349+      auto prim_offset =
1350+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ARGMAX_FUSION), ops_offset.o);
1351+      fbb.Finish(prim_offset);
1352+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1353+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1354+      *primitive = ret_value;
1355+    }
1356+  }
1357+}
1358+bool MindIR_ArgMaxFusion_GetKeepDims(ConstPrimitivePtr primitive) {
1359+  if (primitive != nullptr) {
1360+    auto prim = static_cast<const schema::Primitive *>(primitive);
1361+    auto value = prim->value_as_ArgMaxFusion();
1362+    if (prim != nullptr && value != nullptr) {
1363+      return value->keep_dims();
1364+    } else {
1365+      return false;
1366+    }
1367+  } else {
1368+    return false;
1369+  }
1370+}
1371+
1372+void MindIR_ArgMaxFusion_SetKeepDims(PrimitivePtr *primitive, bool keep_dims) {
1373+  if (primitive != nullptr && *primitive != nullptr) {
1374+    auto prim = static_cast<schema::Primitive *>(*primitive);
1375+    auto value = prim->value_as_ArgMaxFusion();
1376+    if (prim != nullptr && value != nullptr) {
1377+      flatbuffers::FlatBufferBuilder fbb;
1378+      auto ops_offset =
1379+        schema::CreateArgMaxFusion(fbb, value->axis(), value->top_k(), keep_dims, value->out_max_value());
1380+      auto prim_offset =
1381+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ARGMAX_FUSION), ops_offset.o);
1382+      fbb.Finish(prim_offset);
1383+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1384+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1385+      *primitive = ret_value;
1386+    }
1387+  }
1388+}
1389+bool MindIR_ArgMaxFusion_GetOutMaxValue(ConstPrimitivePtr primitive) {
1390+  if (primitive != nullptr) {
1391+    auto prim = static_cast<const schema::Primitive *>(primitive);
1392+    auto value = prim->value_as_ArgMaxFusion();
1393+    if (prim != nullptr && value != nullptr) {
1394+      return value->out_max_value();
1395+    } else {
1396+      return false;
1397+    }
1398+  } else {
1399+    return false;
1400+  }
1401+}
1402+
1403+void MindIR_ArgMaxFusion_SetOutMaxValue(PrimitivePtr *primitive, bool out_max_value) {
1404+  if (primitive != nullptr && *primitive != nullptr) {
1405+    auto prim = static_cast<schema::Primitive *>(*primitive);
1406+    auto value = prim->value_as_ArgMaxFusion();
1407+    if (prim != nullptr && value != nullptr) {
1408+      flatbuffers::FlatBufferBuilder fbb;
1409+      auto ops_offset =
1410+        schema::CreateArgMaxFusion(fbb, value->axis(), value->top_k(), value->keep_dims(), out_max_value);
1411+      auto prim_offset =
1412+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ARGMAX_FUSION), ops_offset.o);
1413+      fbb.Finish(prim_offset);
1414+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1415+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1416+      *primitive = ret_value;
1417+    }
1418+  }
1419+}
1420+
1421+// ********** AvgPoolFusion **********
1422+PrimitivePtr MindIR_AvgPoolFusion_CreatePrimitive(const std::vector<int64_t> &kernel_size,
1423+                                                  const std::vector<int64_t> &strides, const std::vector<int64_t> &pad,
1424+                                                  PadMode pad_mode, RoundMode round_mode, Format format, bool global,
1425+                                                  ActivationType activation_type) {
1426+  flatbuffers::FlatBufferBuilder fbb;
1427+  auto ops_offset = schema::CreateAvgPoolFusion(
1428+    fbb, fbb.CreateVector(kernel_size.data(), kernel_size.size()), fbb.CreateVector(strides.data(), strides.size()),
1429+    fbb.CreateVector(pad.data(), pad.size()), static_cast<schema::PadMode>(pad_mode),
1430+    static_cast<schema::RoundMode>(round_mode), static_cast<schema::Format>(format), global,
1431+    static_cast<schema::ActivationType>(activation_type));
1432+  auto prim_offset =
1433+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o);
1434+  fbb.Finish(prim_offset);
1435+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
1436+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1437+  return ret_value;
1438+}
1439+std::vector<int64_t> MindIR_AvgPoolFusion_GetKernelSize(ConstPrimitivePtr primitive) {
1440+  if (primitive != nullptr) {
1441+    auto prim = static_cast<const schema::Primitive *>(primitive);
1442+    auto value = prim->value_as_AvgPoolFusion();
1443+    if (prim != nullptr && value != nullptr) {
1444+      std::vector<int64_t> result;
1445+      auto src = value->kernel_size();
1446+      result.resize(src->size());
1447+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
1448+      return result;
1449+    } else {
1450+      return {};
1451+    }
1452+  } else {
1453+    return {};
1454+  }
1455+}
1456+
1457+void MindIR_AvgPoolFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector<int64_t> &kernel_size) {
1458+  if (primitive != nullptr && *primitive != nullptr) {
1459+    auto prim = static_cast<schema::Primitive *>(*primitive);
1460+    auto value = prim->value_as_AvgPoolFusion();
1461+    if (prim != nullptr && value != nullptr) {
1462+      flatbuffers::FlatBufferBuilder fbb;
1463+      auto ops_offset = schema::CreateAvgPoolFusion(
1464+        fbb, fbb.CreateVector(kernel_size.data(), kernel_size.size()),
1465+        fbb.CreateVector(value->strides()->data(), value->strides()->size()),
1466+        fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast<schema::PadMode>(value->pad_mode()),
1467+        static_cast<schema::RoundMode>(value->round_mode()), static_cast<schema::Format>(value->format()),
1468+        value->global(), static_cast<schema::ActivationType>(value->activation_type()));
1469+      auto prim_offset =
1470+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o);
1471+      fbb.Finish(prim_offset);
1472+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1473+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1474+      *primitive = ret_value;
1475+    }
1476+  }
1477+}
1478+std::vector<int64_t> MindIR_AvgPoolFusion_GetStrides(ConstPrimitivePtr primitive) {
1479+  if (primitive != nullptr) {
1480+    auto prim = static_cast<const schema::Primitive *>(primitive);
1481+    auto value = prim->value_as_AvgPoolFusion();
1482+    if (prim != nullptr && value != nullptr) {
1483+      std::vector<int64_t> result;
1484+      auto src = value->strides();
1485+      result.resize(src->size());
1486+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
1487+      return result;
1488+    } else {
1489+      return {};
1490+    }
1491+  } else {
1492+    return {};
1493+  }
1494+}
1495+
1496+void MindIR_AvgPoolFusion_SetStrides(PrimitivePtr *primitive, const std::vector<int64_t> &strides) {
1497+  if (primitive != nullptr && *primitive != nullptr) {
1498+    auto prim = static_cast<schema::Primitive *>(*primitive);
1499+    auto value = prim->value_as_AvgPoolFusion();
1500+    if (prim != nullptr && value != nullptr) {
1501+      flatbuffers::FlatBufferBuilder fbb;
1502+      auto ops_offset = schema::CreateAvgPoolFusion(
1503+        fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
1504+        fbb.CreateVector(strides.data(), strides.size()), fbb.CreateVector(value->pad()->data(), value->pad()->size()),
1505+        static_cast<schema::PadMode>(value->pad_mode()), static_cast<schema::RoundMode>(value->round_mode()),
1506+        static_cast<schema::Format>(value->format()), value->global(),
1507+        static_cast<schema::ActivationType>(value->activation_type()));
1508+      auto prim_offset =
1509+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o);
1510+      fbb.Finish(prim_offset);
1511+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1512+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1513+      *primitive = ret_value;
1514+    }
1515+  }
1516+}
1517+std::vector<int64_t> MindIR_AvgPoolFusion_GetPad(ConstPrimitivePtr primitive) {
1518+  if (primitive != nullptr) {
1519+    auto prim = static_cast<const schema::Primitive *>(primitive);
1520+    auto value = prim->value_as_AvgPoolFusion();
1521+    if (prim != nullptr && value != nullptr) {
1522+      std::vector<int64_t> result;
1523+      auto src = value->pad();
1524+      result.resize(src->size());
1525+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
1526+      return result;
1527+    } else {
1528+      return {};
1529+    }
1530+  } else {
1531+    return {};
1532+  }
1533+}
1534+
1535+void MindIR_AvgPoolFusion_SetPad(PrimitivePtr *primitive, const std::vector<int64_t> &pad) {
1536+  if (primitive != nullptr && *primitive != nullptr) {
1537+    auto prim = static_cast<schema::Primitive *>(*primitive);
1538+    auto value = prim->value_as_AvgPoolFusion();
1539+    if (prim != nullptr && value != nullptr) {
1540+      flatbuffers::FlatBufferBuilder fbb;
1541+      auto ops_offset = schema::CreateAvgPoolFusion(
1542+        fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
1543+        fbb.CreateVector(value->strides()->data(), value->strides()->size()), fbb.CreateVector(pad.data(), pad.size()),
1544+        static_cast<schema::PadMode>(value->pad_mode()), static_cast<schema::RoundMode>(value->round_mode()),
1545+        static_cast<schema::Format>(value->format()), value->global(),
1546+        static_cast<schema::ActivationType>(value->activation_type()));
1547+      auto prim_offset =
1548+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o);
1549+      fbb.Finish(prim_offset);
1550+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1551+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1552+      *primitive = ret_value;
1553+    }
1554+  }
1555+}
1556+PadMode MindIR_AvgPoolFusion_GetPadMode(ConstPrimitivePtr primitive) {
1557+  if (primitive != nullptr) {
1558+    auto prim = static_cast<const schema::Primitive *>(primitive);
1559+    auto value = prim->value_as_AvgPoolFusion();
1560+    if (prim != nullptr && value != nullptr) {
1561+      return static_cast<PadMode>(value->pad_mode());
1562+    } else {
1563+      PadMode en = static_cast<PadMode>(0);
1564+      return en;
1565+    }
1566+  } else {
1567+    PadMode en = static_cast<PadMode>(0);
1568+    return en;
1569+  }
1570+}
1571+
1572+void MindIR_AvgPoolFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode) {
1573+  if (primitive != nullptr && *primitive != nullptr) {
1574+    auto prim = static_cast<schema::Primitive *>(*primitive);
1575+    auto value = prim->value_as_AvgPoolFusion();
1576+    if (prim != nullptr && value != nullptr) {
1577+      flatbuffers::FlatBufferBuilder fbb;
1578+      auto ops_offset = schema::CreateAvgPoolFusion(
1579+        fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
1580+        fbb.CreateVector(value->strides()->data(), value->strides()->size()),
1581+        fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast<schema::PadMode>(pad_mode),
1582+        static_cast<schema::RoundMode>(value->round_mode()), static_cast<schema::Format>(value->format()),
1583+        value->global(), static_cast<schema::ActivationType>(value->activation_type()));
1584+      auto prim_offset =
1585+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o);
1586+      fbb.Finish(prim_offset);
1587+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1588+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1589+      *primitive = ret_value;
1590+    }
1591+  }
1592+}
1593+RoundMode MindIR_AvgPoolFusion_GetRoundMode(ConstPrimitivePtr primitive) {
1594+  if (primitive != nullptr) {
1595+    auto prim = static_cast<const schema::Primitive *>(primitive);
1596+    auto value = prim->value_as_AvgPoolFusion();
1597+    if (prim != nullptr && value != nullptr) {
1598+      return static_cast<RoundMode>(value->round_mode());
1599+    } else {
1600+      RoundMode en = static_cast<RoundMode>(0);
1601+      return en;
1602+    }
1603+  } else {
1604+    RoundMode en = static_cast<RoundMode>(0);
1605+    return en;
1606+  }
1607+}
1608+
1609+void MindIR_AvgPoolFusion_SetRoundMode(PrimitivePtr *primitive, RoundMode round_mode) {
1610+  if (primitive != nullptr && *primitive != nullptr) {
1611+    auto prim = static_cast<schema::Primitive *>(*primitive);
1612+    auto value = prim->value_as_AvgPoolFusion();
1613+    if (prim != nullptr && value != nullptr) {
1614+      flatbuffers::FlatBufferBuilder fbb;
1615+      auto ops_offset = schema::CreateAvgPoolFusion(
1616+        fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
1617+        fbb.CreateVector(value->strides()->data(), value->strides()->size()),
1618+        fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast<schema::PadMode>(value->pad_mode()),
1619+        static_cast<schema::RoundMode>(round_mode), static_cast<schema::Format>(value->format()), value->global(),
1620+        static_cast<schema::ActivationType>(value->activation_type()));
1621+      auto prim_offset =
1622+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o);
1623+      fbb.Finish(prim_offset);
1624+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1625+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1626+      *primitive = ret_value;
1627+    }
1628+  }
1629+}
1630+Format MindIR_AvgPoolFusion_GetFormat(ConstPrimitivePtr primitive) {
1631+  if (primitive != nullptr) {
1632+    auto prim = static_cast<const schema::Primitive *>(primitive);
1633+    auto value = prim->value_as_AvgPoolFusion();
1634+    if (prim != nullptr && value != nullptr) {
1635+      return static_cast<Format>(value->format());
1636+    } else {
1637+      Format en = static_cast<Format>(0);
1638+      return en;
1639+    }
1640+  } else {
1641+    Format en = static_cast<Format>(0);
1642+    return en;
1643+  }
1644+}
1645+
1646+void MindIR_AvgPoolFusion_SetFormat(PrimitivePtr *primitive, Format format) {
1647+  if (primitive != nullptr && *primitive != nullptr) {
1648+    auto prim = static_cast<schema::Primitive *>(*primitive);
1649+    auto value = prim->value_as_AvgPoolFusion();
1650+    if (prim != nullptr && value != nullptr) {
1651+      flatbuffers::FlatBufferBuilder fbb;
1652+      auto ops_offset = schema::CreateAvgPoolFusion(
1653+        fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
1654+        fbb.CreateVector(value->strides()->data(), value->strides()->size()),
1655+        fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast<schema::PadMode>(value->pad_mode()),
1656+        static_cast<schema::RoundMode>(value->round_mode()), static_cast<schema::Format>(format), value->global(),
1657+        static_cast<schema::ActivationType>(value->activation_type()));
1658+      auto prim_offset =
1659+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o);
1660+      fbb.Finish(prim_offset);
1661+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1662+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1663+      *primitive = ret_value;
1664+    }
1665+  }
1666+}
1667+bool MindIR_AvgPoolFusion_GetGlobal(ConstPrimitivePtr primitive) {
1668+  if (primitive != nullptr) {
1669+    auto prim = static_cast<const schema::Primitive *>(primitive);
1670+    auto value = prim->value_as_AvgPoolFusion();
1671+    if (prim != nullptr && value != nullptr) {
1672+      return value->global();
1673+    } else {
1674+      return false;
1675+    }
1676+  } else {
1677+    return false;
1678+  }
1679+}
1680+
1681+void MindIR_AvgPoolFusion_SetGlobal(PrimitivePtr *primitive, bool global) {
1682+  if (primitive != nullptr && *primitive != nullptr) {
1683+    auto prim = static_cast<schema::Primitive *>(*primitive);
1684+    auto value = prim->value_as_AvgPoolFusion();
1685+    if (prim != nullptr && value != nullptr) {
1686+      flatbuffers::FlatBufferBuilder fbb;
1687+      auto ops_offset = schema::CreateAvgPoolFusion(
1688+        fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
1689+        fbb.CreateVector(value->strides()->data(), value->strides()->size()),
1690+        fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast<schema::PadMode>(value->pad_mode()),
1691+        static_cast<schema::RoundMode>(value->round_mode()), static_cast<schema::Format>(value->format()), global,
1692+        static_cast<schema::ActivationType>(value->activation_type()));
1693+      auto prim_offset =
1694+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o);
1695+      fbb.Finish(prim_offset);
1696+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1697+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1698+      *primitive = ret_value;
1699+    }
1700+  }
1701+}
1702+ActivationType MindIR_AvgPoolFusion_GetActivationType(ConstPrimitivePtr primitive) {
1703+  if (primitive != nullptr) {
1704+    auto prim = static_cast<const schema::Primitive *>(primitive);
1705+    auto value = prim->value_as_AvgPoolFusion();
1706+    if (prim != nullptr && value != nullptr) {
1707+      return static_cast<ActivationType>(value->activation_type());
1708+    } else {
1709+      ActivationType en = static_cast<ActivationType>(0);
1710+      return en;
1711+    }
1712+  } else {
1713+    ActivationType en = static_cast<ActivationType>(0);
1714+    return en;
1715+  }
1716+}
1717+
1718+void MindIR_AvgPoolFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) {
1719+  if (primitive != nullptr && *primitive != nullptr) {
1720+    auto prim = static_cast<schema::Primitive *>(*primitive);
1721+    auto value = prim->value_as_AvgPoolFusion();
1722+    if (prim != nullptr && value != nullptr) {
1723+      flatbuffers::FlatBufferBuilder fbb;
1724+      auto ops_offset = schema::CreateAvgPoolFusion(
1725+        fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
1726+        fbb.CreateVector(value->strides()->data(), value->strides()->size()),
1727+        fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast<schema::PadMode>(value->pad_mode()),
1728+        static_cast<schema::RoundMode>(value->round_mode()), static_cast<schema::Format>(value->format()),
1729+        value->global(), static_cast<schema::ActivationType>(activation_type));
1730+      auto prim_offset =
1731+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o);
1732+      fbb.Finish(prim_offset);
1733+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1734+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1735+      *primitive = ret_value;
1736+    }
1737+  }
1738+}
1739+
1740+// ********** BatchToSpaceND **********
1741+PrimitivePtr MindIR_BatchToSpaceND_CreatePrimitive(const std::vector<int64_t> &block_shape,
1742+                                                   const std::vector<std::vector<int64_t>> &crops) {
1743+  flatbuffers::FlatBufferBuilder fbb;
1744+  auto ops_offset = schema::CreateBatchToSpaceND(fbb, fbb.CreateVector(block_shape.data(), block_shape.size()),
1745+                                                 CreateVec2D(fbb, crops));
1746+  auto prim_offset =
1747+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_BATCH_TO_SPACE_ND), ops_offset.o);
1748+  fbb.Finish(prim_offset);
1749+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
1750+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1751+  return ret_value;
1752+}
1753+std::vector<int64_t> MindIR_BatchToSpaceND_GetBlockShape(ConstPrimitivePtr primitive) {
1754+  if (primitive != nullptr) {
1755+    auto prim = static_cast<const schema::Primitive *>(primitive);
1756+    auto value = prim->value_as_BatchToSpaceND();
1757+    if (prim != nullptr && value != nullptr) {
1758+      std::vector<int64_t> result;
1759+      auto src = value->block_shape();
1760+      result.resize(src->size());
1761+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
1762+      return result;
1763+    } else {
1764+      return {};
1765+    }
1766+  } else {
1767+    return {};
1768+  }
1769+}
1770+
1771+void MindIR_BatchToSpaceND_SetBlockShape(PrimitivePtr *primitive, const std::vector<int64_t> &block_shape) {
1772+  if (primitive != nullptr && *primitive != nullptr) {
1773+    auto prim = static_cast<schema::Primitive *>(*primitive);
1774+    auto value = prim->value_as_BatchToSpaceND();
1775+    if (prim != nullptr && value != nullptr) {
1776+      flatbuffers::FlatBufferBuilder fbb;
1777+      auto ops_offset = schema::CreateBatchToSpaceND(fbb, fbb.CreateVector(block_shape.data(), block_shape.size()),
1778+                                                     CreateVec2D(fbb, value->crops()));
1779+      auto prim_offset =
1780+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_BATCH_TO_SPACE_ND), ops_offset.o);
1781+      fbb.Finish(prim_offset);
1782+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1783+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1784+      *primitive = ret_value;
1785+    }
1786+  }
1787+}
1788+std::vector<std::vector<int64_t>> MindIR_BatchToSpaceND_GetCrops(ConstPrimitivePtr primitive) {
1789+  if (primitive != nullptr) {
1790+    auto prim = static_cast<const schema::Primitive *>(primitive);
1791+    auto value = prim->value_as_BatchToSpaceND();
1792+    if (prim != nullptr && value != nullptr) {
1793+      std::vector<std::vector<int64_t>> out;
1794+      auto src = value->crops();
1795+      for (auto sub_list : *src->data()) {
1796+        std::vector<int64_t> result_tmp;
1797+        result_tmp.resize(sub_list->data()->size());
1798+        std::transform(sub_list->data()->begin(), sub_list->data()->end(), result_tmp.begin(),
1799+                       [](int64_t item) { return item; });
1800+        out.emplace_back(result_tmp);
1801+      }
1802+      return out;
1803+    } else {
1804+      return {};
1805+    }
1806+  } else {
1807+    return {};
1808+  }
1809+}
1810+
1811+void MindIR_BatchToSpaceND_SetCrops(PrimitivePtr *primitive, const std::vector<std::vector<int64_t>> &crops) {
1812+  if (primitive != nullptr && *primitive != nullptr) {
1813+    auto prim = static_cast<schema::Primitive *>(*primitive);
1814+    auto value = prim->value_as_BatchToSpaceND();
1815+    if (prim != nullptr && value != nullptr) {
1816+      flatbuffers::FlatBufferBuilder fbb;
1817+      auto ops_offset = schema::CreateBatchToSpaceND(
1818+        fbb, fbb.CreateVector(value->block_shape()->data(), value->block_shape()->size()), CreateVec2D(fbb, crops));
1819+      auto prim_offset =
1820+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_BATCH_TO_SPACE_ND), ops_offset.o);
1821+      fbb.Finish(prim_offset);
1822+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1823+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1824+      *primitive = ret_value;
1825+    }
1826+  }
1827+}
1828+
1829+// ********** BiasAdd **********
1830+PrimitivePtr MindIR_BiasAdd_CreatePrimitive() {
1831+  flatbuffers::FlatBufferBuilder fbb;
1832+  auto ops_offset = schema::CreateBiasAdd(fbb);
1833+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_BIAS_ADD), ops_offset.o);
1834+  fbb.Finish(prim_offset);
1835+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
1836+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1837+  return ret_value;
1838+}
1839+
1840+// ********** Cast **********
1841+PrimitivePtr MindIR_Cast_CreatePrimitive() {
1842+  flatbuffers::FlatBufferBuilder fbb;
1843+  auto ops_offset = schema::CreateCast(fbb);
1844+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CAST), ops_offset.o);
1845+  fbb.Finish(prim_offset);
1846+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
1847+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1848+  return ret_value;
1849+}
1850+
1851+// ********** Concat **********
1852+PrimitivePtr MindIR_Concat_CreatePrimitive(int64_t axis) {
1853+  flatbuffers::FlatBufferBuilder fbb;
1854+  auto ops_offset = schema::CreateConcat(fbb, axis);
1855+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONCAT), ops_offset.o);
1856+  fbb.Finish(prim_offset);
1857+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
1858+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1859+  return ret_value;
1860+}
1861+int64_t MindIR_Concat_GetAxis(ConstPrimitivePtr primitive) {
1862+  if (primitive != nullptr) {
1863+    auto prim = static_cast<const schema::Primitive *>(primitive);
1864+    auto value = prim->value_as_Concat();
1865+    if (prim != nullptr && value != nullptr) {
1866+      return value->axis();
1867+    } else {
1868+      return 0;
1869+    }
1870+  } else {
1871+    return 0;
1872+  }
1873+}
1874+
1875+void MindIR_Concat_SetAxis(PrimitivePtr *primitive, int64_t axis) {
1876+  if (primitive != nullptr && *primitive != nullptr) {
1877+    auto prim = static_cast<schema::Primitive *>(*primitive);
1878+    auto value = prim->value_as_Concat();
1879+    if (prim != nullptr && value != nullptr) {
1880+      flatbuffers::FlatBufferBuilder fbb;
1881+      auto ops_offset = schema::CreateConcat(fbb, axis);
1882+      auto prim_offset =
1883+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONCAT), ops_offset.o);
1884+      fbb.Finish(prim_offset);
1885+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1886+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1887+      *primitive = ret_value;
1888+    }
1889+  }
1890+}
1891+
1892+// ********** Conv2DFusion **********
1893+PrimitivePtr MindIR_Conv2DFusion_CreatePrimitive(const std::vector<int64_t> &kernel_size,
1894+                                                 const std::vector<int64_t> &stride,
1895+                                                 const std::vector<int64_t> &dilation, PadMode pad_mode,
1896+                                                 const std::vector<int64_t> &pad_list, int64_t group,
1897+                                                 int64_t in_channel, int64_t out_channel,
1898+                                                 ActivationType activation_type) {
1899+  flatbuffers::FlatBufferBuilder fbb;
1900+  auto ops_offset = schema::CreateConv2DFusion(
1901+    fbb, mindspore::schema::Format_NCHW, fbb.CreateVector(kernel_size.data(), kernel_size.size()),
1902+    fbb.CreateVector(stride.data(), stride.size()), fbb.CreateVector(dilation.data(), dilation.size()),
1903+    static_cast<schema::PadMode>(pad_mode), fbb.CreateVector(pad_list.data(), pad_list.size()), 0, group, in_channel,
1904+    out_channel, static_cast<schema::ActivationType>(activation_type));
1905+  auto prim_offset =
1906+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_FUSION), ops_offset.o);
1907+  fbb.Finish(prim_offset);
1908+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
1909+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1910+  return ret_value;
1911+}
1912+std::vector<int64_t> MindIR_Conv2DFusion_GetKernelSize(ConstPrimitivePtr primitive) {
1913+  if (primitive != nullptr) {
1914+    auto prim = static_cast<const schema::Primitive *>(primitive);
1915+    auto value = prim->value_as_Conv2DFusion();
1916+    if (prim != nullptr && value != nullptr) {
1917+      std::vector<int64_t> result;
1918+      auto src = value->kernel_size();
1919+      result.resize(src->size());
1920+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
1921+      return result;
1922+    } else {
1923+      return {};
1924+    }
1925+  } else {
1926+    return {};
1927+  }
1928+}
1929+
1930+void MindIR_Conv2DFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector<int64_t> &kernel_size) {
1931+  if (primitive != nullptr && *primitive != nullptr) {
1932+    auto prim = static_cast<schema::Primitive *>(*primitive);
1933+    auto value = prim->value_as_Conv2DFusion();
1934+    if (prim != nullptr && value != nullptr) {
1935+      flatbuffers::FlatBufferBuilder fbb;
1936+      auto ops_offset = schema::CreateConv2DFusion(
1937+        fbb, mindspore::schema::Format_NCHW, fbb.CreateVector(kernel_size.data(), kernel_size.size()),
1938+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
1939+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
1940+        static_cast<schema::PadMode>(value->pad_mode()),
1941+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(),
1942+        value->out_channel(), static_cast<schema::ActivationType>(value->activation_type()));
1943+      auto prim_offset =
1944+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_FUSION), ops_offset.o);
1945+      fbb.Finish(prim_offset);
1946+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1947+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1948+      *primitive = ret_value;
1949+    }
1950+  }
1951+}
1952+std::vector<int64_t> MindIR_Conv2DFusion_GetStride(ConstPrimitivePtr primitive) {
1953+  if (primitive != nullptr) {
1954+    auto prim = static_cast<const schema::Primitive *>(primitive);
1955+    auto value = prim->value_as_Conv2DFusion();
1956+    if (prim != nullptr && value != nullptr) {
1957+      std::vector<int64_t> result;
1958+      auto src = value->stride();
1959+      result.resize(src->size());
1960+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
1961+      return result;
1962+    } else {
1963+      return {};
1964+    }
1965+  } else {
1966+    return {};
1967+  }
1968+}
1969+
1970+void MindIR_Conv2DFusion_SetStride(PrimitivePtr *primitive, const std::vector<int64_t> &stride) {
1971+  if (primitive != nullptr && *primitive != nullptr) {
1972+    auto prim = static_cast<schema::Primitive *>(*primitive);
1973+    auto value = prim->value_as_Conv2DFusion();
1974+    if (prim != nullptr && value != nullptr) {
1975+      flatbuffers::FlatBufferBuilder fbb;
1976+      auto ops_offset = schema::CreateConv2DFusion(
1977+        fbb, mindspore::schema::Format_NCHW,
1978+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
1979+        fbb.CreateVector(stride.data(), stride.size()),
1980+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
1981+        static_cast<schema::PadMode>(value->pad_mode()),
1982+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(),
1983+        value->out_channel(), static_cast<schema::ActivationType>(value->activation_type()));
1984+      auto prim_offset =
1985+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_FUSION), ops_offset.o);
1986+      fbb.Finish(prim_offset);
1987+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
1988+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
1989+      *primitive = ret_value;
1990+    }
1991+  }
1992+}
1993+std::vector<int64_t> MindIR_Conv2DFusion_GetDilation(ConstPrimitivePtr primitive) {
1994+  if (primitive != nullptr) {
1995+    auto prim = static_cast<const schema::Primitive *>(primitive);
1996+    auto value = prim->value_as_Conv2DFusion();
1997+    if (prim != nullptr && value != nullptr) {
1998+      std::vector<int64_t> result;
1999+      auto src = value->dilation();
2000+      result.resize(src->size());
2001+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
2002+      return result;
2003+    } else {
2004+      return {};
2005+    }
2006+  } else {
2007+    return {};
2008+  }
2009+}
2010+
2011+void MindIR_Conv2DFusion_SetDilation(PrimitivePtr *primitive, const std::vector<int64_t> &dilation) {
2012+  if (primitive != nullptr && *primitive != nullptr) {
2013+    auto prim = static_cast<schema::Primitive *>(*primitive);
2014+    auto value = prim->value_as_Conv2DFusion();
2015+    if (prim != nullptr && value != nullptr) {
2016+      flatbuffers::FlatBufferBuilder fbb;
2017+      auto ops_offset = schema::CreateConv2DFusion(
2018+        fbb, mindspore::schema::Format_NCHW,
2019+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2020+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2021+        fbb.CreateVector(dilation.data(), dilation.size()), static_cast<schema::PadMode>(value->pad_mode()),
2022+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(),
2023+        value->out_channel(), static_cast<schema::ActivationType>(value->activation_type()));
2024+      auto prim_offset =
2025+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_FUSION), ops_offset.o);
2026+      fbb.Finish(prim_offset);
2027+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2028+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2029+      *primitive = ret_value;
2030+    }
2031+  }
2032+}
2033+PadMode MindIR_Conv2DFusion_GetPadMode(ConstPrimitivePtr primitive) {
2034+  if (primitive != nullptr) {
2035+    auto prim = static_cast<const schema::Primitive *>(primitive);
2036+    auto value = prim->value_as_Conv2DFusion();
2037+    if (prim != nullptr && value != nullptr) {
2038+      return static_cast<PadMode>(value->pad_mode());
2039+    } else {
2040+      PadMode en = static_cast<PadMode>(0);
2041+      return en;
2042+    }
2043+  } else {
2044+    PadMode en = static_cast<PadMode>(0);
2045+    return en;
2046+  }
2047+}
2048+
2049+void MindIR_Conv2DFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode) {
2050+  if (primitive != nullptr && *primitive != nullptr) {
2051+    auto prim = static_cast<schema::Primitive *>(*primitive);
2052+    auto value = prim->value_as_Conv2DFusion();
2053+    if (prim != nullptr && value != nullptr) {
2054+      flatbuffers::FlatBufferBuilder fbb;
2055+      auto ops_offset = schema::CreateConv2DFusion(
2056+        fbb, mindspore::schema::Format_NCHW,
2057+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2058+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2059+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), static_cast<schema::PadMode>(pad_mode),
2060+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(),
2061+        value->out_channel(), static_cast<schema::ActivationType>(value->activation_type()));
2062+      auto prim_offset =
2063+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_FUSION), ops_offset.o);
2064+      fbb.Finish(prim_offset);
2065+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2066+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2067+      *primitive = ret_value;
2068+    }
2069+  }
2070+}
2071+std::vector<int64_t> MindIR_Conv2DFusion_GetPadList(ConstPrimitivePtr primitive) {
2072+  if (primitive != nullptr) {
2073+    auto prim = static_cast<const schema::Primitive *>(primitive);
2074+    auto value = prim->value_as_Conv2DFusion();
2075+    if (prim != nullptr && value != nullptr) {
2076+      std::vector<int64_t> result;
2077+      auto src = value->pad_list();
2078+      result.resize(src->size());
2079+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
2080+      return result;
2081+    } else {
2082+      return {};
2083+    }
2084+  } else {
2085+    return {};
2086+  }
2087+}
2088+
2089+void MindIR_Conv2DFusion_SetPadList(PrimitivePtr *primitive, const std::vector<int64_t> &pad_list) {
2090+  if (primitive != nullptr && *primitive != nullptr) {
2091+    auto prim = static_cast<schema::Primitive *>(*primitive);
2092+    auto value = prim->value_as_Conv2DFusion();
2093+    if (prim != nullptr && value != nullptr) {
2094+      flatbuffers::FlatBufferBuilder fbb;
2095+      auto ops_offset = schema::CreateConv2DFusion(
2096+        fbb, mindspore::schema::Format_NCHW,
2097+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2098+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2099+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
2100+        static_cast<schema::PadMode>(value->pad_mode()), fbb.CreateVector(pad_list.data(), pad_list.size()), 0,
2101+        value->group(), value->in_channel(), value->out_channel(),
2102+        static_cast<schema::ActivationType>(value->activation_type()));
2103+      auto prim_offset =
2104+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_FUSION), ops_offset.o);
2105+      fbb.Finish(prim_offset);
2106+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2107+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2108+      *primitive = ret_value;
2109+    }
2110+  }
2111+}
2112+int64_t MindIR_Conv2DFusion_GetGroup(ConstPrimitivePtr primitive) {
2113+  if (primitive != nullptr) {
2114+    auto prim = static_cast<const schema::Primitive *>(primitive);
2115+    auto value = prim->value_as_Conv2DFusion();
2116+    if (prim != nullptr && value != nullptr) {
2117+      return value->group();
2118+    } else {
2119+      return 0;
2120+    }
2121+  } else {
2122+    return 0;
2123+  }
2124+}
2125+
2126+void MindIR_Conv2DFusion_SetGroup(PrimitivePtr *primitive, int64_t group) {
2127+  if (primitive != nullptr && *primitive != nullptr) {
2128+    auto prim = static_cast<schema::Primitive *>(*primitive);
2129+    auto value = prim->value_as_Conv2DFusion();
2130+    if (prim != nullptr && value != nullptr) {
2131+      flatbuffers::FlatBufferBuilder fbb;
2132+      auto ops_offset = schema::CreateConv2DFusion(
2133+        fbb, mindspore::schema::Format_NCHW,
2134+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2135+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2136+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
2137+        static_cast<schema::PadMode>(value->pad_mode()),
2138+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, group, value->in_channel(),
2139+        value->out_channel(), static_cast<schema::ActivationType>(value->activation_type()));
2140+      auto prim_offset =
2141+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_FUSION), ops_offset.o);
2142+      fbb.Finish(prim_offset);
2143+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2144+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2145+      *primitive = ret_value;
2146+    }
2147+  }
2148+}
2149+int64_t MindIR_Conv2DFusion_GetInChannel(ConstPrimitivePtr primitive) {
2150+  if (primitive != nullptr) {
2151+    auto prim = static_cast<const schema::Primitive *>(primitive);
2152+    auto value = prim->value_as_Conv2DFusion();
2153+    if (prim != nullptr && value != nullptr) {
2154+      return value->in_channel();
2155+    } else {
2156+      return 0;
2157+    }
2158+  } else {
2159+    return 0;
2160+  }
2161+}
2162+
2163+void MindIR_Conv2DFusion_SetInChannel(PrimitivePtr *primitive, int64_t in_channel) {
2164+  if (primitive != nullptr && *primitive != nullptr) {
2165+    auto prim = static_cast<schema::Primitive *>(*primitive);
2166+    auto value = prim->value_as_Conv2DFusion();
2167+    if (prim != nullptr && value != nullptr) {
2168+      flatbuffers::FlatBufferBuilder fbb;
2169+      auto ops_offset = schema::CreateConv2DFusion(
2170+        fbb, mindspore::schema::Format_NCHW,
2171+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2172+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2173+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
2174+        static_cast<schema::PadMode>(value->pad_mode()),
2175+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), in_channel,
2176+        value->out_channel(), static_cast<schema::ActivationType>(value->activation_type()));
2177+      auto prim_offset =
2178+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_FUSION), ops_offset.o);
2179+      fbb.Finish(prim_offset);
2180+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2181+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2182+      *primitive = ret_value;
2183+    }
2184+  }
2185+}
2186+int64_t MindIR_Conv2DFusion_GetOutChannel(ConstPrimitivePtr primitive) {
2187+  if (primitive != nullptr) {
2188+    auto prim = static_cast<const schema::Primitive *>(primitive);
2189+    auto value = prim->value_as_Conv2DFusion();
2190+    if (prim != nullptr && value != nullptr) {
2191+      return value->out_channel();
2192+    } else {
2193+      return 0;
2194+    }
2195+  } else {
2196+    return 0;
2197+  }
2198+}
2199+
2200+void MindIR_Conv2DFusion_SetOutChannel(PrimitivePtr *primitive, int64_t out_channel) {
2201+  if (primitive != nullptr && *primitive != nullptr) {
2202+    auto prim = static_cast<schema::Primitive *>(*primitive);
2203+    auto value = prim->value_as_Conv2DFusion();
2204+    if (prim != nullptr && value != nullptr) {
2205+      flatbuffers::FlatBufferBuilder fbb;
2206+      auto ops_offset = schema::CreateConv2DFusion(
2207+        fbb, mindspore::schema::Format_NCHW,
2208+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2209+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2210+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
2211+        static_cast<schema::PadMode>(value->pad_mode()),
2212+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(),
2213+        out_channel, static_cast<schema::ActivationType>(value->activation_type()));
2214+      auto prim_offset =
2215+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_FUSION), ops_offset.o);
2216+      fbb.Finish(prim_offset);
2217+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2218+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2219+      *primitive = ret_value;
2220+    }
2221+  }
2222+}
2223+ActivationType MindIR_Conv2DFusion_GetActivationType(ConstPrimitivePtr primitive) {
2224+  if (primitive != nullptr) {
2225+    auto prim = static_cast<const schema::Primitive *>(primitive);
2226+    auto value = prim->value_as_Conv2DFusion();
2227+    if (prim != nullptr && value != nullptr) {
2228+      return static_cast<ActivationType>(value->activation_type());
2229+    } else {
2230+      ActivationType en = static_cast<ActivationType>(0);
2231+      return en;
2232+    }
2233+  } else {
2234+    ActivationType en = static_cast<ActivationType>(0);
2235+    return en;
2236+  }
2237+}
2238+
2239+void MindIR_Conv2DFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) {
2240+  if (primitive != nullptr && *primitive != nullptr) {
2241+    auto prim = static_cast<schema::Primitive *>(*primitive);
2242+    auto value = prim->value_as_Conv2DFusion();
2243+    if (prim != nullptr && value != nullptr) {
2244+      flatbuffers::FlatBufferBuilder fbb;
2245+      auto ops_offset = schema::CreateConv2DFusion(
2246+        fbb, mindspore::schema::Format_NCHW,
2247+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2248+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2249+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
2250+        static_cast<schema::PadMode>(value->pad_mode()),
2251+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(),
2252+        value->out_channel(), static_cast<schema::ActivationType>(activation_type));
2253+      auto prim_offset =
2254+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_FUSION), ops_offset.o);
2255+      fbb.Finish(prim_offset);
2256+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2257+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2258+      *primitive = ret_value;
2259+    }
2260+  }
2261+}
2262+
2263+// ********** Conv2dTransposeFusion **********
2264+PrimitivePtr MindIR_Conv2dTransposeFusion_CreatePrimitive(
2265+  const std::vector<int64_t> &kernel_size, const std::vector<int64_t> &stride, const std::vector<int64_t> &dilation,
2266+  PadMode pad_mode, const std::vector<int64_t> &pad_list, int64_t group, int64_t in_channel, int64_t out_channel,
2267+  ActivationType activation_type, const std::vector<int64_t> &output_paddings) {
2268+  flatbuffers::FlatBufferBuilder fbb;
2269+  auto ops_offset = schema::CreateConv2dTransposeFusion(
2270+    fbb, mindspore::schema::Format_NCHW, fbb.CreateVector(kernel_size.data(), kernel_size.size()),
2271+    fbb.CreateVector(stride.data(), stride.size()), fbb.CreateVector(dilation.data(), dilation.size()),
2272+    static_cast<schema::PadMode>(pad_mode), 0, fbb.CreateVector(pad_list.data(), pad_list.size()), 0, group, in_channel,
2273+    out_channel, static_cast<schema::ActivationType>(activation_type),
2274+    fbb.CreateVector(output_paddings.data(), output_paddings.size()));
2275+  auto prim_offset =
2276+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o);
2277+  fbb.Finish(prim_offset);
2278+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
2279+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2280+  return ret_value;
2281+}
2282+std::vector<int64_t> MindIR_Conv2dTransposeFusion_GetKernelSize(ConstPrimitivePtr primitive) {
2283+  if (primitive != nullptr) {
2284+    auto prim = static_cast<const schema::Primitive *>(primitive);
2285+    auto value = prim->value_as_Conv2dTransposeFusion();
2286+    if (prim != nullptr && value != nullptr) {
2287+      std::vector<int64_t> result;
2288+      auto src = value->kernel_size();
2289+      result.resize(src->size());
2290+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
2291+      return result;
2292+    } else {
2293+      return {};
2294+    }
2295+  } else {
2296+    return {};
2297+  }
2298+}
2299+
2300+void MindIR_Conv2dTransposeFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector<int64_t> &kernel_size) {
2301+  if (primitive != nullptr && *primitive != nullptr) {
2302+    auto prim = static_cast<schema::Primitive *>(*primitive);
2303+    auto value = prim->value_as_Conv2dTransposeFusion();
2304+    if (prim != nullptr && value != nullptr) {
2305+      flatbuffers::FlatBufferBuilder fbb;
2306+      auto ops_offset = schema::CreateConv2dTransposeFusion(
2307+        fbb, mindspore::schema::Format_NCHW, fbb.CreateVector(kernel_size.data(), kernel_size.size()),
2308+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2309+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
2310+        static_cast<schema::PadMode>(value->pad_mode()), 0,
2311+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(),
2312+        value->out_channel(), static_cast<schema::ActivationType>(value->activation_type()),
2313+        fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size()));
2314+      auto prim_offset = schema::CreatePrimitive(
2315+        fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o);
2316+      fbb.Finish(prim_offset);
2317+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2318+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2319+      *primitive = ret_value;
2320+    }
2321+  }
2322+}
2323+std::vector<int64_t> MindIR_Conv2dTransposeFusion_GetStride(ConstPrimitivePtr primitive) {
2324+  if (primitive != nullptr) {
2325+    auto prim = static_cast<const schema::Primitive *>(primitive);
2326+    auto value = prim->value_as_Conv2dTransposeFusion();
2327+    if (prim != nullptr && value != nullptr) {
2328+      std::vector<int64_t> result;
2329+      auto src = value->stride();
2330+      result.resize(src->size());
2331+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
2332+      return result;
2333+    } else {
2334+      return {};
2335+    }
2336+  } else {
2337+    return {};
2338+  }
2339+}
2340+
2341+void MindIR_Conv2dTransposeFusion_SetStride(PrimitivePtr *primitive, const std::vector<int64_t> &stride) {
2342+  if (primitive != nullptr && *primitive != nullptr) {
2343+    auto prim = static_cast<schema::Primitive *>(*primitive);
2344+    auto value = prim->value_as_Conv2dTransposeFusion();
2345+    if (prim != nullptr && value != nullptr) {
2346+      flatbuffers::FlatBufferBuilder fbb;
2347+      auto ops_offset = schema::CreateConv2dTransposeFusion(
2348+        fbb, mindspore::schema::Format_NCHW,
2349+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2350+        fbb.CreateVector(stride.data(), stride.size()),
2351+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
2352+        static_cast<schema::PadMode>(value->pad_mode()), 0,
2353+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(),
2354+        value->out_channel(), static_cast<schema::ActivationType>(value->activation_type()),
2355+        fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size()));
2356+      auto prim_offset = schema::CreatePrimitive(
2357+        fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o);
2358+      fbb.Finish(prim_offset);
2359+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2360+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2361+      *primitive = ret_value;
2362+    }
2363+  }
2364+}
2365+std::vector<int64_t> MindIR_Conv2dTransposeFusion_GetDilation(ConstPrimitivePtr primitive) {
2366+  if (primitive != nullptr) {
2367+    auto prim = static_cast<const schema::Primitive *>(primitive);
2368+    auto value = prim->value_as_Conv2dTransposeFusion();
2369+    if (prim != nullptr && value != nullptr) {
2370+      std::vector<int64_t> result;
2371+      auto src = value->dilation();
2372+      result.resize(src->size());
2373+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
2374+      return result;
2375+    } else {
2376+      return {};
2377+    }
2378+  } else {
2379+    return {};
2380+  }
2381+}
2382+
2383+void MindIR_Conv2dTransposeFusion_SetDilation(PrimitivePtr *primitive, const std::vector<int64_t> &dilation) {
2384+  if (primitive != nullptr && *primitive != nullptr) {
2385+    auto prim = static_cast<schema::Primitive *>(*primitive);
2386+    auto value = prim->value_as_Conv2dTransposeFusion();
2387+    if (prim != nullptr && value != nullptr) {
2388+      flatbuffers::FlatBufferBuilder fbb;
2389+      auto ops_offset = schema::CreateConv2dTransposeFusion(
2390+        fbb, mindspore::schema::Format_NCHW,
2391+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2392+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2393+        fbb.CreateVector(dilation.data(), dilation.size()), static_cast<schema::PadMode>(value->pad_mode()), 0,
2394+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(),
2395+        value->out_channel(), static_cast<schema::ActivationType>(value->activation_type()),
2396+        fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size()));
2397+      auto prim_offset = schema::CreatePrimitive(
2398+        fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o);
2399+      fbb.Finish(prim_offset);
2400+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2401+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2402+      *primitive = ret_value;
2403+    }
2404+  }
2405+}
2406+PadMode MindIR_Conv2dTransposeFusion_GetPadMode(ConstPrimitivePtr primitive) {
2407+  if (primitive != nullptr) {
2408+    auto prim = static_cast<const schema::Primitive *>(primitive);
2409+    auto value = prim->value_as_Conv2dTransposeFusion();
2410+    if (prim != nullptr && value != nullptr) {
2411+      return static_cast<PadMode>(value->pad_mode());
2412+    } else {
2413+      PadMode en = static_cast<PadMode>(0);
2414+      return en;
2415+    }
2416+  } else {
2417+    PadMode en = static_cast<PadMode>(0);
2418+    return en;
2419+  }
2420+}
2421+
2422+void MindIR_Conv2dTransposeFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode) {
2423+  if (primitive != nullptr && *primitive != nullptr) {
2424+    auto prim = static_cast<schema::Primitive *>(*primitive);
2425+    auto value = prim->value_as_Conv2dTransposeFusion();
2426+    if (prim != nullptr && value != nullptr) {
2427+      flatbuffers::FlatBufferBuilder fbb;
2428+      auto ops_offset = schema::CreateConv2dTransposeFusion(
2429+        fbb, mindspore::schema::Format_NCHW,
2430+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2431+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2432+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), static_cast<schema::PadMode>(pad_mode),
2433+        0, fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(),
2434+        value->in_channel(), value->out_channel(), static_cast<schema::ActivationType>(value->activation_type()),
2435+        fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size()));
2436+      auto prim_offset = schema::CreatePrimitive(
2437+        fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o);
2438+      fbb.Finish(prim_offset);
2439+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2440+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2441+      *primitive = ret_value;
2442+    }
2443+  }
2444+}
2445+std::vector<int64_t> MindIR_Conv2dTransposeFusion_GetPadList(ConstPrimitivePtr primitive) {
2446+  if (primitive != nullptr) {
2447+    auto prim = static_cast<const schema::Primitive *>(primitive);
2448+    auto value = prim->value_as_Conv2dTransposeFusion();
2449+    if (prim != nullptr && value != nullptr) {
2450+      std::vector<int64_t> result;
2451+      auto src = value->pad_list();
2452+      result.resize(src->size());
2453+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
2454+      return result;
2455+    } else {
2456+      return {};
2457+    }
2458+  } else {
2459+    return {};
2460+  }
2461+}
2462+
2463+void MindIR_Conv2dTransposeFusion_SetPadList(PrimitivePtr *primitive, const std::vector<int64_t> &pad_list) {
2464+  if (primitive != nullptr && *primitive != nullptr) {
2465+    auto prim = static_cast<schema::Primitive *>(*primitive);
2466+    auto value = prim->value_as_Conv2dTransposeFusion();
2467+    if (prim != nullptr && value != nullptr) {
2468+      flatbuffers::FlatBufferBuilder fbb;
2469+      auto ops_offset = schema::CreateConv2dTransposeFusion(
2470+        fbb, mindspore::schema::Format_NCHW,
2471+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2472+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2473+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
2474+        static_cast<schema::PadMode>(value->pad_mode()), 0, fbb.CreateVector(pad_list.data(), pad_list.size()), 0,
2475+        value->group(), value->in_channel(), value->out_channel(),
2476+        static_cast<schema::ActivationType>(value->activation_type()),
2477+        fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size()));
2478+      auto prim_offset = schema::CreatePrimitive(
2479+        fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o);
2480+      fbb.Finish(prim_offset);
2481+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2482+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2483+      *primitive = ret_value;
2484+    }
2485+  }
2486+}
2487+int64_t MindIR_Conv2dTransposeFusion_GetGroup(ConstPrimitivePtr primitive) {
2488+  if (primitive != nullptr) {
2489+    auto prim = static_cast<const schema::Primitive *>(primitive);
2490+    auto value = prim->value_as_Conv2dTransposeFusion();
2491+    if (prim != nullptr && value != nullptr) {
2492+      return value->group();
2493+    } else {
2494+      return 0;
2495+    }
2496+  } else {
2497+    return 0;
2498+  }
2499+}
2500+
2501+void MindIR_Conv2dTransposeFusion_SetGroup(PrimitivePtr *primitive, int64_t group) {
2502+  if (primitive != nullptr && *primitive != nullptr) {
2503+    auto prim = static_cast<schema::Primitive *>(*primitive);
2504+    auto value = prim->value_as_Conv2dTransposeFusion();
2505+    if (prim != nullptr && value != nullptr) {
2506+      flatbuffers::FlatBufferBuilder fbb;
2507+      auto ops_offset = schema::CreateConv2dTransposeFusion(
2508+        fbb, mindspore::schema::Format_NCHW,
2509+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2510+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2511+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
2512+        static_cast<schema::PadMode>(value->pad_mode()), 0,
2513+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, group, value->in_channel(),
2514+        value->out_channel(), static_cast<schema::ActivationType>(value->activation_type()),
2515+        fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size()));
2516+      auto prim_offset = schema::CreatePrimitive(
2517+        fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o);
2518+      fbb.Finish(prim_offset);
2519+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2520+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2521+      *primitive = ret_value;
2522+    }
2523+  }
2524+}
2525+int64_t MindIR_Conv2dTransposeFusion_GetInChannel(ConstPrimitivePtr primitive) {
2526+  if (primitive != nullptr) {
2527+    auto prim = static_cast<const schema::Primitive *>(primitive);
2528+    auto value = prim->value_as_Conv2dTransposeFusion();
2529+    if (prim != nullptr && value != nullptr) {
2530+      return value->in_channel();
2531+    } else {
2532+      return 0;
2533+    }
2534+  } else {
2535+    return 0;
2536+  }
2537+}
2538+
2539+void MindIR_Conv2dTransposeFusion_SetInChannel(PrimitivePtr *primitive, int64_t in_channel) {
2540+  if (primitive != nullptr && *primitive != nullptr) {
2541+    auto prim = static_cast<schema::Primitive *>(*primitive);
2542+    auto value = prim->value_as_Conv2dTransposeFusion();
2543+    if (prim != nullptr && value != nullptr) {
2544+      flatbuffers::FlatBufferBuilder fbb;
2545+      auto ops_offset = schema::CreateConv2dTransposeFusion(
2546+        fbb, mindspore::schema::Format_NCHW,
2547+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2548+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2549+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
2550+        static_cast<schema::PadMode>(value->pad_mode()), 0,
2551+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), in_channel,
2552+        value->out_channel(), static_cast<schema::ActivationType>(value->activation_type()),
2553+        fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size()));
2554+      auto prim_offset = schema::CreatePrimitive(
2555+        fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o);
2556+      fbb.Finish(prim_offset);
2557+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2558+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2559+      *primitive = ret_value;
2560+    }
2561+  }
2562+}
2563+int64_t MindIR_Conv2dTransposeFusion_GetOutChannel(ConstPrimitivePtr primitive) {
2564+  if (primitive != nullptr) {
2565+    auto prim = static_cast<const schema::Primitive *>(primitive);
2566+    auto value = prim->value_as_Conv2dTransposeFusion();
2567+    if (prim != nullptr && value != nullptr) {
2568+      return value->out_channel();
2569+    } else {
2570+      return 0;
2571+    }
2572+  } else {
2573+    return 0;
2574+  }
2575+}
2576+
2577+void MindIR_Conv2dTransposeFusion_SetOutChannel(PrimitivePtr *primitive, int64_t out_channel) {
2578+  if (primitive != nullptr && *primitive != nullptr) {
2579+    auto prim = static_cast<schema::Primitive *>(*primitive);
2580+    auto value = prim->value_as_Conv2dTransposeFusion();
2581+    if (prim != nullptr && value != nullptr) {
2582+      flatbuffers::FlatBufferBuilder fbb;
2583+      auto ops_offset = schema::CreateConv2dTransposeFusion(
2584+        fbb, mindspore::schema::Format_NCHW,
2585+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2586+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2587+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
2588+        static_cast<schema::PadMode>(value->pad_mode()), 0,
2589+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(),
2590+        out_channel, static_cast<schema::ActivationType>(value->activation_type()),
2591+        fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size()));
2592+      auto prim_offset = schema::CreatePrimitive(
2593+        fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o);
2594+      fbb.Finish(prim_offset);
2595+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2596+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2597+      *primitive = ret_value;
2598+    }
2599+  }
2600+}
2601+ActivationType MindIR_Conv2dTransposeFusion_GetActivationType(ConstPrimitivePtr primitive) {
2602+  if (primitive != nullptr) {
2603+    auto prim = static_cast<const schema::Primitive *>(primitive);
2604+    auto value = prim->value_as_Conv2dTransposeFusion();
2605+    if (prim != nullptr && value != nullptr) {
2606+      return static_cast<ActivationType>(value->activation_type());
2607+    } else {
2608+      ActivationType en = static_cast<ActivationType>(0);
2609+      return en;
2610+    }
2611+  } else {
2612+    ActivationType en = static_cast<ActivationType>(0);
2613+    return en;
2614+  }
2615+}
2616+
2617+void MindIR_Conv2dTransposeFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) {
2618+  if (primitive != nullptr && *primitive != nullptr) {
2619+    auto prim = static_cast<schema::Primitive *>(*primitive);
2620+    auto value = prim->value_as_Conv2dTransposeFusion();
2621+    if (prim != nullptr && value != nullptr) {
2622+      flatbuffers::FlatBufferBuilder fbb;
2623+      auto ops_offset = schema::CreateConv2dTransposeFusion(
2624+        fbb, mindspore::schema::Format_NCHW,
2625+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2626+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2627+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
2628+        static_cast<schema::PadMode>(value->pad_mode()), 0,
2629+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(),
2630+        value->out_channel(), static_cast<schema::ActivationType>(activation_type),
2631+        fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size()));
2632+      auto prim_offset = schema::CreatePrimitive(
2633+        fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o);
2634+      fbb.Finish(prim_offset);
2635+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2636+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2637+      *primitive = ret_value;
2638+    }
2639+  }
2640+}
2641+std::vector<int64_t> MindIR_Conv2dTransposeFusion_GetOutputPaddings(ConstPrimitivePtr primitive) {
2642+  if (primitive != nullptr) {
2643+    auto prim = static_cast<const schema::Primitive *>(primitive);
2644+    auto value = prim->value_as_Conv2dTransposeFusion();
2645+    if (prim != nullptr && value != nullptr) {
2646+      std::vector<int64_t> result;
2647+      auto src = value->output_paddings();
2648+      result.resize(src->size());
2649+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
2650+      return result;
2651+    } else {
2652+      return {};
2653+    }
2654+  } else {
2655+    return {};
2656+  }
2657+}
2658+
2659+void MindIR_Conv2dTransposeFusion_SetOutputPaddings(PrimitivePtr *primitive,
2660+                                                    const std::vector<int64_t> &output_paddings) {
2661+  if (primitive != nullptr && *primitive != nullptr) {
2662+    auto prim = static_cast<schema::Primitive *>(*primitive);
2663+    auto value = prim->value_as_Conv2dTransposeFusion();
2664+    if (prim != nullptr && value != nullptr) {
2665+      flatbuffers::FlatBufferBuilder fbb;
2666+      auto ops_offset = schema::CreateConv2dTransposeFusion(
2667+        fbb, mindspore::schema::Format_NCHW,
2668+        fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
2669+        fbb.CreateVector(value->stride()->data(), value->stride()->size()),
2670+        fbb.CreateVector(value->dilation()->data(), value->dilation()->size()),
2671+        static_cast<schema::PadMode>(value->pad_mode()), 0,
2672+        fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(),
2673+        value->out_channel(), static_cast<schema::ActivationType>(value->activation_type()),
2674+        fbb.CreateVector(output_paddings.data(), output_paddings.size()));
2675+      auto prim_offset = schema::CreatePrimitive(
2676+        fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o);
2677+      fbb.Finish(prim_offset);
2678+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2679+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2680+      *primitive = ret_value;
2681+    }
2682+  }
2683+}
2684+
2685+// ********** DivFusion **********
2686+PrimitivePtr MindIR_DivFusion_CreatePrimitive(ActivationType activation_type) {
2687+  flatbuffers::FlatBufferBuilder fbb;
2688+  auto ops_offset = schema::CreateDivFusion(fbb, static_cast<schema::ActivationType>(activation_type));
2689+  auto prim_offset =
2690+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_DIV_FUSION), ops_offset.o);
2691+  fbb.Finish(prim_offset);
2692+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
2693+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2694+  return ret_value;
2695+}
2696+ActivationType MindIR_DivFusion_GetActivationType(ConstPrimitivePtr primitive) {
2697+  if (primitive != nullptr) {
2698+    auto prim = static_cast<const schema::Primitive *>(primitive);
2699+    auto value = prim->value_as_DivFusion();
2700+    if (prim != nullptr && value != nullptr) {
2701+      return static_cast<ActivationType>(value->activation_type());
2702+    } else {
2703+      ActivationType en = static_cast<ActivationType>(0);
2704+      return en;
2705+    }
2706+  } else {
2707+    ActivationType en = static_cast<ActivationType>(0);
2708+    return en;
2709+  }
2710+}
2711+
2712+void MindIR_DivFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) {
2713+  if (primitive != nullptr && *primitive != nullptr) {
2714+    auto prim = static_cast<schema::Primitive *>(*primitive);
2715+    auto value = prim->value_as_DivFusion();
2716+    if (prim != nullptr && value != nullptr) {
2717+      flatbuffers::FlatBufferBuilder fbb;
2718+      auto ops_offset = schema::CreateDivFusion(fbb, static_cast<schema::ActivationType>(activation_type));
2719+      auto prim_offset =
2720+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_DIV_FUSION), ops_offset.o);
2721+      fbb.Finish(prim_offset);
2722+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2723+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2724+      *primitive = ret_value;
2725+    }
2726+  }
2727+}
2728+
2729+// ********** Eltwise **********
2730+PrimitivePtr MindIR_Eltwise_CreatePrimitive(EltwiseMode mode) {
2731+  flatbuffers::FlatBufferBuilder fbb;
2732+  auto ops_offset = schema::CreateEltwise(fbb, static_cast<schema::EltwiseMode>(mode));
2733+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ELTWISE), ops_offset.o);
2734+  fbb.Finish(prim_offset);
2735+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
2736+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2737+  return ret_value;
2738+}
2739+EltwiseMode MindIR_Eltwise_GetMode(ConstPrimitivePtr primitive) {
2740+  if (primitive != nullptr) {
2741+    auto prim = static_cast<const schema::Primitive *>(primitive);
2742+    auto value = prim->value_as_Eltwise();
2743+    if (prim != nullptr && value != nullptr) {
2744+      return static_cast<EltwiseMode>(value->mode());
2745+    } else {
2746+      EltwiseMode en = static_cast<EltwiseMode>(0);
2747+      return en;
2748+    }
2749+  } else {
2750+    EltwiseMode en = static_cast<EltwiseMode>(0);
2751+    return en;
2752+  }
2753+}
2754+
2755+void MindIR_Eltwise_SetMode(PrimitivePtr *primitive, EltwiseMode mode) {
2756+  if (primitive != nullptr && *primitive != nullptr) {
2757+    auto prim = static_cast<schema::Primitive *>(*primitive);
2758+    auto value = prim->value_as_Eltwise();
2759+    if (prim != nullptr && value != nullptr) {
2760+      flatbuffers::FlatBufferBuilder fbb;
2761+      auto ops_offset = schema::CreateEltwise(fbb, static_cast<schema::EltwiseMode>(mode));
2762+      auto prim_offset =
2763+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ELTWISE), ops_offset.o);
2764+      fbb.Finish(prim_offset);
2765+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2766+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2767+      *primitive = ret_value;
2768+    }
2769+  }
2770+}
2771+
2772+// ********** ExpandDims **********
2773+PrimitivePtr MindIR_ExpandDims_CreatePrimitive() {
2774+  flatbuffers::FlatBufferBuilder fbb;
2775+  auto ops_offset = schema::CreateExpandDims(fbb);
2776+  auto prim_offset =
2777+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_EXPAND_DIMS), ops_offset.o);
2778+  fbb.Finish(prim_offset);
2779+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
2780+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2781+  return ret_value;
2782+}
2783+
2784+// ********** Fill **********
2785+PrimitivePtr MindIR_Fill_CreatePrimitive() {
2786+  flatbuffers::FlatBufferBuilder fbb;
2787+  auto ops_offset = schema::CreateFill(fbb);
2788+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_FILL), ops_offset.o);
2789+  fbb.Finish(prim_offset);
2790+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
2791+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2792+  return ret_value;
2793+}
2794+
2795+// ********** FullConnection **********
2796+PrimitivePtr MindIR_FullConnection_CreatePrimitive(bool has_bias, bool use_axis, int64_t axis,
2797+                                                   ActivationType activation_type) {
2798+  flatbuffers::FlatBufferBuilder fbb;
2799+  auto ops_offset =
2800+    schema::CreateFullConnection(fbb, has_bias, use_axis, axis, static_cast<schema::ActivationType>(activation_type));
2801+  auto prim_offset =
2802+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_FULL_CONNECTION), ops_offset.o);
2803+  fbb.Finish(prim_offset);
2804+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
2805+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2806+  return ret_value;
2807+}
2808+bool MindIR_FullConnection_GetHasBias(ConstPrimitivePtr primitive) {
2809+  if (primitive != nullptr) {
2810+    auto prim = static_cast<const schema::Primitive *>(primitive);
2811+    auto value = prim->value_as_FullConnection();
2812+    if (prim != nullptr && value != nullptr) {
2813+      return value->has_bias();
2814+    } else {
2815+      return false;
2816+    }
2817+  } else {
2818+    return false;
2819+  }
2820+}
2821+
2822+void MindIR_FullConnection_SetHasBias(PrimitivePtr *primitive, bool has_bias) {
2823+  if (primitive != nullptr && *primitive != nullptr) {
2824+    auto prim = static_cast<schema::Primitive *>(*primitive);
2825+    auto value = prim->value_as_FullConnection();
2826+    if (prim != nullptr && value != nullptr) {
2827+      flatbuffers::FlatBufferBuilder fbb;
2828+      auto ops_offset = schema::CreateFullConnection(fbb, has_bias, value->use_axis(), value->axis(),
2829+                                                     static_cast<schema::ActivationType>(value->activation_type()));
2830+      auto prim_offset =
2831+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_FULL_CONNECTION), ops_offset.o);
2832+      fbb.Finish(prim_offset);
2833+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2834+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2835+      *primitive = ret_value;
2836+    }
2837+  }
2838+}
2839+bool MindIR_FullConnection_GetUseAxis(ConstPrimitivePtr primitive) {
2840+  if (primitive != nullptr) {
2841+    auto prim = static_cast<const schema::Primitive *>(primitive);
2842+    auto value = prim->value_as_FullConnection();
2843+    if (prim != nullptr && value != nullptr) {
2844+      return value->use_axis();
2845+    } else {
2846+      return false;
2847+    }
2848+  } else {
2849+    return false;
2850+  }
2851+}
2852+
2853+void MindIR_FullConnection_SetUseAxis(PrimitivePtr *primitive, bool use_axis) {
2854+  if (primitive != nullptr && *primitive != nullptr) {
2855+    auto prim = static_cast<schema::Primitive *>(*primitive);
2856+    auto value = prim->value_as_FullConnection();
2857+    if (prim != nullptr && value != nullptr) {
2858+      flatbuffers::FlatBufferBuilder fbb;
2859+      auto ops_offset = schema::CreateFullConnection(fbb, value->has_bias(), use_axis, value->axis(),
2860+                                                     static_cast<schema::ActivationType>(value->activation_type()));
2861+      auto prim_offset =
2862+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_FULL_CONNECTION), ops_offset.o);
2863+      fbb.Finish(prim_offset);
2864+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2865+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2866+      *primitive = ret_value;
2867+    }
2868+  }
2869+}
2870+int64_t MindIR_FullConnection_GetAxis(ConstPrimitivePtr primitive) {
2871+  if (primitive != nullptr) {
2872+    auto prim = static_cast<const schema::Primitive *>(primitive);
2873+    auto value = prim->value_as_FullConnection();
2874+    if (prim != nullptr && value != nullptr) {
2875+      return value->axis();
2876+    } else {
2877+      return 0;
2878+    }
2879+  } else {
2880+    return 0;
2881+  }
2882+}
2883+
2884+void MindIR_FullConnection_SetAxis(PrimitivePtr *primitive, int64_t axis) {
2885+  if (primitive != nullptr && *primitive != nullptr) {
2886+    auto prim = static_cast<schema::Primitive *>(*primitive);
2887+    auto value = prim->value_as_FullConnection();
2888+    if (prim != nullptr && value != nullptr) {
2889+      flatbuffers::FlatBufferBuilder fbb;
2890+      auto ops_offset = schema::CreateFullConnection(fbb, value->has_bias(), value->use_axis(), axis,
2891+                                                     static_cast<schema::ActivationType>(value->activation_type()));
2892+      auto prim_offset =
2893+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_FULL_CONNECTION), ops_offset.o);
2894+      fbb.Finish(prim_offset);
2895+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2896+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2897+      *primitive = ret_value;
2898+    }
2899+  }
2900+}
2901+ActivationType MindIR_FullConnection_GetActivationType(ConstPrimitivePtr primitive) {
2902+  if (primitive != nullptr) {
2903+    auto prim = static_cast<const schema::Primitive *>(primitive);
2904+    auto value = prim->value_as_FullConnection();
2905+    if (prim != nullptr && value != nullptr) {
2906+      return static_cast<ActivationType>(value->activation_type());
2907+    } else {
2908+      ActivationType en = static_cast<ActivationType>(0);
2909+      return en;
2910+    }
2911+  } else {
2912+    ActivationType en = static_cast<ActivationType>(0);
2913+    return en;
2914+  }
2915+}
2916+
2917+void MindIR_FullConnection_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) {
2918+  if (primitive != nullptr && *primitive != nullptr) {
2919+    auto prim = static_cast<schema::Primitive *>(*primitive);
2920+    auto value = prim->value_as_FullConnection();
2921+    if (prim != nullptr && value != nullptr) {
2922+      flatbuffers::FlatBufferBuilder fbb;
2923+      auto ops_offset = schema::CreateFullConnection(fbb, value->has_bias(), value->use_axis(), value->axis(),
2924+                                                     static_cast<schema::ActivationType>(activation_type));
2925+      auto prim_offset =
2926+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_FULL_CONNECTION), ops_offset.o);
2927+      fbb.Finish(prim_offset);
2928+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2929+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2930+      *primitive = ret_value;
2931+    }
2932+  }
2933+}
2934+
2935+// ********** FusedBatchNorm **********
2936+PrimitivePtr MindIR_FusedBatchNorm_CreatePrimitive(float epsilon) {
2937+  flatbuffers::FlatBufferBuilder fbb;
2938+  auto ops_offset = schema::CreateFusedBatchNorm(fbb, 0.9, 0);
2939+  auto prim_offset =
2940+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_FUSED_BATCH_NORM), ops_offset.o);
2941+  fbb.Finish(prim_offset);
2942+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
2943+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2944+  return ret_value;
2945+}
2946+float MindIR_FusedBatchNorm_GetEpsilon(ConstPrimitivePtr primitive) {
2947+  if (primitive != nullptr) {
2948+    auto prim = static_cast<const schema::Primitive *>(primitive);
2949+    auto value = prim->value_as_FusedBatchNorm();
2950+    if (prim != nullptr && value != nullptr) {
2951+      return value->epsilon();
2952+    } else {
2953+      return .0;
2954+    }
2955+  } else {
2956+    return .0;
2957+  }
2958+}
2959+
2960+void MindIR_FusedBatchNorm_SetEpsilon(PrimitivePtr *primitive, float epsilon) {
2961+  if (primitive != nullptr && *primitive != nullptr) {
2962+    auto prim = static_cast<schema::Primitive *>(*primitive);
2963+    auto value = prim->value_as_FusedBatchNorm();
2964+    if (prim != nullptr && value != nullptr) {
2965+      flatbuffers::FlatBufferBuilder fbb;
2966+      auto ops_offset = schema::CreateFusedBatchNorm(fbb, epsilon, 0.9, 0);
2967+      auto prim_offset =
2968+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_FUSED_BATCH_NORM), ops_offset.o);
2969+      fbb.Finish(prim_offset);
2970+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
2971+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2972+      *primitive = ret_value;
2973+    }
2974+  }
2975+}
2976+
2977+// ********** Gather **********
2978+PrimitivePtr MindIR_Gather_CreatePrimitive() {
2979+  flatbuffers::FlatBufferBuilder fbb;
2980+  auto ops_offset = schema::CreateGather(fbb);
2981+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_GATHER), ops_offset.o);
2982+  fbb.Finish(prim_offset);
2983+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
2984+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2985+  return ret_value;
2986+}
2987+
2988+// ********** LayerNormFusion **********
2989+PrimitivePtr MindIR_LayerNormFusion_CreatePrimitive(int64_t begin_norm_axis, float epsilon, bool elementwise_affine,
2990+                                                    int64_t begin_params_axis) {
2991+  flatbuffers::FlatBufferBuilder fbb;
2992+  auto ops_offset = schema::CreateLayerNormFusion(fbb, begin_norm_axis, epsilon, elementwise_affine, begin_params_axis);
2993+  auto prim_offset =
2994+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_LAYER_NORM_FUSION), ops_offset.o);
2995+  fbb.Finish(prim_offset);
2996+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
2997+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
2998+  return ret_value;
2999+}
3000+int64_t MindIR_LayerNormFusion_GetBeginNormAxis(ConstPrimitivePtr primitive) {
3001+  if (primitive != nullptr) {
3002+    auto prim = static_cast<const schema::Primitive *>(primitive);
3003+    auto value = prim->value_as_LayerNormFusion();
3004+    if (prim != nullptr && value != nullptr) {
3005+      return value->begin_norm_axis();
3006+    } else {
3007+      return 0;
3008+    }
3009+  } else {
3010+    return 0;
3011+  }
3012+}
3013+
3014+void MindIR_LayerNormFusion_SetBeginNormAxis(PrimitivePtr *primitive, int64_t begin_norm_axis) {
3015+  if (primitive != nullptr && *primitive != nullptr) {
3016+    auto prim = static_cast<schema::Primitive *>(*primitive);
3017+    auto value = prim->value_as_LayerNormFusion();
3018+    if (prim != nullptr && value != nullptr) {
3019+      flatbuffers::FlatBufferBuilder fbb;
3020+      auto ops_offset = schema::CreateLayerNormFusion(fbb, begin_norm_axis, value->epsilon(),
3021+                                                      value->elementwise_affine(), value->begin_params_axis());
3022+      auto prim_offset =
3023+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_LAYER_NORM_FUSION), ops_offset.o);
3024+      fbb.Finish(prim_offset);
3025+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3026+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3027+      *primitive = ret_value;
3028+    }
3029+  }
3030+}
3031+float MindIR_LayerNormFusion_GetEpsilon(ConstPrimitivePtr primitive) {
3032+  if (primitive != nullptr) {
3033+    auto prim = static_cast<const schema::Primitive *>(primitive);
3034+    auto value = prim->value_as_LayerNormFusion();
3035+    if (prim != nullptr && value != nullptr) {
3036+      return value->epsilon();
3037+    } else {
3038+      return .0;
3039+    }
3040+  } else {
3041+    return .0;
3042+  }
3043+}
3044+
3045+void MindIR_LayerNormFusion_SetEpsilon(PrimitivePtr *primitive, float epsilon) {
3046+  if (primitive != nullptr && *primitive != nullptr) {
3047+    auto prim = static_cast<schema::Primitive *>(*primitive);
3048+    auto value = prim->value_as_LayerNormFusion();
3049+    if (prim != nullptr && value != nullptr) {
3050+      flatbuffers::FlatBufferBuilder fbb;
3051+      auto ops_offset = schema::CreateLayerNormFusion(fbb, value->begin_norm_axis(), epsilon,
3052+                                                      value->elementwise_affine(), value->begin_params_axis());
3053+      auto prim_offset =
3054+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_LAYER_NORM_FUSION), ops_offset.o);
3055+      fbb.Finish(prim_offset);
3056+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3057+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3058+      *primitive = ret_value;
3059+    }
3060+  }
3061+}
3062+bool MindIR_LayerNormFusion_GetElementwiseAffine(ConstPrimitivePtr primitive) {
3063+  if (primitive != nullptr) {
3064+    auto prim = static_cast<const schema::Primitive *>(primitive);
3065+    auto value = prim->value_as_LayerNormFusion();
3066+    if (prim != nullptr && value != nullptr) {
3067+      return value->elementwise_affine();
3068+    } else {
3069+      return false;
3070+    }
3071+  } else {
3072+    return false;
3073+  }
3074+}
3075+
3076+void MindIR_LayerNormFusion_SetElementwiseAffine(PrimitivePtr *primitive, bool elementwise_affine) {
3077+  if (primitive != nullptr && *primitive != nullptr) {
3078+    auto prim = static_cast<schema::Primitive *>(*primitive);
3079+    auto value = prim->value_as_LayerNormFusion();
3080+    if (prim != nullptr && value != nullptr) {
3081+      flatbuffers::FlatBufferBuilder fbb;
3082+      auto ops_offset = schema::CreateLayerNormFusion(fbb, value->begin_norm_axis(), value->epsilon(),
3083+                                                      elementwise_affine, value->begin_params_axis());
3084+      auto prim_offset =
3085+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_LAYER_NORM_FUSION), ops_offset.o);
3086+      fbb.Finish(prim_offset);
3087+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3088+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3089+      *primitive = ret_value;
3090+    }
3091+  }
3092+}
3093+int64_t MindIR_LayerNormFusion_GetBeginParamsAxis(ConstPrimitivePtr primitive) {
3094+  if (primitive != nullptr) {
3095+    auto prim = static_cast<const schema::Primitive *>(primitive);
3096+    auto value = prim->value_as_LayerNormFusion();
3097+    if (prim != nullptr && value != nullptr) {
3098+      return value->begin_params_axis();
3099+    } else {
3100+      return 0;
3101+    }
3102+  } else {
3103+    return 0;
3104+  }
3105+}
3106+
3107+void MindIR_LayerNormFusion_SetBeginParamsAxis(PrimitivePtr *primitive, int64_t begin_params_axis) {
3108+  if (primitive != nullptr && *primitive != nullptr) {
3109+    auto prim = static_cast<schema::Primitive *>(*primitive);
3110+    auto value = prim->value_as_LayerNormFusion();
3111+    if (prim != nullptr && value != nullptr) {
3112+      flatbuffers::FlatBufferBuilder fbb;
3113+      auto ops_offset = schema::CreateLayerNormFusion(fbb, value->begin_norm_axis(), value->epsilon(),
3114+                                                      value->elementwise_affine(), begin_params_axis);
3115+      auto prim_offset =
3116+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_LAYER_NORM_FUSION), ops_offset.o);
3117+      fbb.Finish(prim_offset);
3118+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3119+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3120+      *primitive = ret_value;
3121+    }
3122+  }
3123+}
3124+
3125+// ********** LessEqual **********
3126+PrimitivePtr MindIR_LessEqual_CreatePrimitive() {
3127+  flatbuffers::FlatBufferBuilder fbb;
3128+  auto ops_offset = schema::CreateLessEqual(fbb);
3129+  auto prim_offset =
3130+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_LESS_EQUAL), ops_offset.o);
3131+  fbb.Finish(prim_offset);
3132+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
3133+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3134+  return ret_value;
3135+}
3136+
3137+// ********** MatMulFusion **********
3138+PrimitivePtr MindIR_MatMulFusion_CreatePrimitive(bool transpose_a, bool transpose_b, ActivationType activation_type) {
3139+  flatbuffers::FlatBufferBuilder fbb;
3140+  auto ops_offset =
3141+    schema::CreateMatMulFusion(fbb, transpose_a, transpose_b, static_cast<schema::ActivationType>(activation_type));
3142+  auto prim_offset =
3143+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MATMUL_FUSION), ops_offset.o);
3144+  fbb.Finish(prim_offset);
3145+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
3146+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3147+  return ret_value;
3148+}
3149+bool MindIR_MatMulFusion_GetTransposeA(ConstPrimitivePtr primitive) {
3150+  if (primitive != nullptr) {
3151+    auto prim = static_cast<const schema::Primitive *>(primitive);
3152+    auto value = prim->value_as_MatMulFusion();
3153+    if (prim != nullptr && value != nullptr) {
3154+      return value->transpose_a();
3155+    } else {
3156+      return false;
3157+    }
3158+  } else {
3159+    return false;
3160+  }
3161+}
3162+
3163+void MindIR_MatMulFusion_SetTransposeA(PrimitivePtr *primitive, bool transpose_a) {
3164+  if (primitive != nullptr && *primitive != nullptr) {
3165+    auto prim = static_cast<schema::Primitive *>(*primitive);
3166+    auto value = prim->value_as_MatMulFusion();
3167+    if (prim != nullptr && value != nullptr) {
3168+      flatbuffers::FlatBufferBuilder fbb;
3169+      auto ops_offset = schema::CreateMatMulFusion(fbb, transpose_a, value->transpose_b(),
3170+                                                   static_cast<schema::ActivationType>(value->activation_type()));
3171+      auto prim_offset =
3172+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MATMUL_FUSION), ops_offset.o);
3173+      fbb.Finish(prim_offset);
3174+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3175+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3176+      *primitive = ret_value;
3177+    }
3178+  }
3179+}
3180+bool MindIR_MatMulFusion_GetTransposeB(ConstPrimitivePtr primitive) {
3181+  if (primitive != nullptr) {
3182+    auto prim = static_cast<const schema::Primitive *>(primitive);
3183+    auto value = prim->value_as_MatMulFusion();
3184+    if (prim != nullptr && value != nullptr) {
3185+      return value->transpose_b();
3186+    } else {
3187+      return false;
3188+    }
3189+  } else {
3190+    return false;
3191+  }
3192+}
3193+
3194+void MindIR_MatMulFusion_SetTransposeB(PrimitivePtr *primitive, bool transpose_b) {
3195+  if (primitive != nullptr && *primitive != nullptr) {
3196+    auto prim = static_cast<schema::Primitive *>(*primitive);
3197+    auto value = prim->value_as_MatMulFusion();
3198+    if (prim != nullptr && value != nullptr) {
3199+      flatbuffers::FlatBufferBuilder fbb;
3200+      auto ops_offset = schema::CreateMatMulFusion(fbb, value->transpose_a(), transpose_b,
3201+                                                   static_cast<schema::ActivationType>(value->activation_type()));
3202+      auto prim_offset =
3203+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MATMUL_FUSION), ops_offset.o);
3204+      fbb.Finish(prim_offset);
3205+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3206+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3207+      *primitive = ret_value;
3208+    }
3209+  }
3210+}
3211+ActivationType MindIR_MatMulFusion_GetActivationType(ConstPrimitivePtr primitive) {
3212+  if (primitive != nullptr) {
3213+    auto prim = static_cast<const schema::Primitive *>(primitive);
3214+    auto value = prim->value_as_MatMulFusion();
3215+    if (prim != nullptr && value != nullptr) {
3216+      return static_cast<ActivationType>(value->activation_type());
3217+    } else {
3218+      ActivationType en = static_cast<ActivationType>(0);
3219+      return en;
3220+    }
3221+  } else {
3222+    ActivationType en = static_cast<ActivationType>(0);
3223+    return en;
3224+  }
3225+}
3226+
3227+void MindIR_MatMulFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) {
3228+  if (primitive != nullptr && *primitive != nullptr) {
3229+    auto prim = static_cast<schema::Primitive *>(*primitive);
3230+    auto value = prim->value_as_MatMulFusion();
3231+    if (prim != nullptr && value != nullptr) {
3232+      flatbuffers::FlatBufferBuilder fbb;
3233+      auto ops_offset = schema::CreateMatMulFusion(fbb, value->transpose_a(), value->transpose_b(),
3234+                                                   static_cast<schema::ActivationType>(activation_type));
3235+      auto prim_offset =
3236+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MATMUL_FUSION), ops_offset.o);
3237+      fbb.Finish(prim_offset);
3238+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3239+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3240+      *primitive = ret_value;
3241+    }
3242+  }
3243+}
3244+
3245+// ********** Maximum **********
3246+PrimitivePtr MindIR_Maximum_CreatePrimitive() {
3247+  flatbuffers::FlatBufferBuilder fbb;
3248+  auto ops_offset = schema::CreateMaximum(fbb);
3249+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MAXIMUM), ops_offset.o);
3250+  fbb.Finish(prim_offset);
3251+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
3252+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3253+  return ret_value;
3254+}
3255+
3256+// ********** MaxPoolFusion **********
3257+PrimitivePtr MindIR_MaxPoolFusion_CreatePrimitive(const std::vector<int64_t> &kernel_size,
3258+                                                  const std::vector<int64_t> &strides, const std::vector<int64_t> &pad,
3259+                                                  PadMode pad_mode, Format format, bool global,
3260+                                                  ActivationType activation_type) {
3261+  flatbuffers::FlatBufferBuilder fbb;
3262+  auto ops_offset = schema::CreateMaxPoolFusion(
3263+    fbb, fbb.CreateVector(kernel_size.data(), kernel_size.size()), fbb.CreateVector(strides.data(), strides.size()),
3264+    fbb.CreateVector(pad.data(), pad.size()), static_cast<schema::PadMode>(pad_mode),
3265+    mindspore::schema::RoundMode_FLOOR, static_cast<schema::Format>(format), global,
3266+    static_cast<schema::ActivationType>(activation_type));
3267+  auto prim_offset =
3268+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o);
3269+  fbb.Finish(prim_offset);
3270+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
3271+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3272+  return ret_value;
3273+}
3274+std::vector<int64_t> MindIR_MaxPoolFusion_GetKernelSize(ConstPrimitivePtr primitive) {
3275+  if (primitive != nullptr) {
3276+    auto prim = static_cast<const schema::Primitive *>(primitive);
3277+    auto value = prim->value_as_MaxPoolFusion();
3278+    if (prim != nullptr && value != nullptr) {
3279+      std::vector<int64_t> result;
3280+      auto src = value->kernel_size();
3281+      result.resize(src->size());
3282+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
3283+      return result;
3284+    } else {
3285+      return {};
3286+    }
3287+  } else {
3288+    return {};
3289+  }
3290+}
3291+
3292+void MindIR_MaxPoolFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector<int64_t> &kernel_size) {
3293+  if (primitive != nullptr && *primitive != nullptr) {
3294+    auto prim = static_cast<schema::Primitive *>(*primitive);
3295+    auto value = prim->value_as_MaxPoolFusion();
3296+    if (prim != nullptr && value != nullptr) {
3297+      flatbuffers::FlatBufferBuilder fbb;
3298+      auto ops_offset = schema::CreateMaxPoolFusion(
3299+        fbb, fbb.CreateVector(kernel_size.data(), kernel_size.size()),
3300+        fbb.CreateVector(value->strides()->data(), value->strides()->size()),
3301+        fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast<schema::PadMode>(value->pad_mode()),
3302+        mindspore::schema::RoundMode_FLOOR, static_cast<schema::Format>(value->format()), value->global(),
3303+        static_cast<schema::ActivationType>(value->activation_type()));
3304+      auto prim_offset =
3305+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o);
3306+      fbb.Finish(prim_offset);
3307+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3308+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3309+      *primitive = ret_value;
3310+    }
3311+  }
3312+}
3313+std::vector<int64_t> MindIR_MaxPoolFusion_GetStrides(ConstPrimitivePtr primitive) {
3314+  if (primitive != nullptr) {
3315+    auto prim = static_cast<const schema::Primitive *>(primitive);
3316+    auto value = prim->value_as_MaxPoolFusion();
3317+    if (prim != nullptr && value != nullptr) {
3318+      std::vector<int64_t> result;
3319+      auto src = value->strides();
3320+      result.resize(src->size());
3321+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
3322+      return result;
3323+    } else {
3324+      return {};
3325+    }
3326+  } else {
3327+    return {};
3328+  }
3329+}
3330+
3331+void MindIR_MaxPoolFusion_SetStrides(PrimitivePtr *primitive, const std::vector<int64_t> &strides) {
3332+  if (primitive != nullptr && *primitive != nullptr) {
3333+    auto prim = static_cast<schema::Primitive *>(*primitive);
3334+    auto value = prim->value_as_MaxPoolFusion();
3335+    if (prim != nullptr && value != nullptr) {
3336+      flatbuffers::FlatBufferBuilder fbb;
3337+      auto ops_offset = schema::CreateMaxPoolFusion(
3338+        fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
3339+        fbb.CreateVector(strides.data(), strides.size()), fbb.CreateVector(value->pad()->data(), value->pad()->size()),
3340+        static_cast<schema::PadMode>(value->pad_mode()), mindspore::schema::RoundMode_FLOOR,
3341+        static_cast<schema::Format>(value->format()), value->global(),
3342+        static_cast<schema::ActivationType>(value->activation_type()));
3343+      auto prim_offset =
3344+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o);
3345+      fbb.Finish(prim_offset);
3346+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3347+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3348+      *primitive = ret_value;
3349+    }
3350+  }
3351+}
3352+std::vector<int64_t> MindIR_MaxPoolFusion_GetPad(ConstPrimitivePtr primitive) {
3353+  if (primitive != nullptr) {
3354+    auto prim = static_cast<const schema::Primitive *>(primitive);
3355+    auto value = prim->value_as_MaxPoolFusion();
3356+    if (prim != nullptr && value != nullptr) {
3357+      std::vector<int64_t> result;
3358+      auto src = value->pad();
3359+      result.resize(src->size());
3360+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
3361+      return result;
3362+    } else {
3363+      return {};
3364+    }
3365+  } else {
3366+    return {};
3367+  }
3368+}
3369+
3370+void MindIR_MaxPoolFusion_SetPad(PrimitivePtr *primitive, const std::vector<int64_t> &pad) {
3371+  if (primitive != nullptr && *primitive != nullptr) {
3372+    auto prim = static_cast<schema::Primitive *>(*primitive);
3373+    auto value = prim->value_as_MaxPoolFusion();
3374+    if (prim != nullptr && value != nullptr) {
3375+      flatbuffers::FlatBufferBuilder fbb;
3376+      auto ops_offset = schema::CreateMaxPoolFusion(
3377+        fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
3378+        fbb.CreateVector(value->strides()->data(), value->strides()->size()), fbb.CreateVector(pad.data(), pad.size()),
3379+        static_cast<schema::PadMode>(value->pad_mode()), mindspore::schema::RoundMode_FLOOR,
3380+        static_cast<schema::Format>(value->format()), value->global(),
3381+        static_cast<schema::ActivationType>(value->activation_type()));
3382+      auto prim_offset =
3383+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o);
3384+      fbb.Finish(prim_offset);
3385+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3386+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3387+      *primitive = ret_value;
3388+    }
3389+  }
3390+}
3391+PadMode MindIR_MaxPoolFusion_GetPadMode(ConstPrimitivePtr primitive) {
3392+  if (primitive != nullptr) {
3393+    auto prim = static_cast<const schema::Primitive *>(primitive);
3394+    auto value = prim->value_as_MaxPoolFusion();
3395+    if (prim != nullptr && value != nullptr) {
3396+      return static_cast<PadMode>(value->pad_mode());
3397+    } else {
3398+      PadMode en = static_cast<PadMode>(0);
3399+      return en;
3400+    }
3401+  } else {
3402+    PadMode en = static_cast<PadMode>(0);
3403+    return en;
3404+  }
3405+}
3406+
3407+void MindIR_MaxPoolFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode) {
3408+  if (primitive != nullptr && *primitive != nullptr) {
3409+    auto prim = static_cast<schema::Primitive *>(*primitive);
3410+    auto value = prim->value_as_MaxPoolFusion();
3411+    if (prim != nullptr && value != nullptr) {
3412+      flatbuffers::FlatBufferBuilder fbb;
3413+      auto ops_offset = schema::CreateMaxPoolFusion(
3414+        fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
3415+        fbb.CreateVector(value->strides()->data(), value->strides()->size()),
3416+        fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast<schema::PadMode>(pad_mode),
3417+        mindspore::schema::RoundMode_FLOOR, static_cast<schema::Format>(value->format()), value->global(),
3418+        static_cast<schema::ActivationType>(value->activation_type()));
3419+      auto prim_offset =
3420+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o);
3421+      fbb.Finish(prim_offset);
3422+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3423+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3424+      *primitive = ret_value;
3425+    }
3426+  }
3427+}
3428+Format MindIR_MaxPoolFusion_GetFormat(ConstPrimitivePtr primitive) {
3429+  if (primitive != nullptr) {
3430+    auto prim = static_cast<const schema::Primitive *>(primitive);
3431+    auto value = prim->value_as_MaxPoolFusion();
3432+    if (prim != nullptr && value != nullptr) {
3433+      return static_cast<Format>(value->format());
3434+    } else {
3435+      Format en = static_cast<Format>(0);
3436+      return en;
3437+    }
3438+  } else {
3439+    Format en = static_cast<Format>(0);
3440+    return en;
3441+  }
3442+}
3443+
3444+void MindIR_MaxPoolFusion_SetFormat(PrimitivePtr *primitive, Format format) {
3445+  if (primitive != nullptr && *primitive != nullptr) {
3446+    auto prim = static_cast<schema::Primitive *>(*primitive);
3447+    auto value = prim->value_as_MaxPoolFusion();
3448+    if (prim != nullptr && value != nullptr) {
3449+      flatbuffers::FlatBufferBuilder fbb;
3450+      auto ops_offset = schema::CreateMaxPoolFusion(
3451+        fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
3452+        fbb.CreateVector(value->strides()->data(), value->strides()->size()),
3453+        fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast<schema::PadMode>(value->pad_mode()),
3454+        mindspore::schema::RoundMode_FLOOR, static_cast<schema::Format>(format), value->global(),
3455+        static_cast<schema::ActivationType>(value->activation_type()));
3456+      auto prim_offset =
3457+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o);
3458+      fbb.Finish(prim_offset);
3459+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3460+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3461+      *primitive = ret_value;
3462+    }
3463+  }
3464+}
3465+bool MindIR_MaxPoolFusion_GetGlobal(ConstPrimitivePtr primitive) {
3466+  if (primitive != nullptr) {
3467+    auto prim = static_cast<const schema::Primitive *>(primitive);
3468+    auto value = prim->value_as_MaxPoolFusion();
3469+    if (prim != nullptr && value != nullptr) {
3470+      return value->global();
3471+    } else {
3472+      return false;
3473+    }
3474+  } else {
3475+    return false;
3476+  }
3477+}
3478+
3479+void MindIR_MaxPoolFusion_SetGlobal(PrimitivePtr *primitive, bool global) {
3480+  if (primitive != nullptr && *primitive != nullptr) {
3481+    auto prim = static_cast<schema::Primitive *>(*primitive);
3482+    auto value = prim->value_as_MaxPoolFusion();
3483+    if (prim != nullptr && value != nullptr) {
3484+      flatbuffers::FlatBufferBuilder fbb;
3485+      auto ops_offset = schema::CreateMaxPoolFusion(
3486+        fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
3487+        fbb.CreateVector(value->strides()->data(), value->strides()->size()),
3488+        fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast<schema::PadMode>(value->pad_mode()),
3489+        mindspore::schema::RoundMode_FLOOR, static_cast<schema::Format>(value->format()), global,
3490+        static_cast<schema::ActivationType>(value->activation_type()));
3491+      auto prim_offset =
3492+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o);
3493+      fbb.Finish(prim_offset);
3494+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3495+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3496+      *primitive = ret_value;
3497+    }
3498+  }
3499+}
3500+ActivationType MindIR_MaxPoolFusion_GetActivationType(ConstPrimitivePtr primitive) {
3501+  if (primitive != nullptr) {
3502+    auto prim = static_cast<const schema::Primitive *>(primitive);
3503+    auto value = prim->value_as_MaxPoolFusion();
3504+    if (prim != nullptr && value != nullptr) {
3505+      return static_cast<ActivationType>(value->activation_type());
3506+    } else {
3507+      ActivationType en = static_cast<ActivationType>(0);
3508+      return en;
3509+    }
3510+  } else {
3511+    ActivationType en = static_cast<ActivationType>(0);
3512+    return en;
3513+  }
3514+}
3515+
3516+void MindIR_MaxPoolFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) {
3517+  if (primitive != nullptr && *primitive != nullptr) {
3518+    auto prim = static_cast<schema::Primitive *>(*primitive);
3519+    auto value = prim->value_as_MaxPoolFusion();
3520+    if (prim != nullptr && value != nullptr) {
3521+      flatbuffers::FlatBufferBuilder fbb;
3522+      auto ops_offset = schema::CreateMaxPoolFusion(
3523+        fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()),
3524+        fbb.CreateVector(value->strides()->data(), value->strides()->size()),
3525+        fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast<schema::PadMode>(value->pad_mode()),
3526+        mindspore::schema::RoundMode_FLOOR, static_cast<schema::Format>(value->format()), value->global(),
3527+        static_cast<schema::ActivationType>(activation_type));
3528+      auto prim_offset =
3529+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o);
3530+      fbb.Finish(prim_offset);
3531+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3532+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3533+      *primitive = ret_value;
3534+    }
3535+  }
3536+}
3537+
3538+// ********** MulFusion **********
3539+PrimitivePtr MindIR_MulFusion_CreatePrimitive(ActivationType activation_type) {
3540+  flatbuffers::FlatBufferBuilder fbb;
3541+  auto ops_offset = schema::CreateMulFusion(fbb, static_cast<schema::ActivationType>(activation_type));
3542+  auto prim_offset =
3543+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MUL_FUSION), ops_offset.o);
3544+  fbb.Finish(prim_offset);
3545+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
3546+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3547+  return ret_value;
3548+}
3549+ActivationType MindIR_MulFusion_GetActivationType(ConstPrimitivePtr primitive) {
3550+  if (primitive != nullptr) {
3551+    auto prim = static_cast<const schema::Primitive *>(primitive);
3552+    auto value = prim->value_as_MulFusion();
3553+    if (prim != nullptr && value != nullptr) {
3554+      return static_cast<ActivationType>(value->activation_type());
3555+    } else {
3556+      ActivationType en = static_cast<ActivationType>(0);
3557+      return en;
3558+    }
3559+  } else {
3560+    ActivationType en = static_cast<ActivationType>(0);
3561+    return en;
3562+  }
3563+}
3564+
3565+void MindIR_MulFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) {
3566+  if (primitive != nullptr && *primitive != nullptr) {
3567+    auto prim = static_cast<schema::Primitive *>(*primitive);
3568+    auto value = prim->value_as_MulFusion();
3569+    if (prim != nullptr && value != nullptr) {
3570+      flatbuffers::FlatBufferBuilder fbb;
3571+      auto ops_offset = schema::CreateMulFusion(fbb, static_cast<schema::ActivationType>(activation_type));
3572+      auto prim_offset =
3573+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_MUL_FUSION), ops_offset.o);
3574+      fbb.Finish(prim_offset);
3575+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3576+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3577+      *primitive = ret_value;
3578+    }
3579+  }
3580+}
3581+
3582+// ********** OneHot **********
3583+PrimitivePtr MindIR_OneHot_CreatePrimitive(int64_t axis) {
3584+  flatbuffers::FlatBufferBuilder fbb;
3585+  auto ops_offset = schema::CreateOneHot(fbb, axis);
3586+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ONE_HOT), ops_offset.o);
3587+  fbb.Finish(prim_offset);
3588+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
3589+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3590+  return ret_value;
3591+}
3592+int64_t MindIR_OneHot_GetAxis(ConstPrimitivePtr primitive) {
3593+  if (primitive != nullptr) {
3594+    auto prim = static_cast<const schema::Primitive *>(primitive);
3595+    auto value = prim->value_as_OneHot();
3596+    if (prim != nullptr && value != nullptr) {
3597+      return value->axis();
3598+    } else {
3599+      return 0;
3600+    }
3601+  } else {
3602+    return 0;
3603+  }
3604+}
3605+
3606+void MindIR_OneHot_SetAxis(PrimitivePtr *primitive, int64_t axis) {
3607+  if (primitive != nullptr && *primitive != nullptr) {
3608+    auto prim = static_cast<schema::Primitive *>(*primitive);
3609+    auto value = prim->value_as_OneHot();
3610+    if (prim != nullptr && value != nullptr) {
3611+      flatbuffers::FlatBufferBuilder fbb;
3612+      auto ops_offset = schema::CreateOneHot(fbb, axis);
3613+      auto prim_offset =
3614+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_ONE_HOT), ops_offset.o);
3615+      fbb.Finish(prim_offset);
3616+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3617+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3618+      *primitive = ret_value;
3619+    }
3620+  }
3621+}
3622+
3623+// ********** PadFusion **********
3624+PrimitivePtr MindIR_PadFusion_CreatePrimitive(const std::vector<std::vector<int64_t>> &paddings,
3625+                                              PaddingMode padding_mode, float constant_value) {
3626+  flatbuffers::FlatBufferBuilder fbb;
3627+  auto ops_offset = schema::CreatePadFusion(fbb, CreateVec2D(fbb, paddings),
3628+                                            static_cast<schema::PaddingMode>(padding_mode), constant_value);
3629+  auto prim_offset =
3630+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_PAD_FUSION), ops_offset.o);
3631+  fbb.Finish(prim_offset);
3632+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
3633+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3634+  return ret_value;
3635+}
3636+std::vector<std::vector<int64_t>> MindIR_PadFusion_GetPaddings(ConstPrimitivePtr primitive) {
3637+  if (primitive != nullptr) {
3638+    auto prim = static_cast<const schema::Primitive *>(primitive);
3639+    auto value = prim->value_as_PadFusion();
3640+    if (prim != nullptr && value != nullptr) {
3641+      std::vector<std::vector<int64_t>> out;
3642+      auto src = value->paddings();
3643+      for (auto sub_list : *src->data()) {
3644+        std::vector<int64_t> result_tmp;
3645+        result_tmp.resize(sub_list->data()->size());
3646+        std::transform(sub_list->data()->begin(), sub_list->data()->end(), result_tmp.begin(),
3647+                       [](int64_t item) { return item; });
3648+        out.emplace_back(result_tmp);
3649+      }
3650+      return out;
3651+    } else {
3652+      return {};
3653+    }
3654+  } else {
3655+    return {};
3656+  }
3657+}
3658+
3659+void MindIR_PadFusion_SetPaddings(PrimitivePtr *primitive, const std::vector<std::vector<int64_t>> &paddings) {
3660+  if (primitive != nullptr && *primitive != nullptr) {
3661+    auto prim = static_cast<schema::Primitive *>(*primitive);
3662+    auto value = prim->value_as_PadFusion();
3663+    if (prim != nullptr && value != nullptr) {
3664+      flatbuffers::FlatBufferBuilder fbb;
3665+      auto ops_offset =
3666+        schema::CreatePadFusion(fbb, CreateVec2D(fbb, paddings),
3667+                                static_cast<schema::PaddingMode>(value->padding_mode()), value->constant_value());
3668+      auto prim_offset =
3669+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_PAD_FUSION), ops_offset.o);
3670+      fbb.Finish(prim_offset);
3671+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3672+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3673+      *primitive = ret_value;
3674+    }
3675+  }
3676+}
3677+PaddingMode MindIR_PadFusion_GetPaddingMode(ConstPrimitivePtr primitive) {
3678+  if (primitive != nullptr) {
3679+    auto prim = static_cast<const schema::Primitive *>(primitive);
3680+    auto value = prim->value_as_PadFusion();
3681+    if (prim != nullptr && value != nullptr) {
3682+      return static_cast<PaddingMode>(value->padding_mode());
3683+    } else {
3684+      PaddingMode en = static_cast<PaddingMode>(0);
3685+      return en;
3686+    }
3687+  } else {
3688+    PaddingMode en = static_cast<PaddingMode>(0);
3689+    return en;
3690+  }
3691+}
3692+
3693+void MindIR_PadFusion_SetPaddingMode(PrimitivePtr *primitive, PaddingMode padding_mode) {
3694+  if (primitive != nullptr && *primitive != nullptr) {
3695+    auto prim = static_cast<schema::Primitive *>(*primitive);
3696+    auto value = prim->value_as_PadFusion();
3697+    if (prim != nullptr && value != nullptr) {
3698+      flatbuffers::FlatBufferBuilder fbb;
3699+      auto ops_offset =
3700+        schema::CreatePadFusion(fbb, CreateVec2D(fbb, value->paddings()),
3701+                                static_cast<schema::PaddingMode>(padding_mode), value->constant_value());
3702+      auto prim_offset =
3703+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_PAD_FUSION), ops_offset.o);
3704+      fbb.Finish(prim_offset);
3705+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3706+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3707+      *primitive = ret_value;
3708+    }
3709+  }
3710+}
3711+float MindIR_PadFusion_GetConstantValue(ConstPrimitivePtr primitive) {
3712+  if (primitive != nullptr) {
3713+    auto prim = static_cast<const schema::Primitive *>(primitive);
3714+    auto value = prim->value_as_PadFusion();
3715+    if (prim != nullptr && value != nullptr) {
3716+      return value->constant_value();
3717+    } else {
3718+      return .0;
3719+    }
3720+  } else {
3721+    return .0;
3722+  }
3723+}
3724+
3725+void MindIR_PadFusion_SetConstantValue(PrimitivePtr *primitive, float constant_value) {
3726+  if (primitive != nullptr && *primitive != nullptr) {
3727+    auto prim = static_cast<schema::Primitive *>(*primitive);
3728+    auto value = prim->value_as_PadFusion();
3729+    if (prim != nullptr && value != nullptr) {
3730+      flatbuffers::FlatBufferBuilder fbb;
3731+      auto ops_offset =
3732+        schema::CreatePadFusion(fbb, CreateVec2D(fbb, value->paddings()),
3733+                                static_cast<schema::PaddingMode>(value->padding_mode()), constant_value);
3734+      auto prim_offset =
3735+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_PAD_FUSION), ops_offset.o);
3736+      fbb.Finish(prim_offset);
3737+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3738+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3739+      *primitive = ret_value;
3740+    }
3741+  }
3742+}
3743+
3744+// ********** PowFusion **********
3745+PrimitivePtr MindIR_PowFusion_CreatePrimitive(float scale, float shift) {
3746+  flatbuffers::FlatBufferBuilder fbb;
3747+  auto ops_offset = schema::CreatePowFusion(fbb, scale, shift);
3748+  auto prim_offset =
3749+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_POW_FUSION), ops_offset.o);
3750+  fbb.Finish(prim_offset);
3751+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
3752+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3753+  return ret_value;
3754+}
3755+float MindIR_PowFusion_GetScale(ConstPrimitivePtr primitive) {
3756+  if (primitive != nullptr) {
3757+    auto prim = static_cast<const schema::Primitive *>(primitive);
3758+    auto value = prim->value_as_PowFusion();
3759+    if (prim != nullptr && value != nullptr) {
3760+      return value->scale();
3761+    } else {
3762+      return .0;
3763+    }
3764+  } else {
3765+    return .0;
3766+  }
3767+}
3768+
3769+void MindIR_PowFusion_SetScale(PrimitivePtr *primitive, float scale) {
3770+  if (primitive != nullptr && *primitive != nullptr) {
3771+    auto prim = static_cast<schema::Primitive *>(*primitive);
3772+    auto value = prim->value_as_PowFusion();
3773+    if (prim != nullptr && value != nullptr) {
3774+      flatbuffers::FlatBufferBuilder fbb;
3775+      auto ops_offset = schema::CreatePowFusion(fbb, scale, value->shift());
3776+      auto prim_offset =
3777+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_POW_FUSION), ops_offset.o);
3778+      fbb.Finish(prim_offset);
3779+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3780+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3781+      *primitive = ret_value;
3782+    }
3783+  }
3784+}
3785+float MindIR_PowFusion_GetShift(ConstPrimitivePtr primitive) {
3786+  if (primitive != nullptr) {
3787+    auto prim = static_cast<const schema::Primitive *>(primitive);
3788+    auto value = prim->value_as_PowFusion();
3789+    if (prim != nullptr && value != nullptr) {
3790+      return value->shift();
3791+    } else {
3792+      return .0;
3793+    }
3794+  } else {
3795+    return .0;
3796+  }
3797+}
3798+
3799+void MindIR_PowFusion_SetShift(PrimitivePtr *primitive, float shift) {
3800+  if (primitive != nullptr && *primitive != nullptr) {
3801+    auto prim = static_cast<schema::Primitive *>(*primitive);
3802+    auto value = prim->value_as_PowFusion();
3803+    if (prim != nullptr && value != nullptr) {
3804+      flatbuffers::FlatBufferBuilder fbb;
3805+      auto ops_offset = schema::CreatePowFusion(fbb, value->scale(), shift);
3806+      auto prim_offset =
3807+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_POW_FUSION), ops_offset.o);
3808+      fbb.Finish(prim_offset);
3809+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3810+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3811+      *primitive = ret_value;
3812+    }
3813+  }
3814+}
3815+
3816+// ********** PReLUFusion **********
3817+PrimitivePtr MindIR_PReLUFusion_CreatePrimitive(bool channel_shared) {
3818+  flatbuffers::FlatBufferBuilder fbb;
3819+  auto ops_offset = schema::CreatePReLUFusion(fbb, channel_shared);
3820+  auto prim_offset =
3821+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_PRELU_FUSION), ops_offset.o);
3822+  fbb.Finish(prim_offset);
3823+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
3824+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3825+  return ret_value;
3826+}
3827+bool MindIR_PReLUFusion_GetChannelShared(ConstPrimitivePtr primitive) {
3828+  if (primitive != nullptr) {
3829+    auto prim = static_cast<const schema::Primitive *>(primitive);
3830+    auto value = prim->value_as_PReLUFusion();
3831+    if (prim != nullptr && value != nullptr) {
3832+      return value->channel_shared();
3833+    } else {
3834+      return false;
3835+    }
3836+  } else {
3837+    return false;
3838+  }
3839+}
3840+
3841+void MindIR_PReLUFusion_SetChannelShared(PrimitivePtr *primitive, bool channel_shared) {
3842+  if (primitive != nullptr && *primitive != nullptr) {
3843+    auto prim = static_cast<schema::Primitive *>(*primitive);
3844+    auto value = prim->value_as_PReLUFusion();
3845+    if (prim != nullptr && value != nullptr) {
3846+      flatbuffers::FlatBufferBuilder fbb;
3847+      auto ops_offset = schema::CreatePReLUFusion(fbb, channel_shared);
3848+      auto prim_offset =
3849+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_PRELU_FUSION), ops_offset.o);
3850+      fbb.Finish(prim_offset);
3851+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3852+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3853+      *primitive = ret_value;
3854+    }
3855+  }
3856+}
3857+
3858+// ********** QuantDTypeCast **********
3859+PrimitivePtr MindIR_QuantDTypeCast_CreatePrimitive(int64_t src_t, int64_t dst_t) {
3860+  flatbuffers::FlatBufferBuilder fbb;
3861+  auto ops_offset = schema::CreateQuantDTypeCast(fbb, src_t, dst_t);
3862+  auto prim_offset =
3863+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_QUANT_DTYPE_CAST), ops_offset.o);
3864+  fbb.Finish(prim_offset);
3865+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
3866+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3867+  return ret_value;
3868+}
3869+int64_t MindIR_QuantDTypeCast_GetSrcT(ConstPrimitivePtr primitive) {
3870+  if (primitive != nullptr) {
3871+    auto prim = static_cast<const schema::Primitive *>(primitive);
3872+    auto value = prim->value_as_QuantDTypeCast();
3873+    if (prim != nullptr && value != nullptr) {
3874+      return value->src_t();
3875+    } else {
3876+      return 0;
3877+    }
3878+  } else {
3879+    return 0;
3880+  }
3881+}
3882+
3883+void MindIR_QuantDTypeCast_SetSrcT(PrimitivePtr *primitive, int64_t src_t) {
3884+  if (primitive != nullptr && *primitive != nullptr) {
3885+    auto prim = static_cast<schema::Primitive *>(*primitive);
3886+    auto value = prim->value_as_QuantDTypeCast();
3887+    if (prim != nullptr && value != nullptr) {
3888+      flatbuffers::FlatBufferBuilder fbb;
3889+      auto ops_offset = schema::CreateQuantDTypeCast(fbb, src_t, value->dst_t());
3890+      auto prim_offset =
3891+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_QUANT_DTYPE_CAST), ops_offset.o);
3892+      fbb.Finish(prim_offset);
3893+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3894+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3895+      *primitive = ret_value;
3896+    }
3897+  }
3898+}
3899+int64_t MindIR_QuantDTypeCast_GetDstT(ConstPrimitivePtr primitive) {
3900+  if (primitive != nullptr) {
3901+    auto prim = static_cast<const schema::Primitive *>(primitive);
3902+    auto value = prim->value_as_QuantDTypeCast();
3903+    if (prim != nullptr && value != nullptr) {
3904+      return value->dst_t();
3905+    } else {
3906+      return 0;
3907+    }
3908+  } else {
3909+    return 0;
3910+  }
3911+}
3912+
3913+void MindIR_QuantDTypeCast_SetDstT(PrimitivePtr *primitive, int64_t dst_t) {
3914+  if (primitive != nullptr && *primitive != nullptr) {
3915+    auto prim = static_cast<schema::Primitive *>(*primitive);
3916+    auto value = prim->value_as_QuantDTypeCast();
3917+    if (prim != nullptr && value != nullptr) {
3918+      flatbuffers::FlatBufferBuilder fbb;
3919+      auto ops_offset = schema::CreateQuantDTypeCast(fbb, value->src_t(), dst_t);
3920+      auto prim_offset =
3921+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_QUANT_DTYPE_CAST), ops_offset.o);
3922+      fbb.Finish(prim_offset);
3923+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3924+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3925+      *primitive = ret_value;
3926+    }
3927+  }
3928+}
3929+
3930+// ********** ReduceFusion **********
3931+PrimitivePtr MindIR_ReduceFusion_CreatePrimitive(bool keep_dims, ReduceMode mode, bool reduce_to_end, float coeff) {
3932+  flatbuffers::FlatBufferBuilder fbb;
3933+  auto ops_offset =
3934+    schema::CreateReduceFusion(fbb, keep_dims, static_cast<schema::ReduceMode>(mode), reduce_to_end, coeff);
3935+  auto prim_offset =
3936+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_REDUCE_FUSION), ops_offset.o);
3937+  fbb.Finish(prim_offset);
3938+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
3939+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3940+  return ret_value;
3941+}
3942+bool MindIR_ReduceFusion_GetKeepDims(ConstPrimitivePtr primitive) {
3943+  if (primitive != nullptr) {
3944+    auto prim = static_cast<const schema::Primitive *>(primitive);
3945+    auto value = prim->value_as_ReduceFusion();
3946+    if (prim != nullptr && value != nullptr) {
3947+      return value->keep_dims();
3948+    } else {
3949+      return false;
3950+    }
3951+  } else {
3952+    return false;
3953+  }
3954+}
3955+
3956+void MindIR_ReduceFusion_SetKeepDims(PrimitivePtr *primitive, bool keep_dims) {
3957+  if (primitive != nullptr && *primitive != nullptr) {
3958+    auto prim = static_cast<schema::Primitive *>(*primitive);
3959+    auto value = prim->value_as_ReduceFusion();
3960+    if (prim != nullptr && value != nullptr) {
3961+      flatbuffers::FlatBufferBuilder fbb;
3962+      auto ops_offset = schema::CreateReduceFusion(fbb, keep_dims, static_cast<schema::ReduceMode>(value->mode()),
3963+                                                   value->reduce_to_end(), value->coeff());
3964+      auto prim_offset =
3965+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_REDUCE_FUSION), ops_offset.o);
3966+      fbb.Finish(prim_offset);
3967+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
3968+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
3969+      *primitive = ret_value;
3970+    }
3971+  }
3972+}
3973+ReduceMode MindIR_ReduceFusion_GetMode(ConstPrimitivePtr primitive) {
3974+  if (primitive != nullptr) {
3975+    auto prim = static_cast<const schema::Primitive *>(primitive);
3976+    auto value = prim->value_as_ReduceFusion();
3977+    if (prim != nullptr && value != nullptr) {
3978+      return static_cast<ReduceMode>(value->mode());
3979+    } else {
3980+      ReduceMode en = static_cast<ReduceMode>(0);
3981+      return en;
3982+    }
3983+  } else {
3984+    ReduceMode en = static_cast<ReduceMode>(0);
3985+    return en;
3986+  }
3987+}
3988+
3989+void MindIR_ReduceFusion_SetMode(PrimitivePtr *primitive, ReduceMode mode) {
3990+  if (primitive != nullptr && *primitive != nullptr) {
3991+    auto prim = static_cast<schema::Primitive *>(*primitive);
3992+    auto value = prim->value_as_ReduceFusion();
3993+    if (prim != nullptr && value != nullptr) {
3994+      flatbuffers::FlatBufferBuilder fbb;
3995+      auto ops_offset = schema::CreateReduceFusion(fbb, value->keep_dims(), static_cast<schema::ReduceMode>(mode),
3996+                                                   value->reduce_to_end(), value->coeff());
3997+      auto prim_offset =
3998+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_REDUCE_FUSION), ops_offset.o);
3999+      fbb.Finish(prim_offset);
4000+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4001+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4002+      *primitive = ret_value;
4003+    }
4004+  }
4005+}
4006+bool MindIR_ReduceFusion_GetReduceToEnd(ConstPrimitivePtr primitive) {
4007+  if (primitive != nullptr) {
4008+    auto prim = static_cast<const schema::Primitive *>(primitive);
4009+    auto value = prim->value_as_ReduceFusion();
4010+    if (prim != nullptr && value != nullptr) {
4011+      return value->reduce_to_end();
4012+    } else {
4013+      return false;
4014+    }
4015+  } else {
4016+    return false;
4017+  }
4018+}
4019+
4020+void MindIR_ReduceFusion_SetReduceToEnd(PrimitivePtr *primitive, bool reduce_to_end) {
4021+  if (primitive != nullptr && *primitive != nullptr) {
4022+    auto prim = static_cast<schema::Primitive *>(*primitive);
4023+    auto value = prim->value_as_ReduceFusion();
4024+    if (prim != nullptr && value != nullptr) {
4025+      flatbuffers::FlatBufferBuilder fbb;
4026+      auto ops_offset = schema::CreateReduceFusion(
4027+        fbb, value->keep_dims(), static_cast<schema::ReduceMode>(value->mode()), reduce_to_end, value->coeff());
4028+      auto prim_offset =
4029+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_REDUCE_FUSION), ops_offset.o);
4030+      fbb.Finish(prim_offset);
4031+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4032+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4033+      *primitive = ret_value;
4034+    }
4035+  }
4036+}
4037+float MindIR_ReduceFusion_GetCoeff(ConstPrimitivePtr primitive) {
4038+  if (primitive != nullptr) {
4039+    auto prim = static_cast<const schema::Primitive *>(primitive);
4040+    auto value = prim->value_as_ReduceFusion();
4041+    if (prim != nullptr && value != nullptr) {
4042+      return value->coeff();
4043+    } else {
4044+      return .0;
4045+    }
4046+  } else {
4047+    return .0;
4048+  }
4049+}
4050+
4051+void MindIR_ReduceFusion_SetCoeff(PrimitivePtr *primitive, float coeff) {
4052+  if (primitive != nullptr && *primitive != nullptr) {
4053+    auto prim = static_cast<schema::Primitive *>(*primitive);
4054+    auto value = prim->value_as_ReduceFusion();
4055+    if (prim != nullptr && value != nullptr) {
4056+      flatbuffers::FlatBufferBuilder fbb;
4057+      auto ops_offset = schema::CreateReduceFusion(
4058+        fbb, value->keep_dims(), static_cast<schema::ReduceMode>(value->mode()), value->reduce_to_end(), coeff);
4059+      auto prim_offset =
4060+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_REDUCE_FUSION), ops_offset.o);
4061+      fbb.Finish(prim_offset);
4062+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4063+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4064+      *primitive = ret_value;
4065+    }
4066+  }
4067+}
4068+
4069+// ********** Reshape **********
4070+PrimitivePtr MindIR_Reshape_CreatePrimitive() {
4071+  flatbuffers::FlatBufferBuilder fbb;
4072+  auto ops_offset = schema::CreateReshape(fbb);
4073+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_RESHAPE), ops_offset.o);
4074+  fbb.Finish(prim_offset);
4075+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
4076+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4077+  return ret_value;
4078+}
4079+
4080+// ********** Resize **********
4081+PrimitivePtr MindIR_Resize_CreatePrimitive(ResizeMethod method, int64_t new_height, int64_t new_width,
4082+                                           bool preserve_aspect_ratio,
4083+                                           CoordinateTransformMode coordinate_transform_mode, float cubic_coeff,
4084+                                           int64_t exclude_outside, float extrapolation_value,
4085+                                           NearestMode nearest_mode) {
4086+  flatbuffers::FlatBufferBuilder fbb;
4087+  auto ops_offset = schema::CreateResize(
4088+    fbb, mindspore::schema::Format_NCHW, static_cast<schema::ResizeMethod>(method), new_height, new_width,
4089+    preserve_aspect_ratio, static_cast<schema::CoordinateTransformMode>(coordinate_transform_mode), cubic_coeff,
4090+    exclude_outside, extrapolation_value, static_cast<schema::NearestMode>(nearest_mode));
4091+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_RESIZE), ops_offset.o);
4092+  fbb.Finish(prim_offset);
4093+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
4094+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4095+  return ret_value;
4096+}
4097+ResizeMethod MindIR_Resize_GetMethod(ConstPrimitivePtr primitive) {
4098+  if (primitive != nullptr) {
4099+    auto prim = static_cast<const schema::Primitive *>(primitive);
4100+    auto value = prim->value_as_Resize();
4101+    if (prim != nullptr && value != nullptr) {
4102+      return static_cast<ResizeMethod>(value->method());
4103+    } else {
4104+      ResizeMethod en = static_cast<ResizeMethod>(0);
4105+      return en;
4106+    }
4107+  } else {
4108+    ResizeMethod en = static_cast<ResizeMethod>(0);
4109+    return en;
4110+  }
4111+}
4112+
4113+void MindIR_Resize_SetMethod(PrimitivePtr *primitive, ResizeMethod method) {
4114+  if (primitive != nullptr && *primitive != nullptr) {
4115+    auto prim = static_cast<schema::Primitive *>(*primitive);
4116+    auto value = prim->value_as_Resize();
4117+    if (prim != nullptr && value != nullptr) {
4118+      flatbuffers::FlatBufferBuilder fbb;
4119+      auto ops_offset =
4120+        schema::CreateResize(fbb, mindspore::schema::Format_NCHW, static_cast<schema::ResizeMethod>(method),
4121+                             value->new_height(), value->new_width(), value->preserve_aspect_ratio(),
4122+                             static_cast<schema::CoordinateTransformMode>(value->coordinate_transform_mode()),
4123+                             value->cubic_coeff(), value->exclude_outside(), value->extrapolation_value(),
4124+                             static_cast<schema::NearestMode>(value->nearest_mode()));
4125+      auto prim_offset =
4126+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_RESIZE), ops_offset.o);
4127+      fbb.Finish(prim_offset);
4128+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4129+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4130+      *primitive = ret_value;
4131+    }
4132+  }
4133+}
4134+int64_t MindIR_Resize_GetNewHeight(ConstPrimitivePtr primitive) {
4135+  if (primitive != nullptr) {
4136+    auto prim = static_cast<const schema::Primitive *>(primitive);
4137+    auto value = prim->value_as_Resize();
4138+    if (prim != nullptr && value != nullptr) {
4139+      return value->new_height();
4140+    } else {
4141+      return 0;
4142+    }
4143+  } else {
4144+    return 0;
4145+  }
4146+}
4147+
4148+void MindIR_Resize_SetNewHeight(PrimitivePtr *primitive, int64_t new_height) {
4149+  if (primitive != nullptr && *primitive != nullptr) {
4150+    auto prim = static_cast<schema::Primitive *>(*primitive);
4151+    auto value = prim->value_as_Resize();
4152+    if (prim != nullptr && value != nullptr) {
4153+      flatbuffers::FlatBufferBuilder fbb;
4154+      auto ops_offset =
4155+        schema::CreateResize(fbb, mindspore::schema::Format_NCHW, static_cast<schema::ResizeMethod>(value->method()),
4156+                             new_height, value->new_width(), value->preserve_aspect_ratio(),
4157+                             static_cast<schema::CoordinateTransformMode>(value->coordinate_transform_mode()),
4158+                             value->cubic_coeff(), value->exclude_outside(), value->extrapolation_value(),
4159+                             static_cast<schema::NearestMode>(value->nearest_mode()));
4160+      auto prim_offset =
4161+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_RESIZE), ops_offset.o);
4162+      fbb.Finish(prim_offset);
4163+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4164+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4165+      *primitive = ret_value;
4166+    }
4167+  }
4168+}
4169+int64_t MindIR_Resize_GetNewWidth(ConstPrimitivePtr primitive) {
4170+  if (primitive != nullptr) {
4171+    auto prim = static_cast<const schema::Primitive *>(primitive);
4172+    auto value = prim->value_as_Resize();
4173+    if (prim != nullptr && value != nullptr) {
4174+      return value->new_width();
4175+    } else {
4176+      return 0;
4177+    }
4178+  } else {
4179+    return 0;
4180+  }
4181+}
4182+
4183+void MindIR_Resize_SetNewWidth(PrimitivePtr *primitive, int64_t new_width) {
4184+  if (primitive != nullptr && *primitive != nullptr) {
4185+    auto prim = static_cast<schema::Primitive *>(*primitive);
4186+    auto value = prim->value_as_Resize();
4187+    if (prim != nullptr && value != nullptr) {
4188+      flatbuffers::FlatBufferBuilder fbb;
4189+      auto ops_offset =
4190+        schema::CreateResize(fbb, mindspore::schema::Format_NCHW, static_cast<schema::ResizeMethod>(value->method()),
4191+                             value->new_height(), new_width, value->preserve_aspect_ratio(),
4192+                             static_cast<schema::CoordinateTransformMode>(value->coordinate_transform_mode()),
4193+                             value->cubic_coeff(), value->exclude_outside(), value->extrapolation_value(),
4194+                             static_cast<schema::NearestMode>(value->nearest_mode()));
4195+      auto prim_offset =
4196+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_RESIZE), ops_offset.o);
4197+      fbb.Finish(prim_offset);
4198+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4199+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4200+      *primitive = ret_value;
4201+    }
4202+  }
4203+}
4204+bool MindIR_Resize_GetPreserveAspectRatio(ConstPrimitivePtr primitive) {
4205+  if (primitive != nullptr) {
4206+    auto prim = static_cast<const schema::Primitive *>(primitive);
4207+    auto value = prim->value_as_Resize();
4208+    if (prim != nullptr && value != nullptr) {
4209+      return value->preserve_aspect_ratio();
4210+    } else {
4211+      return false;
4212+    }
4213+  } else {
4214+    return false;
4215+  }
4216+}
4217+
4218+void MindIR_Resize_SetPreserveAspectRatio(PrimitivePtr *primitive, bool preserve_aspect_ratio) {
4219+  if (primitive != nullptr && *primitive != nullptr) {
4220+    auto prim = static_cast<schema::Primitive *>(*primitive);
4221+    auto value = prim->value_as_Resize();
4222+    if (prim != nullptr && value != nullptr) {
4223+      flatbuffers::FlatBufferBuilder fbb;
4224+      auto ops_offset =
4225+        schema::CreateResize(fbb, mindspore::schema::Format_NCHW, static_cast<schema::ResizeMethod>(value->method()),
4226+                             value->new_height(), value->new_width(), preserve_aspect_ratio,
4227+                             static_cast<schema::CoordinateTransformMode>(value->coordinate_transform_mode()),
4228+                             value->cubic_coeff(), value->exclude_outside(), value->extrapolation_value(),
4229+                             static_cast<schema::NearestMode>(value->nearest_mode()));
4230+      auto prim_offset =
4231+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_RESIZE), ops_offset.o);
4232+      fbb.Finish(prim_offset);
4233+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4234+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4235+      *primitive = ret_value;
4236+    }
4237+  }
4238+}
4239+CoordinateTransformMode MindIR_Resize_GetCoordinateTransformMode(ConstPrimitivePtr primitive) {
4240+  if (primitive != nullptr) {
4241+    auto prim = static_cast<const schema::Primitive *>(primitive);
4242+    auto value = prim->value_as_Resize();
4243+    if (prim != nullptr && value != nullptr) {
4244+      return static_cast<CoordinateTransformMode>(value->coordinate_transform_mode());
4245+    } else {
4246+      CoordinateTransformMode en = static_cast<CoordinateTransformMode>(0);
4247+      return en;
4248+    }
4249+  } else {
4250+    CoordinateTransformMode en = static_cast<CoordinateTransformMode>(0);
4251+    return en;
4252+  }
4253+}
4254+
4255+void MindIR_Resize_SetCoordinateTransformMode(PrimitivePtr *primitive,
4256+                                              CoordinateTransformMode coordinate_transform_mode) {
4257+  if (primitive != nullptr && *primitive != nullptr) {
4258+    auto prim = static_cast<schema::Primitive *>(*primitive);
4259+    auto value = prim->value_as_Resize();
4260+    if (prim != nullptr && value != nullptr) {
4261+      flatbuffers::FlatBufferBuilder fbb;
4262+      auto ops_offset =
4263+        schema::CreateResize(fbb, mindspore::schema::Format_NCHW, static_cast<schema::ResizeMethod>(value->method()),
4264+                             value->new_height(), value->new_width(), value->preserve_aspect_ratio(),
4265+                             static_cast<schema::CoordinateTransformMode>(coordinate_transform_mode),
4266+                             value->cubic_coeff(), value->exclude_outside(), value->extrapolation_value(),
4267+                             static_cast<schema::NearestMode>(value->nearest_mode()));
4268+      auto prim_offset =
4269+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_RESIZE), ops_offset.o);
4270+      fbb.Finish(prim_offset);
4271+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4272+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4273+      *primitive = ret_value;
4274+    }
4275+  }
4276+}
4277+float MindIR_Resize_GetCubicCoeff(ConstPrimitivePtr primitive) {
4278+  if (primitive != nullptr) {
4279+    auto prim = static_cast<const schema::Primitive *>(primitive);
4280+    auto value = prim->value_as_Resize();
4281+    if (prim != nullptr && value != nullptr) {
4282+      return value->cubic_coeff();
4283+    } else {
4284+      return .0;
4285+    }
4286+  } else {
4287+    return .0;
4288+  }
4289+}
4290+
4291+void MindIR_Resize_SetCubicCoeff(PrimitivePtr *primitive, float cubic_coeff) {
4292+  if (primitive != nullptr && *primitive != nullptr) {
4293+    auto prim = static_cast<schema::Primitive *>(*primitive);
4294+    auto value = prim->value_as_Resize();
4295+    if (prim != nullptr && value != nullptr) {
4296+      flatbuffers::FlatBufferBuilder fbb;
4297+      auto ops_offset =
4298+        schema::CreateResize(fbb, mindspore::schema::Format_NCHW, static_cast<schema::ResizeMethod>(value->method()),
4299+                             value->new_height(), value->new_width(), value->preserve_aspect_ratio(),
4300+                             static_cast<schema::CoordinateTransformMode>(value->coordinate_transform_mode()),
4301+                             cubic_coeff, value->exclude_outside(), value->extrapolation_value(),
4302+                             static_cast<schema::NearestMode>(value->nearest_mode()));
4303+      auto prim_offset =
4304+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_RESIZE), ops_offset.o);
4305+      fbb.Finish(prim_offset);
4306+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4307+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4308+      *primitive = ret_value;
4309+    }
4310+  }
4311+}
4312+int64_t MindIR_Resize_GetExcludeOutside(ConstPrimitivePtr primitive) {
4313+  if (primitive != nullptr) {
4314+    auto prim = static_cast<const schema::Primitive *>(primitive);
4315+    auto value = prim->value_as_Resize();
4316+    if (prim != nullptr && value != nullptr) {
4317+      return value->exclude_outside();
4318+    } else {
4319+      return 0;
4320+    }
4321+  } else {
4322+    return 0;
4323+  }
4324+}
4325+
4326+void MindIR_Resize_SetExcludeOutside(PrimitivePtr *primitive, int64_t exclude_outside) {
4327+  if (primitive != nullptr && *primitive != nullptr) {
4328+    auto prim = static_cast<schema::Primitive *>(*primitive);
4329+    auto value = prim->value_as_Resize();
4330+    if (prim != nullptr && value != nullptr) {
4331+      flatbuffers::FlatBufferBuilder fbb;
4332+      auto ops_offset = schema::CreateResize(
4333+        fbb, mindspore::schema::Format_NCHW, static_cast<schema::ResizeMethod>(value->method()), value->new_height(),
4334+        value->new_width(), value->preserve_aspect_ratio(),
4335+        static_cast<schema::CoordinateTransformMode>(value->coordinate_transform_mode()), value->cubic_coeff(),
4336+        exclude_outside, value->extrapolation_value(), static_cast<schema::NearestMode>(value->nearest_mode()));
4337+      auto prim_offset =
4338+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_RESIZE), ops_offset.o);
4339+      fbb.Finish(prim_offset);
4340+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4341+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4342+      *primitive = ret_value;
4343+    }
4344+  }
4345+}
4346+float MindIR_Resize_GetExtrapolationValue(ConstPrimitivePtr primitive) {
4347+  if (primitive != nullptr) {
4348+    auto prim = static_cast<const schema::Primitive *>(primitive);
4349+    auto value = prim->value_as_Resize();
4350+    if (prim != nullptr && value != nullptr) {
4351+      return value->extrapolation_value();
4352+    } else {
4353+      return .0;
4354+    }
4355+  } else {
4356+    return .0;
4357+  }
4358+}
4359+
4360+void MindIR_Resize_SetExtrapolationValue(PrimitivePtr *primitive, float extrapolation_value) {
4361+  if (primitive != nullptr && *primitive != nullptr) {
4362+    auto prim = static_cast<schema::Primitive *>(*primitive);
4363+    auto value = prim->value_as_Resize();
4364+    if (prim != nullptr && value != nullptr) {
4365+      flatbuffers::FlatBufferBuilder fbb;
4366+      auto ops_offset = schema::CreateResize(
4367+        fbb, mindspore::schema::Format_NCHW, static_cast<schema::ResizeMethod>(value->method()), value->new_height(),
4368+        value->new_width(), value->preserve_aspect_ratio(),
4369+        static_cast<schema::CoordinateTransformMode>(value->coordinate_transform_mode()), value->cubic_coeff(),
4370+        value->exclude_outside(), extrapolation_value, static_cast<schema::NearestMode>(value->nearest_mode()));
4371+      auto prim_offset =
4372+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_RESIZE), ops_offset.o);
4373+      fbb.Finish(prim_offset);
4374+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4375+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4376+      *primitive = ret_value;
4377+    }
4378+  }
4379+}
4380+NearestMode MindIR_Resize_GetNearestMode(ConstPrimitivePtr primitive) {
4381+  if (primitive != nullptr) {
4382+    auto prim = static_cast<const schema::Primitive *>(primitive);
4383+    auto value = prim->value_as_Resize();
4384+    if (prim != nullptr && value != nullptr) {
4385+      return static_cast<NearestMode>(value->nearest_mode());
4386+    } else {
4387+      NearestMode en = static_cast<NearestMode>(0);
4388+      return en;
4389+    }
4390+  } else {
4391+    NearestMode en = static_cast<NearestMode>(0);
4392+    return en;
4393+  }
4394+}
4395+
4396+void MindIR_Resize_SetNearestMode(PrimitivePtr *primitive, NearestMode nearest_mode) {
4397+  if (primitive != nullptr && *primitive != nullptr) {
4398+    auto prim = static_cast<schema::Primitive *>(*primitive);
4399+    auto value = prim->value_as_Resize();
4400+    if (prim != nullptr && value != nullptr) {
4401+      flatbuffers::FlatBufferBuilder fbb;
4402+      auto ops_offset = schema::CreateResize(
4403+        fbb, mindspore::schema::Format_NCHW, static_cast<schema::ResizeMethod>(value->method()), value->new_height(),
4404+        value->new_width(), value->preserve_aspect_ratio(),
4405+        static_cast<schema::CoordinateTransformMode>(value->coordinate_transform_mode()), value->cubic_coeff(),
4406+        value->exclude_outside(), value->extrapolation_value(), static_cast<schema::NearestMode>(nearest_mode));
4407+      auto prim_offset =
4408+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_RESIZE), ops_offset.o);
4409+      fbb.Finish(prim_offset);
4410+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4411+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4412+      *primitive = ret_value;
4413+    }
4414+  }
4415+}
4416+
4417+// ********** Rsqrt **********
4418+PrimitivePtr MindIR_Rsqrt_CreatePrimitive() {
4419+  flatbuffers::FlatBufferBuilder fbb;
4420+  auto ops_offset = schema::CreateRsqrt(fbb);
4421+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_RSQRT), ops_offset.o);
4422+  fbb.Finish(prim_offset);
4423+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
4424+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4425+  return ret_value;
4426+}
4427+
4428+// ********** ScaleFusion **********
4429+PrimitivePtr MindIR_ScaleFusion_CreatePrimitive(int64_t axis, ActivationType activation_type) {
4430+  flatbuffers::FlatBufferBuilder fbb;
4431+  auto ops_offset = schema::CreateScaleFusion(fbb, axis, static_cast<schema::ActivationType>(activation_type));
4432+  auto prim_offset =
4433+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SCALE_FUSION), ops_offset.o);
4434+  fbb.Finish(prim_offset);
4435+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
4436+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4437+  return ret_value;
4438+}
4439+int64_t MindIR_ScaleFusion_GetAxis(ConstPrimitivePtr primitive) {
4440+  if (primitive != nullptr) {
4441+    auto prim = static_cast<const schema::Primitive *>(primitive);
4442+    auto value = prim->value_as_ScaleFusion();
4443+    if (prim != nullptr && value != nullptr) {
4444+      return value->axis();
4445+    } else {
4446+      return 0;
4447+    }
4448+  } else {
4449+    return 0;
4450+  }
4451+}
4452+
4453+void MindIR_ScaleFusion_SetAxis(PrimitivePtr *primitive, int64_t axis) {
4454+  if (primitive != nullptr && *primitive != nullptr) {
4455+    auto prim = static_cast<schema::Primitive *>(*primitive);
4456+    auto value = prim->value_as_ScaleFusion();
4457+    if (prim != nullptr && value != nullptr) {
4458+      flatbuffers::FlatBufferBuilder fbb;
4459+      auto ops_offset =
4460+        schema::CreateScaleFusion(fbb, axis, static_cast<schema::ActivationType>(value->activation_type()));
4461+      auto prim_offset =
4462+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SCALE_FUSION), ops_offset.o);
4463+      fbb.Finish(prim_offset);
4464+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4465+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4466+      *primitive = ret_value;
4467+    }
4468+  }
4469+}
4470+ActivationType MindIR_ScaleFusion_GetActivationType(ConstPrimitivePtr primitive) {
4471+  if (primitive != nullptr) {
4472+    auto prim = static_cast<const schema::Primitive *>(primitive);
4473+    auto value = prim->value_as_ScaleFusion();
4474+    if (prim != nullptr && value != nullptr) {
4475+      return static_cast<ActivationType>(value->activation_type());
4476+    } else {
4477+      ActivationType en = static_cast<ActivationType>(0);
4478+      return en;
4479+    }
4480+  } else {
4481+    ActivationType en = static_cast<ActivationType>(0);
4482+    return en;
4483+  }
4484+}
4485+
4486+void MindIR_ScaleFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) {
4487+  if (primitive != nullptr && *primitive != nullptr) {
4488+    auto prim = static_cast<schema::Primitive *>(*primitive);
4489+    auto value = prim->value_as_ScaleFusion();
4490+    if (prim != nullptr && value != nullptr) {
4491+      flatbuffers::FlatBufferBuilder fbb;
4492+      auto ops_offset =
4493+        schema::CreateScaleFusion(fbb, value->axis(), static_cast<schema::ActivationType>(activation_type));
4494+      auto prim_offset =
4495+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SCALE_FUSION), ops_offset.o);
4496+      fbb.Finish(prim_offset);
4497+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4498+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4499+      *primitive = ret_value;
4500+    }
4501+  }
4502+}
4503+
4504+// ********** Shape **********
4505+PrimitivePtr MindIR_Shape_CreatePrimitive() {
4506+  flatbuffers::FlatBufferBuilder fbb;
4507+  auto ops_offset = schema::CreateShape(fbb);
4508+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SHAPE), ops_offset.o);
4509+  fbb.Finish(prim_offset);
4510+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
4511+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4512+  return ret_value;
4513+}
4514+
4515+// ********** SliceFusion **********
4516+PrimitivePtr MindIR_SliceFusion_CreatePrimitive(const std::vector<int64_t> &axes) {
4517+  flatbuffers::FlatBufferBuilder fbb;
4518+  auto ops_offset = schema::CreateSliceFusion(fbb, fbb.CreateVector(axes.data(), axes.size()));
4519+  auto prim_offset =
4520+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SLICE_FUSION), ops_offset.o);
4521+  fbb.Finish(prim_offset);
4522+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
4523+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4524+  return ret_value;
4525+}
4526+std::vector<int64_t> MindIR_SliceFusion_GetAxes(ConstPrimitivePtr primitive) {
4527+  if (primitive != nullptr) {
4528+    auto prim = static_cast<const schema::Primitive *>(primitive);
4529+    auto value = prim->value_as_SliceFusion();
4530+    if (prim != nullptr && value != nullptr) {
4531+      std::vector<int64_t> result;
4532+      auto src = value->axes();
4533+      result.resize(src->size());
4534+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
4535+      return result;
4536+    } else {
4537+      return {};
4538+    }
4539+  } else {
4540+    return {};
4541+  }
4542+}
4543+
4544+void MindIR_SliceFusion_SetAxes(PrimitivePtr *primitive, const std::vector<int64_t> &axes) {
4545+  if (primitive != nullptr && *primitive != nullptr) {
4546+    auto prim = static_cast<schema::Primitive *>(*primitive);
4547+    auto value = prim->value_as_SliceFusion();
4548+    if (prim != nullptr && value != nullptr) {
4549+      flatbuffers::FlatBufferBuilder fbb;
4550+      auto ops_offset = schema::CreateSliceFusion(fbb, fbb.CreateVector(axes.data(), axes.size()));
4551+      auto prim_offset =
4552+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SLICE_FUSION), ops_offset.o);
4553+      fbb.Finish(prim_offset);
4554+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4555+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4556+      *primitive = ret_value;
4557+    }
4558+  }
4559+}
4560+
4561+// ********** Softmax **********
4562+PrimitivePtr MindIR_Softmax_CreatePrimitive(const std::vector<int64_t> &axis) {
4563+  flatbuffers::FlatBufferBuilder fbb;
4564+  auto ops_offset = schema::CreateSoftmax(fbb, fbb.CreateVector(axis.data(), axis.size()));
4565+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SOFTMAX), ops_offset.o);
4566+  fbb.Finish(prim_offset);
4567+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
4568+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4569+  return ret_value;
4570+}
4571+std::vector<int64_t> MindIR_Softmax_GetAxis(ConstPrimitivePtr primitive) {
4572+  if (primitive != nullptr) {
4573+    auto prim = static_cast<const schema::Primitive *>(primitive);
4574+    auto value = prim->value_as_Softmax();
4575+    if (prim != nullptr && value != nullptr) {
4576+      std::vector<int64_t> result;
4577+      auto src = value->axis();
4578+      result.resize(src->size());
4579+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
4580+      return result;
4581+    } else {
4582+      return {};
4583+    }
4584+  } else {
4585+    return {};
4586+  }
4587+}
4588+
4589+void MindIR_Softmax_SetAxis(PrimitivePtr *primitive, const std::vector<int64_t> &axis) {
4590+  if (primitive != nullptr && *primitive != nullptr) {
4591+    auto prim = static_cast<schema::Primitive *>(*primitive);
4592+    auto value = prim->value_as_Softmax();
4593+    if (prim != nullptr && value != nullptr) {
4594+      flatbuffers::FlatBufferBuilder fbb;
4595+      auto ops_offset = schema::CreateSoftmax(fbb, fbb.CreateVector(axis.data(), axis.size()));
4596+      auto prim_offset =
4597+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SOFTMAX), ops_offset.o);
4598+      fbb.Finish(prim_offset);
4599+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4600+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4601+      *primitive = ret_value;
4602+    }
4603+  }
4604+}
4605+
4606+// ********** SpaceToBatchND **********
4607+PrimitivePtr MindIR_SpaceToBatchND_CreatePrimitive(const std::vector<int64_t> &block_shape,
4608+                                                   const std::vector<std::vector<int64_t>> &paddings) {
4609+  flatbuffers::FlatBufferBuilder fbb;
4610+  auto ops_offset = schema::CreateSpaceToBatchND(fbb, fbb.CreateVector(block_shape.data(), block_shape.size()),
4611+                                                 CreateVec2D(fbb, paddings));
4612+  auto prim_offset =
4613+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SPACE_TO_BATCH_ND), ops_offset.o);
4614+  fbb.Finish(prim_offset);
4615+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
4616+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4617+  return ret_value;
4618+}
4619+std::vector<int64_t> MindIR_SpaceToBatchND_GetBlockShape(ConstPrimitivePtr primitive) {
4620+  if (primitive != nullptr) {
4621+    auto prim = static_cast<const schema::Primitive *>(primitive);
4622+    auto value = prim->value_as_SpaceToBatchND();
4623+    if (prim != nullptr && value != nullptr) {
4624+      std::vector<int64_t> result;
4625+      auto src = value->block_shape();
4626+      result.resize(src->size());
4627+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
4628+      return result;
4629+    } else {
4630+      return {};
4631+    }
4632+  } else {
4633+    return {};
4634+  }
4635+}
4636+
4637+void MindIR_SpaceToBatchND_SetBlockShape(PrimitivePtr *primitive, const std::vector<int64_t> &block_shape) {
4638+  if (primitive != nullptr && *primitive != nullptr) {
4639+    auto prim = static_cast<schema::Primitive *>(*primitive);
4640+    auto value = prim->value_as_SpaceToBatchND();
4641+    if (prim != nullptr && value != nullptr) {
4642+      flatbuffers::FlatBufferBuilder fbb;
4643+      auto ops_offset = schema::CreateSpaceToBatchND(fbb, fbb.CreateVector(block_shape.data(), block_shape.size()),
4644+                                                     CreateVec2D(fbb, value->paddings()));
4645+      auto prim_offset =
4646+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SPACE_TO_BATCH_ND), ops_offset.o);
4647+      fbb.Finish(prim_offset);
4648+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4649+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4650+      *primitive = ret_value;
4651+    }
4652+  }
4653+}
4654+std::vector<std::vector<int64_t>> MindIR_SpaceToBatchND_GetPaddings(ConstPrimitivePtr primitive) {
4655+  if (primitive != nullptr) {
4656+    auto prim = static_cast<const schema::Primitive *>(primitive);
4657+    auto value = prim->value_as_SpaceToBatchND();
4658+    if (prim != nullptr && value != nullptr) {
4659+      std::vector<std::vector<int64_t>> out;
4660+      auto src = value->paddings();
4661+      for (auto sub_list : *src->data()) {
4662+        std::vector<int64_t> result_tmp;
4663+        result_tmp.resize(sub_list->data()->size());
4664+        std::transform(sub_list->data()->begin(), sub_list->data()->end(), result_tmp.begin(),
4665+                       [](int64_t item) { return item; });
4666+        out.emplace_back(result_tmp);
4667+      }
4668+      return out;
4669+    } else {
4670+      return {};
4671+    }
4672+  } else {
4673+    return {};
4674+  }
4675+}
4676+
4677+void MindIR_SpaceToBatchND_SetPaddings(PrimitivePtr *primitive, const std::vector<std::vector<int64_t>> &paddings) {
4678+  if (primitive != nullptr && *primitive != nullptr) {
4679+    auto prim = static_cast<schema::Primitive *>(*primitive);
4680+    auto value = prim->value_as_SpaceToBatchND();
4681+    if (prim != nullptr && value != nullptr) {
4682+      flatbuffers::FlatBufferBuilder fbb;
4683+      auto ops_offset = schema::CreateSpaceToBatchND(
4684+        fbb, fbb.CreateVector(value->block_shape()->data(), value->block_shape()->size()), CreateVec2D(fbb, paddings));
4685+      auto prim_offset =
4686+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SPACE_TO_BATCH_ND), ops_offset.o);
4687+      fbb.Finish(prim_offset);
4688+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4689+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4690+      *primitive = ret_value;
4691+    }
4692+  }
4693+}
4694+
4695+// ********** Split **********
4696+PrimitivePtr MindIR_Split_CreatePrimitive(int64_t output_num, const std::vector<int64_t> &size_splits, int64_t axis) {
4697+  flatbuffers::FlatBufferBuilder fbb;
4698+  auto ops_offset =
4699+    schema::CreateSplit(fbb, output_num, fbb.CreateVector(size_splits.data(), size_splits.size()), axis);
4700+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SPLIT), ops_offset.o);
4701+  fbb.Finish(prim_offset);
4702+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
4703+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4704+  return ret_value;
4705+}
4706+int64_t MindIR_Split_GetOutputNum(ConstPrimitivePtr primitive) {
4707+  if (primitive != nullptr) {
4708+    auto prim = static_cast<const schema::Primitive *>(primitive);
4709+    auto value = prim->value_as_Split();
4710+    if (prim != nullptr && value != nullptr) {
4711+      return value->output_num();
4712+    } else {
4713+      return 0;
4714+    }
4715+  } else {
4716+    return 0;
4717+  }
4718+}
4719+
4720+void MindIR_Split_SetOutputNum(PrimitivePtr *primitive, int64_t output_num) {
4721+  if (primitive != nullptr && *primitive != nullptr) {
4722+    auto prim = static_cast<schema::Primitive *>(*primitive);
4723+    auto value = prim->value_as_Split();
4724+    if (prim != nullptr && value != nullptr) {
4725+      flatbuffers::FlatBufferBuilder fbb;
4726+      auto ops_offset = schema::CreateSplit(
4727+        fbb, output_num, fbb.CreateVector(value->size_splits()->data(), value->size_splits()->size()), value->axis());
4728+      auto prim_offset =
4729+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SPLIT), ops_offset.o);
4730+      fbb.Finish(prim_offset);
4731+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4732+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4733+      *primitive = ret_value;
4734+    }
4735+  }
4736+}
4737+std::vector<int64_t> MindIR_Split_GetSizeSplits(ConstPrimitivePtr primitive) {
4738+  if (primitive != nullptr) {
4739+    auto prim = static_cast<const schema::Primitive *>(primitive);
4740+    auto value = prim->value_as_Split();
4741+    if (prim != nullptr && value != nullptr) {
4742+      std::vector<int64_t> result;
4743+      auto src = value->size_splits();
4744+      result.resize(src->size());
4745+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
4746+      return result;
4747+    } else {
4748+      return {};
4749+    }
4750+  } else {
4751+    return {};
4752+  }
4753+}
4754+
4755+void MindIR_Split_SetSizeSplits(PrimitivePtr *primitive, const std::vector<int64_t> &size_splits) {
4756+  if (primitive != nullptr && *primitive != nullptr) {
4757+    auto prim = static_cast<schema::Primitive *>(*primitive);
4758+    auto value = prim->value_as_Split();
4759+    if (prim != nullptr && value != nullptr) {
4760+      flatbuffers::FlatBufferBuilder fbb;
4761+      auto ops_offset = schema::CreateSplit(fbb, value->output_num(),
4762+                                            fbb.CreateVector(size_splits.data(), size_splits.size()), value->axis());
4763+      auto prim_offset =
4764+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SPLIT), ops_offset.o);
4765+      fbb.Finish(prim_offset);
4766+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4767+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4768+      *primitive = ret_value;
4769+    }
4770+  }
4771+}
4772+int64_t MindIR_Split_GetAxis(ConstPrimitivePtr primitive) {
4773+  if (primitive != nullptr) {
4774+    auto prim = static_cast<const schema::Primitive *>(primitive);
4775+    auto value = prim->value_as_Split();
4776+    if (prim != nullptr && value != nullptr) {
4777+      return value->axis();
4778+    } else {
4779+      return 0;
4780+    }
4781+  } else {
4782+    return 0;
4783+  }
4784+}
4785+
4786+void MindIR_Split_SetAxis(PrimitivePtr *primitive, int64_t axis) {
4787+  if (primitive != nullptr && *primitive != nullptr) {
4788+    auto prim = static_cast<schema::Primitive *>(*primitive);
4789+    auto value = prim->value_as_Split();
4790+    if (prim != nullptr && value != nullptr) {
4791+      flatbuffers::FlatBufferBuilder fbb;
4792+      auto ops_offset = schema::CreateSplit(
4793+        fbb, value->output_num(), fbb.CreateVector(value->size_splits()->data(), value->size_splits()->size()), axis);
4794+      auto prim_offset =
4795+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SPLIT), ops_offset.o);
4796+      fbb.Finish(prim_offset);
4797+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4798+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4799+      *primitive = ret_value;
4800+    }
4801+  }
4802+}
4803+
4804+// ********** Sqrt **********
4805+PrimitivePtr MindIR_Sqrt_CreatePrimitive() {
4806+  flatbuffers::FlatBufferBuilder fbb;
4807+  auto ops_offset = schema::CreateSqrt(fbb);
4808+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SQRT), ops_offset.o);
4809+  fbb.Finish(prim_offset);
4810+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
4811+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4812+  return ret_value;
4813+}
4814+
4815+// ********** SquaredDifference **********
4816+PrimitivePtr MindIR_SquaredDifference_CreatePrimitive() {
4817+  flatbuffers::FlatBufferBuilder fbb;
4818+  auto ops_offset = schema::CreateSquaredDifference(fbb);
4819+  auto prim_offset =
4820+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SQUARED_DIFFERENCE), ops_offset.o);
4821+  fbb.Finish(prim_offset);
4822+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
4823+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4824+  return ret_value;
4825+}
4826+
4827+// ********** Squeeze **********
4828+PrimitivePtr MindIR_Squeeze_CreatePrimitive(const std::vector<int64_t> &axis) {
4829+  flatbuffers::FlatBufferBuilder fbb;
4830+  auto ops_offset = schema::CreateSqueeze(fbb, fbb.CreateVector(axis.data(), axis.size()));
4831+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SQUEEZE), ops_offset.o);
4832+  fbb.Finish(prim_offset);
4833+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
4834+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4835+  return ret_value;
4836+}
4837+std::vector<int64_t> MindIR_Squeeze_GetAxis(ConstPrimitivePtr primitive) {
4838+  if (primitive != nullptr) {
4839+    auto prim = static_cast<const schema::Primitive *>(primitive);
4840+    auto value = prim->value_as_Squeeze();
4841+    if (prim != nullptr && value != nullptr) {
4842+      std::vector<int64_t> result;
4843+      auto src = value->axis();
4844+      result.resize(src->size());
4845+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
4846+      return result;
4847+    } else {
4848+      return {};
4849+    }
4850+  } else {
4851+    return {};
4852+  }
4853+}
4854+
4855+void MindIR_Squeeze_SetAxis(PrimitivePtr *primitive, const std::vector<int64_t> &axis) {
4856+  if (primitive != nullptr && *primitive != nullptr) {
4857+    auto prim = static_cast<schema::Primitive *>(*primitive);
4858+    auto value = prim->value_as_Squeeze();
4859+    if (prim != nullptr && value != nullptr) {
4860+      flatbuffers::FlatBufferBuilder fbb;
4861+      auto ops_offset = schema::CreateSqueeze(fbb, fbb.CreateVector(axis.data(), axis.size()));
4862+      auto prim_offset =
4863+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SQUEEZE), ops_offset.o);
4864+      fbb.Finish(prim_offset);
4865+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4866+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4867+      *primitive = ret_value;
4868+    }
4869+  }
4870+}
4871+
4872+// ********** Stack **********
4873+PrimitivePtr MindIR_Stack_CreatePrimitive(int64_t axis) {
4874+  flatbuffers::FlatBufferBuilder fbb;
4875+  auto ops_offset = schema::CreateStack(fbb, axis);
4876+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_STACK), ops_offset.o);
4877+  fbb.Finish(prim_offset);
4878+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
4879+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4880+  return ret_value;
4881+}
4882+int64_t MindIR_Stack_GetAxis(ConstPrimitivePtr primitive) {
4883+  if (primitive != nullptr) {
4884+    auto prim = static_cast<const schema::Primitive *>(primitive);
4885+    auto value = prim->value_as_Stack();
4886+    if (prim != nullptr && value != nullptr) {
4887+      return value->axis();
4888+    } else {
4889+      return 0;
4890+    }
4891+  } else {
4892+    return 0;
4893+  }
4894+}
4895+
4896+void MindIR_Stack_SetAxis(PrimitivePtr *primitive, int64_t axis) {
4897+  if (primitive != nullptr && *primitive != nullptr) {
4898+    auto prim = static_cast<schema::Primitive *>(*primitive);
4899+    auto value = prim->value_as_Stack();
4900+    if (prim != nullptr && value != nullptr) {
4901+      flatbuffers::FlatBufferBuilder fbb;
4902+      auto ops_offset = schema::CreateStack(fbb, axis);
4903+      auto prim_offset =
4904+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_STACK), ops_offset.o);
4905+      fbb.Finish(prim_offset);
4906+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4907+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4908+      *primitive = ret_value;
4909+    }
4910+  }
4911+}
4912+
4913+// ********** StridedSlice **********
4914+PrimitivePtr MindIR_StridedSlice_CreatePrimitive(int64_t begin_mask, int64_t end_mask, int64_t ellipsis_mask,
4915+                                                 int64_t new_axis_mask, int64_t shrink_axis_mask) {
4916+  flatbuffers::FlatBufferBuilder fbb;
4917+  auto ops_offset =
4918+    schema::CreateStridedSlice(fbb, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask);
4919+  auto prim_offset =
4920+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_STRIDED_SLICE), ops_offset.o);
4921+  fbb.Finish(prim_offset);
4922+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
4923+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4924+  return ret_value;
4925+}
4926+int64_t MindIR_StridedSlice_GetBeginMask(ConstPrimitivePtr primitive) {
4927+  if (primitive != nullptr) {
4928+    auto prim = static_cast<const schema::Primitive *>(primitive);
4929+    auto value = prim->value_as_StridedSlice();
4930+    if (prim != nullptr && value != nullptr) {
4931+      return value->begin_mask();
4932+    } else {
4933+      return 0;
4934+    }
4935+  } else {
4936+    return 0;
4937+  }
4938+}
4939+
4940+void MindIR_StridedSlice_SetBeginMask(PrimitivePtr *primitive, int64_t begin_mask) {
4941+  if (primitive != nullptr && *primitive != nullptr) {
4942+    auto prim = static_cast<schema::Primitive *>(*primitive);
4943+    auto value = prim->value_as_StridedSlice();
4944+    if (prim != nullptr && value != nullptr) {
4945+      flatbuffers::FlatBufferBuilder fbb;
4946+      auto ops_offset = schema::CreateStridedSlice(fbb, begin_mask, value->end_mask(), value->ellipsis_mask(),
4947+                                                   value->new_axis_mask(), value->shrink_axis_mask());
4948+      auto prim_offset =
4949+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_STRIDED_SLICE), ops_offset.o);
4950+      fbb.Finish(prim_offset);
4951+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4952+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4953+      *primitive = ret_value;
4954+    }
4955+  }
4956+}
4957+int64_t MindIR_StridedSlice_GetEndMask(ConstPrimitivePtr primitive) {
4958+  if (primitive != nullptr) {
4959+    auto prim = static_cast<const schema::Primitive *>(primitive);
4960+    auto value = prim->value_as_StridedSlice();
4961+    if (prim != nullptr && value != nullptr) {
4962+      return value->end_mask();
4963+    } else {
4964+      return 0;
4965+    }
4966+  } else {
4967+    return 0;
4968+  }
4969+}
4970+
4971+void MindIR_StridedSlice_SetEndMask(PrimitivePtr *primitive, int64_t end_mask) {
4972+  if (primitive != nullptr && *primitive != nullptr) {
4973+    auto prim = static_cast<schema::Primitive *>(*primitive);
4974+    auto value = prim->value_as_StridedSlice();
4975+    if (prim != nullptr && value != nullptr) {
4976+      flatbuffers::FlatBufferBuilder fbb;
4977+      auto ops_offset = schema::CreateStridedSlice(fbb, value->begin_mask(), end_mask, value->ellipsis_mask(),
4978+                                                   value->new_axis_mask(), value->shrink_axis_mask());
4979+      auto prim_offset =
4980+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_STRIDED_SLICE), ops_offset.o);
4981+      fbb.Finish(prim_offset);
4982+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
4983+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
4984+      *primitive = ret_value;
4985+    }
4986+  }
4987+}
4988+int64_t MindIR_StridedSlice_GetEllipsisMask(ConstPrimitivePtr primitive) {
4989+  if (primitive != nullptr) {
4990+    auto prim = static_cast<const schema::Primitive *>(primitive);
4991+    auto value = prim->value_as_StridedSlice();
4992+    if (prim != nullptr && value != nullptr) {
4993+      return value->ellipsis_mask();
4994+    } else {
4995+      return 0;
4996+    }
4997+  } else {
4998+    return 0;
4999+  }
5000+}
5001+
5002+void MindIR_StridedSlice_SetEllipsisMask(PrimitivePtr *primitive, int64_t ellipsis_mask) {
5003+  if (primitive != nullptr && *primitive != nullptr) {
5004+    auto prim = static_cast<schema::Primitive *>(*primitive);
5005+    auto value = prim->value_as_StridedSlice();
5006+    if (prim != nullptr && value != nullptr) {
5007+      flatbuffers::FlatBufferBuilder fbb;
5008+      auto ops_offset = schema::CreateStridedSlice(fbb, value->begin_mask(), value->end_mask(), ellipsis_mask,
5009+                                                   value->new_axis_mask(), value->shrink_axis_mask());
5010+      auto prim_offset =
5011+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_STRIDED_SLICE), ops_offset.o);
5012+      fbb.Finish(prim_offset);
5013+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
5014+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
5015+      *primitive = ret_value;
5016+    }
5017+  }
5018+}
5019+int64_t MindIR_StridedSlice_GetNewAxisMask(ConstPrimitivePtr primitive) {
5020+  if (primitive != nullptr) {
5021+    auto prim = static_cast<const schema::Primitive *>(primitive);
5022+    auto value = prim->value_as_StridedSlice();
5023+    if (prim != nullptr && value != nullptr) {
5024+      return value->new_axis_mask();
5025+    } else {
5026+      return 0;
5027+    }
5028+  } else {
5029+    return 0;
5030+  }
5031+}
5032+
5033+void MindIR_StridedSlice_SetNewAxisMask(PrimitivePtr *primitive, int64_t new_axis_mask) {
5034+  if (primitive != nullptr && *primitive != nullptr) {
5035+    auto prim = static_cast<schema::Primitive *>(*primitive);
5036+    auto value = prim->value_as_StridedSlice();
5037+    if (prim != nullptr && value != nullptr) {
5038+      flatbuffers::FlatBufferBuilder fbb;
5039+      auto ops_offset = schema::CreateStridedSlice(fbb, value->begin_mask(), value->end_mask(), value->ellipsis_mask(),
5040+                                                   new_axis_mask, value->shrink_axis_mask());
5041+      auto prim_offset =
5042+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_STRIDED_SLICE), ops_offset.o);
5043+      fbb.Finish(prim_offset);
5044+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
5045+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
5046+      *primitive = ret_value;
5047+    }
5048+  }
5049+}
5050+int64_t MindIR_StridedSlice_GetShrinkAxisMask(ConstPrimitivePtr primitive) {
5051+  if (primitive != nullptr) {
5052+    auto prim = static_cast<const schema::Primitive *>(primitive);
5053+    auto value = prim->value_as_StridedSlice();
5054+    if (prim != nullptr && value != nullptr) {
5055+      return value->shrink_axis_mask();
5056+    } else {
5057+      return 0;
5058+    }
5059+  } else {
5060+    return 0;
5061+  }
5062+}
5063+
5064+void MindIR_StridedSlice_SetShrinkAxisMask(PrimitivePtr *primitive, int64_t shrink_axis_mask) {
5065+  if (primitive != nullptr && *primitive != nullptr) {
5066+    auto prim = static_cast<schema::Primitive *>(*primitive);
5067+    auto value = prim->value_as_StridedSlice();
5068+    if (prim != nullptr && value != nullptr) {
5069+      flatbuffers::FlatBufferBuilder fbb;
5070+      auto ops_offset = schema::CreateStridedSlice(fbb, value->begin_mask(), value->end_mask(), value->ellipsis_mask(),
5071+                                                   value->new_axis_mask(), shrink_axis_mask);
5072+      auto prim_offset =
5073+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_STRIDED_SLICE), ops_offset.o);
5074+      fbb.Finish(prim_offset);
5075+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
5076+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
5077+      *primitive = ret_value;
5078+    }
5079+  }
5080+}
5081+
5082+// ********** SubFusion **********
5083+PrimitivePtr MindIR_SubFusion_CreatePrimitive(ActivationType activation_type) {
5084+  flatbuffers::FlatBufferBuilder fbb;
5085+  auto ops_offset = schema::CreateSubFusion(fbb, static_cast<schema::ActivationType>(activation_type));
5086+  auto prim_offset =
5087+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SUB_FUSION), ops_offset.o);
5088+  fbb.Finish(prim_offset);
5089+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
5090+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
5091+  return ret_value;
5092+}
5093+ActivationType MindIR_SubFusion_GetActivationType(ConstPrimitivePtr primitive) {
5094+  if (primitive != nullptr) {
5095+    auto prim = static_cast<const schema::Primitive *>(primitive);
5096+    auto value = prim->value_as_SubFusion();
5097+    if (prim != nullptr && value != nullptr) {
5098+      return static_cast<ActivationType>(value->activation_type());
5099+    } else {
5100+      ActivationType en = static_cast<ActivationType>(0);
5101+      return en;
5102+    }
5103+  } else {
5104+    ActivationType en = static_cast<ActivationType>(0);
5105+    return en;
5106+  }
5107+}
5108+
5109+void MindIR_SubFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) {
5110+  if (primitive != nullptr && *primitive != nullptr) {
5111+    auto prim = static_cast<schema::Primitive *>(*primitive);
5112+    auto value = prim->value_as_SubFusion();
5113+    if (prim != nullptr && value != nullptr) {
5114+      flatbuffers::FlatBufferBuilder fbb;
5115+      auto ops_offset = schema::CreateSubFusion(fbb, static_cast<schema::ActivationType>(activation_type));
5116+      auto prim_offset =
5117+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_SUB_FUSION), ops_offset.o);
5118+      fbb.Finish(prim_offset);
5119+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
5120+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
5121+      *primitive = ret_value;
5122+    }
5123+  }
5124+}
5125+
5126+// ********** TileFusion **********
5127+PrimitivePtr MindIR_TileFusion_CreatePrimitive(const std::vector<int64_t> &dims) {
5128+  flatbuffers::FlatBufferBuilder fbb;
5129+  auto ops_offset = schema::CreateTileFusion(fbb, fbb.CreateVector(dims.data(), dims.size()));
5130+  auto prim_offset =
5131+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_TILE_FUSION), ops_offset.o);
5132+  fbb.Finish(prim_offset);
5133+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
5134+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
5135+  return ret_value;
5136+}
5137+std::vector<int64_t> MindIR_TileFusion_GetDims(ConstPrimitivePtr primitive) {
5138+  if (primitive != nullptr) {
5139+    auto prim = static_cast<const schema::Primitive *>(primitive);
5140+    auto value = prim->value_as_TileFusion();
5141+    if (prim != nullptr && value != nullptr) {
5142+      std::vector<int64_t> result;
5143+      auto src = value->dims();
5144+      result.resize(src->size());
5145+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
5146+      return result;
5147+    } else {
5148+      return {};
5149+    }
5150+  } else {
5151+    return {};
5152+  }
5153+}
5154+
5155+void MindIR_TileFusion_SetDims(PrimitivePtr *primitive, const std::vector<int64_t> &dims) {
5156+  if (primitive != nullptr && *primitive != nullptr) {
5157+    auto prim = static_cast<schema::Primitive *>(*primitive);
5158+    auto value = prim->value_as_TileFusion();
5159+    if (prim != nullptr && value != nullptr) {
5160+      flatbuffers::FlatBufferBuilder fbb;
5161+      auto ops_offset = schema::CreateTileFusion(fbb, fbb.CreateVector(dims.data(), dims.size()));
5162+      auto prim_offset =
5163+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_TILE_FUSION), ops_offset.o);
5164+      fbb.Finish(prim_offset);
5165+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
5166+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
5167+      *primitive = ret_value;
5168+    }
5169+  }
5170+}
5171+
5172+// ********** TopKFusion **********
5173+PrimitivePtr MindIR_TopKFusion_CreatePrimitive(bool sorted, int64_t axis) {
5174+  flatbuffers::FlatBufferBuilder fbb;
5175+  auto ops_offset = schema::CreateTopKFusion(fbb, sorted, axis);
5176+  auto prim_offset =
5177+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_TOPK_FUSION), ops_offset.o);
5178+  fbb.Finish(prim_offset);
5179+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
5180+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
5181+  return ret_value;
5182+}
5183+bool MindIR_TopKFusion_GetSorted(ConstPrimitivePtr primitive) {
5184+  if (primitive != nullptr) {
5185+    auto prim = static_cast<const schema::Primitive *>(primitive);
5186+    auto value = prim->value_as_TopKFusion();
5187+    if (prim != nullptr && value != nullptr) {
5188+      return value->sorted();
5189+    } else {
5190+      return false;
5191+    }
5192+  } else {
5193+    return false;
5194+  }
5195+}
5196+
5197+void MindIR_TopKFusion_SetSorted(PrimitivePtr *primitive, bool sorted) {
5198+  if (primitive != nullptr && *primitive != nullptr) {
5199+    auto prim = static_cast<schema::Primitive *>(*primitive);
5200+    auto value = prim->value_as_TopKFusion();
5201+    if (prim != nullptr && value != nullptr) {
5202+      flatbuffers::FlatBufferBuilder fbb;
5203+      auto ops_offset = schema::CreateTopKFusion(fbb, sorted, value->axis());
5204+      auto prim_offset =
5205+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_TOPK_FUSION), ops_offset.o);
5206+      fbb.Finish(prim_offset);
5207+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
5208+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
5209+      *primitive = ret_value;
5210+    }
5211+  }
5212+}
5213+int64_t MindIR_TopKFusion_GetAxis(ConstPrimitivePtr primitive) {
5214+  if (primitive != nullptr) {
5215+    auto prim = static_cast<const schema::Primitive *>(primitive);
5216+    auto value = prim->value_as_TopKFusion();
5217+    if (prim != nullptr && value != nullptr) {
5218+      return value->axis();
5219+    } else {
5220+      return 0;
5221+    }
5222+  } else {
5223+    return 0;
5224+  }
5225+}
5226+
5227+void MindIR_TopKFusion_SetAxis(PrimitivePtr *primitive, int64_t axis) {
5228+  if (primitive != nullptr && *primitive != nullptr) {
5229+    auto prim = static_cast<schema::Primitive *>(*primitive);
5230+    auto value = prim->value_as_TopKFusion();
5231+    if (prim != nullptr && value != nullptr) {
5232+      flatbuffers::FlatBufferBuilder fbb;
5233+      auto ops_offset = schema::CreateTopKFusion(fbb, value->sorted(), axis);
5234+      auto prim_offset =
5235+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_TOPK_FUSION), ops_offset.o);
5236+      fbb.Finish(prim_offset);
5237+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
5238+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
5239+      *primitive = ret_value;
5240+    }
5241+  }
5242+}
5243+
5244+// ********** Transpose **********
5245+PrimitivePtr MindIR_Transpose_CreatePrimitive() {
5246+  flatbuffers::FlatBufferBuilder fbb;
5247+  auto ops_offset = schema::CreateTranspose(fbb);
5248+  auto prim_offset =
5249+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_TRANSPOSE), ops_offset.o);
5250+  fbb.Finish(prim_offset);
5251+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
5252+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
5253+  return ret_value;
5254+}
5255+
5256+// ********** Unsqueeze **********
5257+PrimitivePtr MindIR_Unsqueeze_CreatePrimitive(const std::vector<int64_t> &axis) {
5258+  flatbuffers::FlatBufferBuilder fbb;
5259+  auto ops_offset = schema::CreateUnsqueeze(fbb, fbb.CreateVector(axis.data(), axis.size()));
5260+  auto prim_offset =
5261+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_UNSQUEEZE), ops_offset.o);
5262+  fbb.Finish(prim_offset);
5263+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
5264+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
5265+  return ret_value;
5266+}
5267+std::vector<int64_t> MindIR_Unsqueeze_GetAxis(ConstPrimitivePtr primitive) {
5268+  if (primitive != nullptr) {
5269+    auto prim = static_cast<const schema::Primitive *>(primitive);
5270+    auto value = prim->value_as_Unsqueeze();
5271+    if (prim != nullptr && value != nullptr) {
5272+      std::vector<int64_t> result;
5273+      auto src = value->axis();
5274+      result.resize(src->size());
5275+      std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; });
5276+      return result;
5277+    } else {
5278+      return {};
5279+    }
5280+  } else {
5281+    return {};
5282+  }
5283+}
5284+
5285+void MindIR_Unsqueeze_SetAxis(PrimitivePtr *primitive, const std::vector<int64_t> &axis) {
5286+  if (primitive != nullptr && *primitive != nullptr) {
5287+    auto prim = static_cast<schema::Primitive *>(*primitive);
5288+    auto value = prim->value_as_Unsqueeze();
5289+    if (prim != nullptr && value != nullptr) {
5290+      flatbuffers::FlatBufferBuilder fbb;
5291+      auto ops_offset = schema::CreateUnsqueeze(fbb, fbb.CreateVector(axis.data(), axis.size()));
5292+      auto prim_offset =
5293+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_UNSQUEEZE), ops_offset.o);
5294+      fbb.Finish(prim_offset);
5295+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
5296+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
5297+      *primitive = ret_value;
5298+    }
5299+  }
5300+}
5301+
5302+}  // namespace lite
5303+}  // namespace mindspore
5304diff --git a/mindspore/lite/mindir/src/mindir_memory_manager.cc b/mindspore/lite/mindir/src/mindir_memory_manager.cc
5305new file mode 100644
5306index 00000000..c775fa70
5307--- /dev/null
5308+++ b/mindspore/lite/mindir/src/mindir_memory_manager.cc
5309@@ -0,0 +1,122 @@
5310+#include "mindir_memory_manager.h"
5311+#include "src/common/log.h"
5312+#include "utils.h"
5313+#include <iostream>
5314+namespace mindspore {
5315+namespace lite {
5316+namespace {
template <typename KEY_TYPE, typename VALUE_TYPE>
// Frees every owned buffer in `map` and removes ALL entries.
// Fix: the original only erased entries whose value was non-null, so any
// entry already holding nullptr survived a "clear all" and leaked map slots.
void ClearMap(std::map<KEY_TYPE, VALUE_TYPE> &map) {
  for (auto &item : map) {
    if (item.second != nullptr) {
      free(item.second);
      item.second = nullptr;
    }
  }
  map.clear();
}
5328+}  // namespace
5329+MindIRMemoryManager *MindIRMemoryManager::GetInstance() {
5330+  static MindIRMemoryManager instance;
5331+  return &instance;
5332+}
5333+
5334+void *MindIRMemoryManager::CopyFbbToNewMemory(flatbuffers::FlatBufferBuilder &fbb) {
5335+  auto buff = reinterpret_cast<uint8_t *>(malloc(fbb.GetSize()));
5336+  if (buff == nullptr) {
5337+    MS_LOG(ERROR) << "malloc memory for primitive failed!";
5338+    fbb.Clear();
5339+    return nullptr;
5340+  }
5341+  memcpy(buff, fbb.GetBufferPointer(), fbb.GetSize());
5342+  fbb.Clear();
5343+  return buff;
5344+}
5345+void *MindIRMemoryManager::CreateTensorFromBuilder(flatbuffers::FlatBufferBuilder &fbb_new, schema::Tensor *tensor) {
5346+  std::lock_guard<std::mutex> lck(mutex);
5347+  if (tensor != nullptr) {
5348+    // find primitive exist
5349+    if (tensor_map.find(tensor) != tensor_map.end()) {
5350+      // if find, then delete
5351+      void *flatbuffer_ptr = tensor_map[tensor];
5352+      if (flatbuffer_ptr != nullptr) {
5353+        free(flatbuffer_ptr);
5354+        tensor_map[tensor] = nullptr;
5355+        tensor_map.erase(tensor_map.find(tensor));
5356+      }
5357+    }
5358+  }
5359+  // then copy fbb
5360+  auto new_memory_ptr = CopyFbbToNewMemory(fbb_new);
5361+  auto tensor_root = flatbuffers::GetMutableRoot<schema::Tensor>(new_memory_ptr);
5362+  tensor_map[tensor_root] = new_memory_ptr;
5363+  return new_memory_ptr;
5364+}
5365+
5366+void *MindIRMemoryManager::CreatePrimitiveFromBuilder(flatbuffers::FlatBufferBuilder &fbb_new,
5367+                                                      schema::Primitive *primitive) {
5368+  std::lock_guard<std::mutex> lck(mutex);
5369+  if (primitive != nullptr) {
5370+    // find primitive exist
5371+    if (primitive_map.find(primitive) != primitive_map.end()) {
5372+      // if find, then delete
5373+      void *flatbuffer_ptr = primitive_map[primitive];
5374+      if (flatbuffer_ptr != nullptr) {
5375+        free(flatbuffer_ptr);
5376+        primitive_map[primitive] = nullptr;
5377+        primitive_map.erase(primitive_map.find(primitive));
5378+      }
5379+    }
5380+  }
5381+  // then copy fbb
5382+  auto new_memory_ptr = CopyFbbToNewMemory(fbb_new);
5383+  auto primitive_root = flatbuffers::GetMutableRoot<schema::Primitive>(new_memory_ptr);
5384+  primitive_map[primitive_root] = new_memory_ptr;
5385+  return new_memory_ptr;
5386+}
5387+
5388+void MindIRMemoryManager::DeletePrimitive(schema::Primitive *primitive) {
5389+  std::lock_guard<std::mutex> lck(mutex);
5390+  if (primitive == nullptr) {
5391+    MS_LOG(ERROR) << "primitive is nullptr, no need to delete.";
5392+    return;
5393+  }
5394+  if (primitive_map.find(primitive) != primitive_map.end()) {
5395+    // if find, then delete
5396+    void *flatbuffer_ptr = primitive_map[primitive];
5397+    if (flatbuffer_ptr != nullptr) {
5398+      free(flatbuffer_ptr);
5399+      primitive_map[primitive] = nullptr;
5400+      primitive_map.erase(primitive_map.find(primitive));
5401+    }
5402+  }
5403+}
5404+
5405+void MindIRMemoryManager::DeleteTensor(schema::Tensor *tensor) {
5406+  std::lock_guard<std::mutex> lck(mutex);
5407+  if (tensor == nullptr) {
5408+    MS_LOG(ERROR) << "tensor is nullptr, no need to delete.";
5409+    return;
5410+  }
5411+  if (tensor != nullptr) {
5412+    // find primitive exist
5413+    if (tensor_map.find(tensor) != tensor_map.end()) {
5414+      // if find, then delete
5415+      void *flatbuffer_ptr = tensor_map[tensor];
5416+      if (flatbuffer_ptr != nullptr) {
5417+        free(flatbuffer_ptr);
5418+        tensor_map[tensor] = nullptr;
5419+        tensor_map.erase(tensor_map.find(tensor));
5420+      }
5421+    }
5422+  }
5423+}
5424+
5425+void MindIRMemoryManager::ClearAllMemory() {
5426+  std::lock_guard<std::mutex> lck(mutex);
5427+  ClearMap(primitive_map);
5428+  ClearMap(tensor_map);
5429+}
5430+}  // namespace lite
5431+}  // namespace mindspore
5432\ No newline at end of file
5433diff --git a/mindspore/lite/mindir/src/mindir_nnrt_lite_graph.cc b/mindspore/lite/mindir/src/mindir_nnrt_lite_graph.cc
5434new file mode 100644
5435index 00000000..a914fa6b
5436--- /dev/null
5437+++ b/mindspore/lite/mindir/src/mindir_nnrt_lite_graph.cc
5438@@ -0,0 +1,87 @@
5439+/**
5440+ * Copyright 2021 Huawei Technologies Co., Ltd
5441+ *
5442+ * Licensed under the Apache License, Version 2.0 (the "License");
5443+ * you may not use this file except in compliance with the License.
5444+ * You may obtain a copy of the License at
5445+ *
5446+ * http://www.apache.org/licenses/LICENSE-2.0
5447+ *
5448+ * Unless required by applicable law or agreed to in writing, software
5449+ * distributed under the License is distributed on an "AS IS" BASIS,
5450+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
5451+ * See the License for the specific language governing permissions and
5452+ * limitations under the License.
5453+ */
5454+#include "mindir_lite_graph.h"
5455+#include "mindir_tensor.h"
5456+#include "mindir_primitive.h"
5457+#include "src/common/log.h"
5458+#include "schema/model_generated.h"
5459+#include "mindir_memory_manager.h"
5460+#include <string>
5461+namespace mindspore {
5462+namespace lite {
5463+void MindIR_LiteGraph_Destroy(LiteGraph **lite_graph) {
5464+  if (lite_graph != nullptr && *lite_graph != nullptr) {
5465+    MS_LOG(INFO) << "start to destroy LiteGraph.";
5466+    auto graph = *lite_graph;
5467+    graph->name_.clear();
5468+    graph->input_indices_.clear();
5469+    graph->output_indices_.clear();
5470+    MS_LOG(INFO) << "Destroying  nodes.";
5471+    // node
5472+    for (size_t idx = 0; idx < graph->all_nodes_.size(); idx++) {
5473+      if (graph->all_nodes_[idx] != nullptr) {
5474+        MindIRMemoryManager::GetInstance()->DeletePrimitive(
5475+          static_cast<schema::Primitive *>(graph->all_nodes_[idx]->primitive_));
5476+        delete graph->all_nodes_[idx];
5477+      }
5478+    }
5479+    MS_LOG(INFO) << "Destroying  subgraphs.";
5480+    // subgraph
5481+    for (size_t idx = 0; idx < graph->sub_graphs_.size(); idx++) {
5482+      if (graph->sub_graphs_[idx] != nullptr) {
5483+        delete graph->sub_graphs_[idx];
5484+      }
5485+    }
5486+    MS_LOG(INFO) << "Destroying  tensors.";
5487+    // tensor
5488+    for (size_t idx = 0; idx < graph->all_tensors_.size(); idx++) {
5489+      if (graph->all_tensors_[idx] != nullptr) {
5490+        MindIRMemoryManager::GetInstance()->DeleteTensor(static_cast<schema::Tensor *>(graph->all_tensors_[idx]));
5491+      }
5492+    }
5493+    // graph
5494+    delete graph;
5495+    *lite_graph = nullptr;
5496+  } else {
5497+    MS_LOG(ERROR) << "nnrt_lite_graph is nullptr, can not delete.";
5498+  }
5499+}
5500+
5501+size_t MindIR_LiteGraph_GetConstTensorSize(const LiteGraph *lite_graph) {
5502+  if (lite_graph != nullptr) {
5503+    size_t size = 0;
5504+    for (auto tensor : lite_graph->all_tensors_) {
5505+      if (tensor != nullptr) {
5506+        auto value = static_cast<schema::Tensor *>(tensor);
5507+        if (value != nullptr) {
5508+          auto src = value->data();
5509+          if (src == nullptr) {
5510+            continue;
5511+          }
5512+          size += src->size();
5513+        }
5514+      }
5515+    }
5516+    MS_LOG(DEBUG) << "lite_graph has " << lite_graph->all_tensors_.size() << "tensors ,const tensor size = " << size;
5517+    return size;
5518+  } else {
5519+    MS_LOG(ERROR) << "lite_graph is nullptr";
5520+    return 0;
5521+  }
5522+}
5523+
5524+}  // namespace lite
5525+}  // namespace mindspore
5526\ No newline at end of file
5527diff --git a/mindspore/lite/mindir/src/mindir_nnrt_lite_graph_to_model.cc b/mindspore/lite/mindir/src/mindir_nnrt_lite_graph_to_model.cc
5528new file mode 100644
5529index 00000000..dd9202e2
5530--- /dev/null
5531+++ b/mindspore/lite/mindir/src/mindir_nnrt_lite_graph_to_model.cc
5532@@ -0,0 +1,1496 @@
5533+/**
5534+ * Copyright 2021 Huawei Technologies Co., Ltd
5535+ *
5536+ * Licensed under the Apache License, Version 2.0 (the "License");
5537+ * you may not use this file except in compliance with the License.
5538+ * You may obtain a copy of the License at
5539+ *
5540+ * http://www.apache.org/licenses/LICENSE-2.0
5541+ *
5542+ * Unless required by applicable law or agreed to in writing, software
5543+ * distributed under the License is distributed on an "AS IS" BASIS,
5544+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
5545+ * See the License for the specific language governing permissions and
5546+ * limitations under the License.
5547+ */
5548+#include "mindir.h"
5549+#include <vector>
5550+#include <algorithm>
5551+#include <sys/mman.h>
5552+#include "src/common/log.h"
5553+#include "lite_graph.h"
5554+#include "schema/model_generated.h"
5555+#include "mindir_types.h"
5556+#include "message_parcel.h"
5557+#include "nnrt/v1_0/nnrt_types.h"
5558+#include "nnrt/v1_0/node_attr_types.h"
5559+#include "nnrt/v1_0/model_types.h"
5560+
5561+using namespace OHOS::HDI::Nnrt::V1_0;
5562+namespace mindspore {
5563+namespace lite {
5564+
5565+constexpr size_t kNumTwo = 2;
5566+constexpr size_t kNumFour = 4;
5567+constexpr size_t kNumEight = 8;
5568+
5569+inline std::vector<OHOS::HDI::Nnrt::V1_0::QuantParam> MindIR_Tensor_GetQuantParams_OHOS(TensorPtr tensor) {
5570+  if (tensor != nullptr) {
5571+    auto value = static_cast<schema::Tensor *>(tensor);
5572+
5573+    if (value != nullptr) {
5574+      std::vector<OHOS::HDI::Nnrt::V1_0::QuantParam> result;
5575+      auto src = value->quantParams();
5576+      if (src == nullptr) {
5577+        return {};
5578+      }
5579+      size_t size = src->size();
5580+      result.reserve(src->size());
5581+      for (size_t i = 0; i < size; i++) {
5582+        auto tmp = src->Get(i);
5583+        OHOS::HDI::Nnrt::V1_0::QuantParam quantParam{tmp->numBits(), tmp->zeroPoint(), tmp->scale()};
5584+        result.emplace_back(quantParam);
5585+      }
5586+      return result;
5587+    } else {
5588+      return {};
5589+    }
5590+  } else {
5591+    return {};
5592+  }
5593+}
5594+
5595+void MindIR_Model_Destroy(OHOS::HDI::Nnrt::V1_0::Model **model) {
5596+  if (model != nullptr) {
5597+    auto model_data = *model;
5598+    if (model_data != nullptr) {
5599+      delete (model_data);
5600+      *model = nullptr;
5601+    } else {
5602+      MS_LOG(ERROR) << "*model is nullptr, desrtoy model fail.";
5603+    }
5604+  }
5605+}
5606+
5607+OHOS::HDI::Nnrt::V1_0::Model *MindIR_LiteGraph_To_Model(const LiteGraph *lite_graph, const SharedBuffer &buffer) {
5608+  if (lite_graph != nullptr) {
5609+    MS_LOG(INFO) << "MindIR_LiteGraph_To_Model begin";
5610+    if (!lite_graph->name_.empty()) {
5611+      MS_LOG(INFO) << "Start converting lite graph,name =" << lite_graph->name_;
5612+    } else {
5613+      MS_LOG(INFO) << "Start converting lite graph, but lite graph has no name.";
5614+    }
5615+    std::vector<uint32_t> inputIndex;
5616+    std::vector<uint32_t> outputIndex;
5617+    std::vector<OHOS::HDI::Nnrt::V1_0::Node> nodes;
5618+    std::vector<OHOS::HDI::Nnrt::V1_0::Tensor> allTensors;
5619+    std::vector<OHOS::HDI::Nnrt::V1_0::SubGraph> subGraph;
5620+    // nodes
5621+    MS_LOG(INFO) << "Start converting nodes, vector size = " << lite_graph->all_nodes_.size();
5622+    nodes.reserve(lite_graph->all_nodes_.size());
5623+    for (auto node : lite_graph->all_nodes_) {
5624+      if (node == nullptr) {
5625+        MS_LOG(ERROR) << "node is nullptr, convert fail.";
5626+        return nullptr;
5627+      }
5628+      OHOS::HDI::Nnrt::V1_0::Node tmp;
5629+      tmp.name = node->name_;
5630+      if (node->primitive_ == nullptr) {
5631+        MS_LOG(ERROR) << "node primitive is nullptr, convert fail.";
5632+        return nullptr;
5633+      }
5634+      auto prim = static_cast<schema::Primitive *>(node->primitive_);
5635+      auto value = prim->value_type();
5636+      tmp.nodeType = static_cast<HDI::Nnrt::V1_0::NodeType>(value);
5637+      tmp.nodeAttr = Convert(static_cast<NodeType>(value), node->primitive_);
5638+      tmp.inputIndex = node->input_indices_;
5639+      tmp.outputIndex = node->output_indices_;
5640+      tmp.quantType = static_cast<HDI::Nnrt::V1_0::QuantType>(node->quant_type_);
5641+      nodes.emplace_back(tmp);
5642+    }
5643+
5644+    MS_LOG(INFO) << "Start converting Tensor,Tensor size=" << lite_graph->all_tensors_.size();
5645+    // Tensor
5646+    allTensors.reserve(lite_graph->all_tensors_.size());
5647+    unsigned int tensor_buffer_offset = 0;
5648+    uint8_t *mmap_ptr = nullptr;
5649+    if (buffer.fd != -1) {
5650+      mmap_ptr =
5651+        static_cast<uint8_t *>(mmap(nullptr, buffer.bufferSize, PROT_READ | PROT_WRITE, MAP_SHARED, buffer.fd, 0));
5652+      if (mmap_ptr == MAP_FAILED) {
5653+        MS_LOG(ERROR) << "mmap failed";
5654+        return nullptr;
5655+      }
5656+    }
5657+    MS_LOG(INFO) << "Start parsing tensor, mmap buffer size = " << buffer.bufferSize;
5658+    for (auto tensor : lite_graph->all_tensors_) {
5659+      OHOS::HDI::Nnrt::V1_0::Tensor tmp;
5660+      tmp.name = MindIR_Tensor_GetName(tensor);
5661+      tmp.dataType = static_cast<HDI::Nnrt::V1_0::HDI::Nnrt::V1_0::DataType>(MindIR_Tensor_GetDataType(tensor));
5662+      tmp.dims = MindIR_Tensor_GetDims(tensor);
5663+      tmp.format = static_cast<HDI::Nnrt::V1_0::HDI::Nnrt::V1_0::Format>(MindIR_Tensor_GetFormat(tensor));
5664+      tmp.data = MindIR_Tensor_GetData(tensor, buffer, mmap_ptr, tensor_buffer_offset);
5665+      tmp.quantParams = MindIR_Tensor_GetQuantParams_OHOS(tensor);
5666+      allTensors.emplace_back(tmp);
5667+      tensor_buffer_offset = tmp.data.offset + tmp.data.dataSize;
5668+    }
5669+    MS_LOG(INFO) << ("Parsing tensor finish.");
5670+    if (buffer.fd != -1) {
5671+      auto munmap_res = munmap(mmap_ptr, buffer.bufferSize);
5672+      if (munmap_res != 0) {
5673+        MS_LOG(ERROR) << "unmap failed.";
5674+        return nullptr;
5675+      }
5676+    }
5677+
5678+    MS_LOG(INFO) << "Start converting SubGraph,SubGraph size=" << lite_graph->sub_graphs_.size();
5679+    // SubGraph
5680+    subGraph.reserve(lite_graph->sub_graphs_.size());
5681+    for (auto graph : lite_graph->sub_graphs_) {
5682+      OHOS::HDI::Nnrt::V1_0::SubGraph tmp;
5683+      tmp.name = graph->name_;
5684+      tmp.inputIndices = std::vector<uint32_t>(graph->input_indices_);
5685+      tmp.outputIndices = std::vector<uint32_t>(graph->output_indices_);
5686+      tmp.nodeIndices = std::vector<uint32_t>(graph->node_indices_);
5687+      subGraph.emplace_back(tmp);
5688+    }
5689+
5690+    MS_LOG(INFO) << "Start copying model";
5691+    auto *ret_model = new (std::nothrow) Model();
5692+    if (ret_model == nullptr) {
5693+      MS_LOG(ERROR) << "new Model failed.";
5694+      return nullptr;
5695+    }
5696+    ret_model->name = lite_graph->name_;
5697+    ret_model->inputIndex = lite_graph->input_indices_;
5698+    ret_model->outputIndex = lite_graph->output_indices_;
5699+    ret_model->nodes = nodes;
5700+    ret_model->allTensors = allTensors;
5701+    ret_model->subGraph = subGraph;
5702+    MS_LOG(INFO) << "MindIR_LiteGraph_To_Model success";
5703+    return ret_model;
5704+  } else {
5705+    MS_LOG(ERROR) << "lite graph is nullptr";
5706+    return nullptr;
5707+  }
5708+}
5709+
5710+std::vector<int8_t> ConvertActivation(PrimitivePtr primitive) {
5711+  if (primitive != nullptr) {
5712+    auto prim = static_cast<schema::Primitive *>(primitive);
5713+    auto value = prim->value_as_Activation();
5714+    if (value != nullptr) {
5715+      Activation activation{};
5716+      activation.activationType =
5717+        static_cast<HDI::Nnrt::V1_0::HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
5718+      activation.alpha = value->alpha();
5719+      activation.minVal = value->min_val();
5720+      activation.maxVal = value->max_val();
5721+      activation.approximate = value->approximate();
5722+      OHOS::MessageParcel data;
5723+      (void)ActivationBlockMarshalling(data, activation);
5724+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
5725+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
5726+      return ret;
5727+    } else {
5728+      return {};
5729+    }
5730+  } else {
5731+    return {};
5732+  }
5733+}
5734+std::vector<int8_t> ConvertAddFusion(PrimitivePtr primitive) {
5735+  if (primitive != nullptr) {
5736+    auto prim = static_cast<schema::Primitive *>(primitive);
5737+    auto value = prim->value_as_AddFusion();
5738+    if (value != nullptr) {
5739+      AddFusion add_fusion{};
5740+      add_fusion.activationType = static_cast<HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
5741+      OHOS::MessageParcel data;
5742+      (void)AddFusionBlockMarshalling(data, add_fusion);
5743+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
5744+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
5745+      return ret;
5746+    } else {
5747+      return {};
5748+    }
5749+  } else {
5750+    return {};
5751+  }
5752+}
5753+std::vector<int8_t> ConvertArgMaxFusion(PrimitivePtr primitive) {
5754+  if (primitive != nullptr) {
5755+    auto prim = static_cast<schema::Primitive *>(primitive);
5756+    auto value = prim->value_as_ArgMaxFusion();
5757+    if (value != nullptr) {
5758+      ArgMaxFusion arg_max_fusion{};
5759+      arg_max_fusion.axis = value->axis();
5760+      arg_max_fusion.topK = value->top_k();
5761+      arg_max_fusion.keepDims = value->keep_dims();
5762+      arg_max_fusion.outMaxValue = value->out_max_value();
5763+      OHOS::MessageParcel data;
5764+      (void)ArgMaxFusionBlockMarshalling(data, arg_max_fusion);
5765+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
5766+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
5767+      return ret;
5768+    } else {
5769+      return {};
5770+    }
5771+  } else {
5772+    return {};
5773+  }
5774+}
5775+std::vector<int8_t> ConvertAvgPoolFusion(PrimitivePtr primitive) {
5776+  if (primitive != nullptr) {
5777+    auto prim = static_cast<schema::Primitive *>(primitive);
5778+    auto value = prim->value_as_AvgPoolFusion();
5779+    if (value != nullptr) {
5780+      AvgPoolFusion avg_pool_fusion{};
5781+      std::vector<int64_t> kernel_size;
5782+      kernel_size.reserve(kNumTwo);
5783+      if (value->kernel_size() == nullptr || value->kernel_size()->size() < kNumTwo) {
5784+        kernel_size = {};
5785+      } else {
5786+        kernel_size = std::vector<int64_t>(value->kernel_size()->begin(), value->kernel_size()->end());
5787+      }
5788+      std::vector<int64_t> strides;
5789+      strides.reserve(kNumTwo);
5790+      if (value->strides() == nullptr || value->strides()->size() < kNumTwo) {
5791+        strides = {};
5792+      } else {
5793+        strides = std::vector<int64_t>(value->strides()->begin(), value->strides()->end());
5794+      }
5795+      std::vector<int64_t> padList;
5796+      strides.reserve(kNumTwo);
5797+      if (value->pad() == nullptr || value->pad()->size() < kNumFour) {
5798+        padList = {};
5799+      } else {
5800+        padList = std::vector<int64_t>(value->pad()->begin(), value->pad()->end());
5801+      }
5802+      avg_pool_fusion.kernelSize = kernel_size;
5803+      avg_pool_fusion.strides = strides;
5804+      avg_pool_fusion.pad = padList;
5805+      avg_pool_fusion.padMode = static_cast<HDI::Nnrt::V1_0::PadMode>(value->pad_mode());
5806+      avg_pool_fusion.roundMode = static_cast<HDI::Nnrt::V1_0::RoundMode>(value->round_mode());
5807+      avg_pool_fusion.format = static_cast<HDI::Nnrt::V1_0::Format>(value->format());
5808+      avg_pool_fusion.global = value->global();
5809+      avg_pool_fusion.activationType = static_cast<HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
5810+      OHOS::MessageParcel data;
5811+      (void)AvgPoolFusionBlockMarshalling(data, avg_pool_fusion);
5812+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
5813+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
5814+      return ret;
5815+    } else {
5816+      return {};
5817+    }
5818+  } else {
5819+    return {};
5820+  }
5821+}
5822+std::vector<int8_t> ConvertBatchToSpaceND(PrimitivePtr primitive) {
5823+  if (primitive != nullptr) {
5824+    auto prim = static_cast<schema::Primitive *>(primitive);
5825+    auto value = prim->value_as_BatchToSpaceND();
5826+    if (value != nullptr) {
5827+      BatchToSpaceND batch_to_space_n_d{};
5828+      std::vector<int64_t> blockShape;
5829+      blockShape.reserve(kNumTwo);
5830+      if (value->block_shape() == nullptr || value->block_shape()->size() < kNumTwo) {
5831+        blockShape = {0, 0};
5832+      } else {
5833+        blockShape = std::vector<int64_t>(value->block_shape()->begin(), value->block_shape()->end());
5834+      }
5835+      batch_to_space_n_d.blockShape = blockShape;
5836+      auto crops = value->crops();
5837+      std::vector<std::vector<int64_t>> crops_vec2d;
5838+      if (crops->data() == nullptr) {
5839+        MS_LOG(ERROR) << "crops_data is nullptr";
5840+        crops_vec2d = {{}};
5841+      } else {
5842+        crops_vec2d.reserve(crops->data()->size());
5843+        for (size_t i = 0; i < crops->data()->size(); i++) {
5844+          auto vet = crops->data()->Get(i);
5845+          crops_vec2d.emplace_back(std::vector<int64_t>(vet->data()->begin(), vet->data()->end()));
5846+        }
5847+      }
5848+      batch_to_space_n_d.crops = crops_vec2d;
5849+      OHOS::MessageParcel data;
5850+      (void)BatchToSpaceNDBlockMarshalling(data, batch_to_space_n_d);
5851+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
5852+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
5853+      return ret;
5854+    } else {
5855+      return {};
5856+    }
5857+  } else {
5858+    return {};
5859+  }
5860+}
5861+std::vector<int8_t> ConvertBiasAdd(PrimitivePtr primitive) {
5862+  if (primitive != nullptr) {
5863+    auto prim = static_cast<schema::Primitive *>(primitive);
5864+    auto value = prim->value_as_BiasAdd();
5865+    if (value != nullptr) {
5866+      BiasAdd bias_add{};
5867+      OHOS::MessageParcel data;
5868+      (void)BiasAddBlockMarshalling(data, bias_add);
5869+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
5870+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
5871+      return ret;
5872+    } else {
5873+      return {};
5874+    }
5875+  } else {
5876+    return {};
5877+  }
5878+}
5879+std::vector<int8_t> ConvertCast(PrimitivePtr primitive) {
5880+  if (primitive != nullptr) {
5881+    auto prim = static_cast<schema::Primitive *>(primitive);
5882+    auto value = prim->value_as_Cast();
5883+    if (value != nullptr) {
5884+      Cast cast{};
5885+      OHOS::MessageParcel data;
5886+      (void)CastBlockMarshalling(data, cast);
5887+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
5888+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
5889+      return ret;
5890+    } else {
5891+      return {};
5892+    }
5893+  } else {
5894+    return {};
5895+  }
5896+}
5897+std::vector<int8_t> ConvertConcat(PrimitivePtr primitive) {
5898+  if (primitive != nullptr) {
5899+    auto prim = static_cast<schema::Primitive *>(primitive);
5900+    auto value = prim->value_as_Concat();
5901+    if (value != nullptr) {
5902+      Concat concat{};
5903+      concat.axis = value->axis();
5904+      OHOS::MessageParcel data;
5905+      (void)ConcatBlockMarshalling(data, concat);
5906+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
5907+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
5908+      return ret;
5909+    } else {
5910+      return {};
5911+    }
5912+  } else {
5913+    return {};
5914+  }
5915+}
5916+std::vector<int8_t> ConvertConv2DFusion(PrimitivePtr primitive) {
5917+  if (primitive != nullptr) {
5918+    auto prim = static_cast<schema::Primitive *>(primitive);
5919+    auto value = prim->value_as_Conv2DFusion();
5920+    if (value != nullptr) {
5921+      Conv2DFusion conv2_d_fusion{};
5922+      std::vector<int64_t> kernel_size;
5923+      kernel_size.reserve(kNumTwo);
5924+      if (value->kernel_size() == nullptr || value->kernel_size()->size() < kNumTwo) {
5925+        kernel_size = {};
5926+      } else {
5927+        kernel_size = std::vector<int64_t>(value->kernel_size()->begin(), value->kernel_size()->end());
5928+      }
5929+      std::vector<int64_t> strides;
5930+      strides.reserve(kNumTwo);
5931+      if (value->stride() == nullptr || value->stride()->size() < kNumTwo) {
5932+        strides = {};
5933+      } else {
5934+        strides = std::vector<int64_t>(value->stride()->begin(), value->stride()->end());
5935+      }
5936+      std::vector<int64_t> dilation;
5937+      dilation.reserve(kNumTwo);
5938+      if (value->dilation() == nullptr || value->dilation()->size() < kNumTwo) {
5939+        dilation = {};
5940+      } else {
5941+        dilation = std::vector<int64_t>(value->dilation()->begin(), value->dilation()->end());
5942+      }
5943+      std::vector<int64_t> padList;
5944+      strides.reserve(kNumTwo);
5945+      if (value->pad_list() == nullptr || value->pad_list()->size() < kNumFour) {
5946+        padList = {};
5947+      } else {
5948+        padList = std::vector<int64_t>(value->pad_list()->begin(), value->pad_list()->end());
5949+      }
5950+      conv2_d_fusion.kernelSize = kernel_size;
5951+      conv2_d_fusion.stride = strides;
5952+      conv2_d_fusion.dilation = dilation;
5953+      conv2_d_fusion.padMode = static_cast<HDI::Nnrt::V1_0::PadMode>(value->pad_mode());
5954+      conv2_d_fusion.padList = padList;
5955+      conv2_d_fusion.group = value->group();
5956+      conv2_d_fusion.inChannel = value->in_channel();
5957+      conv2_d_fusion.outChannel = value->out_channel();
5958+      conv2_d_fusion.activationType = static_cast<HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
5959+      OHOS::MessageParcel data;
5960+      (void)Conv2DFusionBlockMarshalling(data, conv2_d_fusion);
5961+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
5962+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
5963+      return ret;
5964+    } else {
5965+      return {};
5966+    }
5967+  } else {
5968+    return {};
5969+  }
5970+}
5971+std::vector<int8_t> ConvertConv2dTransposeFusion(PrimitivePtr primitive) {
5972+  if (primitive != nullptr) {
5973+    auto prim = static_cast<schema::Primitive *>(primitive);
5974+    auto value = prim->value_as_Conv2dTransposeFusion();
5975+    if (value != nullptr) {
5976+      Conv2dTransposeFusion conv2d_transpose_fusion{};
5977+      std::vector<int64_t> kernel_size;
5978+      kernel_size.reserve(kNumTwo);
5979+      if (value->kernel_size() == nullptr || value->kernel_size()->size() < kNumTwo) {
5980+        kernel_size = {};
5981+      } else {
5982+        kernel_size = std::vector<int64_t>(value->kernel_size()->begin(), value->kernel_size()->end());
5983+      }
5984+      std::vector<int64_t> strides;
5985+      strides.reserve(kNumTwo);
5986+      if (value->stride() == nullptr || value->stride()->size() < kNumTwo) {
5987+        strides = {};
5988+      } else {
5989+        strides = std::vector<int64_t>(value->stride()->begin(), value->stride()->end());
5990+      }
5991+      std::vector<int64_t> dilation;
5992+      dilation.reserve(kNumTwo);
5993+      if (value->dilation() == nullptr || value->dilation()->size() < kNumTwo) {
5994+        dilation = {};
5995+      } else {
5996+        dilation = std::vector<int64_t>(value->dilation()->begin(), value->dilation()->end());
5997+      }
5998+      std::vector<int64_t> padList;
5999+      strides.reserve(kNumTwo);
6000+      if (value->pad_list() == nullptr || value->pad_list()->size() < kNumFour) {
6001+        padList = {};
6002+      } else {
6003+        padList = std::vector<int64_t>(value->pad_list()->begin(), value->pad_list()->end());
6004+      }
6005+      std::vector<int64_t> output_paddings;
6006+      output_paddings.reserve(kNumTwo);
6007+      if (value->output_paddings() == nullptr || value->output_paddings()->size() < kNumTwo) {
6008+        output_paddings = {};
6009+      } else {
6010+        output_paddings = std::vector<int64_t>(value->output_paddings()->begin(), value->output_paddings()->end());
6011+      }
6012+      conv2d_transpose_fusion.kernelSize = kernel_size;
6013+      conv2d_transpose_fusion.stride = strides;
6014+      conv2d_transpose_fusion.dilation = dilation;
6015+      conv2d_transpose_fusion.padMode = static_cast<HDI::Nnrt::V1_0::PadMode>(value->pad_mode());
6016+      conv2d_transpose_fusion.padList = padList;
6017+      conv2d_transpose_fusion.group = value->group();
6018+      conv2d_transpose_fusion.inChannel = value->in_channel();
6019+      conv2d_transpose_fusion.outChannel = value->out_channel();
6020+      conv2d_transpose_fusion.activationType = static_cast<HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
6021+      conv2d_transpose_fusion.outputPaddings = output_paddings;
6022+      OHOS::MessageParcel data;
6023+      (void)Conv2dTransposeFusionBlockMarshalling(data, conv2d_transpose_fusion);
6024+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6025+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6026+      return ret;
6027+    } else {
6028+      return {};
6029+    }
6030+  } else {
6031+    return {};
6032+  }
6033+}
6034+std::vector<int8_t> ConvertDivFusion(PrimitivePtr primitive) {
6035+  if (primitive != nullptr) {
6036+    auto prim = static_cast<schema::Primitive *>(primitive);
6037+    auto value = prim->value_as_DivFusion();
6038+    if (value != nullptr) {
6039+      DivFusion div_fusion{};
6040+      div_fusion.activationType = static_cast<HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
6041+      OHOS::MessageParcel data;
6042+      (void)DivFusionBlockMarshalling(data, div_fusion);
6043+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6044+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6045+      return ret;
6046+    } else {
6047+      return {};
6048+    }
6049+  } else {
6050+    return {};
6051+  }
6052+}
6053+std::vector<int8_t> ConvertEltwise(PrimitivePtr primitive) {
6054+  if (primitive != nullptr) {
6055+    auto prim = static_cast<schema::Primitive *>(primitive);
6056+    auto value = prim->value_as_Eltwise();
6057+    if (value != nullptr) {
6058+      Eltwise eltwise{};
6059+      eltwise.mode = static_cast<HDI::Nnrt::V1_0::EltwiseMode>(value->mode());
6060+      OHOS::MessageParcel data;
6061+      (void)EltwiseBlockMarshalling(data, eltwise);
6062+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6063+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6064+      return ret;
6065+    } else {
6066+      return {};
6067+    }
6068+  } else {
6069+    return {};
6070+  }
6071+}
6072+std::vector<int8_t> ConvertExpandDims(PrimitivePtr primitive) {
6073+  if (primitive != nullptr) {
6074+    auto prim = static_cast<schema::Primitive *>(primitive);
6075+    auto value = prim->value_as_ExpandDims();
6076+    if (value != nullptr) {
6077+      ExpandDims expand_dims{};
6078+      OHOS::MessageParcel data;
6079+      (void)ExpandDimsBlockMarshalling(data, expand_dims);
6080+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6081+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6082+      return ret;
6083+    } else {
6084+      return {};
6085+    }
6086+  } else {
6087+    return {};
6088+  }
6089+}
6090+std::vector<int8_t> ConvertFill(PrimitivePtr primitive) {
6091+  if (primitive != nullptr) {
6092+    auto prim = static_cast<schema::Primitive *>(primitive);
6093+    auto value = prim->value_as_Fill();
6094+    if (value != nullptr) {
6095+      Fill fill{};
6096+      OHOS::MessageParcel data;
6097+      (void)FillBlockMarshalling(data, fill);
6098+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6099+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6100+      return ret;
6101+    } else {
6102+      return {};
6103+    }
6104+  } else {
6105+    return {};
6106+  }
6107+}
6108+std::vector<int8_t> ConvertFullConnection(PrimitivePtr primitive) {
6109+  if (primitive != nullptr) {
6110+    auto prim = static_cast<schema::Primitive *>(primitive);
6111+    auto value = prim->value_as_FullConnection();
6112+    if (value != nullptr) {
6113+      FullConnection full_connection{};
6114+      full_connection.hasBias = value->has_bias();
6115+      full_connection.useAxis = value->use_axis();
6116+      full_connection.axis = value->axis();
6117+      full_connection.activationType = static_cast<HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
6118+      OHOS::MessageParcel data;
6119+      (void)FullConnectionBlockMarshalling(data, full_connection);
6120+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6121+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6122+      return ret;
6123+    } else {
6124+      return {};
6125+    }
6126+  } else {
6127+    return {};
6128+  }
6129+}
6130+std::vector<int8_t> ConvertFusedBatchNorm(PrimitivePtr primitive) {
6131+  if (primitive != nullptr) {
6132+    auto prim = static_cast<schema::Primitive *>(primitive);
6133+    auto value = prim->value_as_FusedBatchNorm();
6134+    if (value != nullptr) {
6135+      FusedBatchNorm fused_batch_norm{};
6136+      fused_batch_norm.epsilon = value->epsilon();
6137+      OHOS::MessageParcel data;
6138+      (void)FusedBatchNormBlockMarshalling(data, fused_batch_norm);
6139+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6140+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6141+      return ret;
6142+    } else {
6143+      return {};
6144+    }
6145+  } else {
6146+    return {};
6147+  }
6148+}
6149+std::vector<int8_t> ConvertGather(PrimitivePtr primitive) {
6150+  if (primitive != nullptr) {
6151+    auto prim = static_cast<schema::Primitive *>(primitive);
6152+    auto value = prim->value_as_Gather();
6153+    if (value != nullptr) {
6154+      Gather gather{};
6155+      OHOS::MessageParcel data;
6156+      (void)GatherBlockMarshalling(data, gather);
6157+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6158+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6159+      return ret;
6160+    } else {
6161+      return {};
6162+    }
6163+  } else {
6164+    return {};
6165+  }
6166+}
6167+std::vector<int8_t> ConvertLayerNormFusion(PrimitivePtr primitive) {
6168+  if (primitive != nullptr) {
6169+    auto prim = static_cast<schema::Primitive *>(primitive);
6170+    auto value = prim->value_as_LayerNormFusion();
6171+    if (value != nullptr) {
6172+      LayerNormFusion layer_norm_fusion{};
6173+      layer_norm_fusion.beginNormAxis = value->begin_norm_axis();
6174+      layer_norm_fusion.epsilon = value->epsilon();
6175+      layer_norm_fusion.elementwiseAffine = value->elementwise_affine();
6176+      layer_norm_fusion.beginParamsAxis = value->begin_params_axis();
6177+      OHOS::MessageParcel data;
6178+      (void)LayerNormFusionBlockMarshalling(data, layer_norm_fusion);
6179+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6180+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6181+      return ret;
6182+    } else {
6183+      return {};
6184+    }
6185+  } else {
6186+    return {};
6187+  }
6188+}
6189+std::vector<int8_t> ConvertLessEqual(PrimitivePtr primitive) {
6190+  if (primitive != nullptr) {
6191+    auto prim = static_cast<schema::Primitive *>(primitive);
6192+    auto value = prim->value_as_LessEqual();
6193+    if (value != nullptr) {
6194+      LessEqual less_equal{};
6195+      OHOS::MessageParcel data;
6196+      (void)LessEqualBlockMarshalling(data, less_equal);
6197+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6198+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6199+      return ret;
6200+    } else {
6201+      return {};
6202+    }
6203+  } else {
6204+    return {};
6205+  }
6206+}
6207+std::vector<int8_t> ConvertMatMulFusion(PrimitivePtr primitive) {
6208+  if (primitive != nullptr) {
6209+    auto prim = static_cast<schema::Primitive *>(primitive);
6210+    auto value = prim->value_as_MatMulFusion();
6211+    if (value != nullptr) {
6212+      MatMulFusion mat_mul_fusion{};
6213+      mat_mul_fusion.transposeA = value->transpose_a();
6214+      mat_mul_fusion.transposeB = value->transpose_b();
6215+      mat_mul_fusion.activationType = static_cast<HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
6216+      OHOS::MessageParcel data;
6217+      (void)MatMulFusionBlockMarshalling(data, mat_mul_fusion);
6218+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6219+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6220+      return ret;
6221+    } else {
6222+      return {};
6223+    }
6224+  } else {
6225+    return {};
6226+  }
6227+}
6228+std::vector<int8_t> ConvertMaximum(PrimitivePtr primitive) {
6229+  if (primitive != nullptr) {
6230+    auto prim = static_cast<schema::Primitive *>(primitive);
6231+    auto value = prim->value_as_Maximum();
6232+    if (value != nullptr) {
6233+      Maximum maximum{};
6234+      OHOS::MessageParcel data;
6235+      (void)MaximumBlockMarshalling(data, maximum);
6236+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6237+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6238+      return ret;
6239+    } else {
6240+      return {};
6241+    }
6242+  } else {
6243+    return {};
6244+  }
6245+}
6246+std::vector<int8_t> ConvertMaxPoolFusion(PrimitivePtr primitive) {
6247+  if (primitive != nullptr) {
6248+    auto prim = static_cast<schema::Primitive *>(primitive);
6249+    auto value = prim->value_as_MaxPoolFusion();
6250+    if (value != nullptr) {
6251+      MaxPoolFusion max_pool_fusion{};
6252+      std::vector<int64_t> kernel_size;
6253+      kernel_size.reserve(kNumTwo);
6254+      if (value->kernel_size() == nullptr || value->kernel_size()->size() < kNumTwo) {
6255+        kernel_size = {};
6256+      } else {
6257+        kernel_size = std::vector<int64_t>(value->kernel_size()->begin(), value->kernel_size()->end());
6258+      }
6259+      std::vector<int64_t> strides;
6260+      strides.reserve(kNumTwo);
6261+      if (value->strides() == nullptr || value->strides()->size() < kNumTwo) {
6262+        strides = {};
6263+      } else {
6264+        strides = std::vector<int64_t>(value->strides()->begin(), value->strides()->end());
6265+      }
6266+      std::vector<int64_t> padList;
6267+      padList.reserve(kNumFour);
6268+      if (value->pad() == nullptr || value->pad()->size() < kNumFour) {
6269+        padList = {};
6270+      } else {
6271+        padList = std::vector<int64_t>(value->pad()->begin(), value->pad()->end());
6272+      }
6273+      max_pool_fusion.kernelSize = kernel_size;
6274+      max_pool_fusion.strides = strides;
6275+      max_pool_fusion.pad = padList;
6276+      max_pool_fusion.padMode = static_cast<HDI::Nnrt::V1_0::PadMode>(value->pad_mode());
6277+      max_pool_fusion.format = static_cast<HDI::Nnrt::V1_0::Format>(value->format());
6278+      max_pool_fusion.global = value->global();
6279+      max_pool_fusion.activationType = static_cast<HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
6280+      OHOS::MessageParcel data;
6281+      (void)MaxPoolFusionBlockMarshalling(data, max_pool_fusion);
6282+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6283+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6284+      return ret;
6285+    } else {
6286+      return {};
6287+    }
6288+  } else {
6289+    return {};
6290+  }
6291+}
6292+std::vector<int8_t> ConvertMulFusion(PrimitivePtr primitive) {
6293+  if (primitive != nullptr) {
6294+    auto prim = static_cast<schema::Primitive *>(primitive);
6295+    auto value = prim->value_as_MulFusion();
6296+    if (value != nullptr) {
6297+      MulFusion mul_fusion{};
6298+      mul_fusion.activationType = static_cast<HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
6299+      OHOS::MessageParcel data;
6300+      (void)MulFusionBlockMarshalling(data, mul_fusion);
6301+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6302+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6303+      return ret;
6304+    } else {
6305+      return {};
6306+    }
6307+  } else {
6308+    return {};
6309+  }
6310+}
6311+std::vector<int8_t> ConvertOneHot(PrimitivePtr primitive) {
6312+  if (primitive != nullptr) {
6313+    auto prim = static_cast<schema::Primitive *>(primitive);
6314+    auto value = prim->value_as_OneHot();
6315+    if (value != nullptr) {
6316+      OneHot one_hot{};
6317+      one_hot.axis = value->axis();
6318+      OHOS::MessageParcel data;
6319+      (void)OneHotBlockMarshalling(data, one_hot);
6320+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6321+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6322+      return ret;
6323+    } else {
6324+      return {};
6325+    }
6326+  } else {
6327+    return {};
6328+  }
6329+}
6330+std::vector<int8_t> ConvertPadFusion(PrimitivePtr primitive) {
6331+  if (primitive != nullptr) {
6332+    auto prim = static_cast<schema::Primitive *>(primitive);
6333+    auto value = prim->value_as_PadFusion();
6334+    if (value != nullptr) {
6335+      PadFusion pad_fusion{};
6336+      auto paddings = value->paddings();
6337+      std::vector<std::vector<int64_t>> paddings_vec2d;
6338+      if (paddings == nullptr || paddings->data()->size() < kNumTwo) {
6339+        paddings_vec2d = {{0}, {0}, {0}, {0}};
6340+      } else {
6341+        paddings_vec2d.reserve(paddings->data()->size());
6342+        for (size_t i = 0; i < paddings->data()->size(); i++) {
6343+          auto vet = paddings->data()->Get(i);
6344+          paddings_vec2d.emplace_back(std::vector<int64_t>(vet->data()->begin(), vet->data()->end()));
6345+        }
6346+      }
6347+      pad_fusion.paddings = paddings_vec2d;
6348+      pad_fusion.paddingMode = static_cast<HDI::Nnrt::V1_0::PaddingMode>(value->padding_mode());
6349+      pad_fusion.constantValue = value->constant_value();
6350+      OHOS::MessageParcel data;
6351+      (void)PadFusionBlockMarshalling(data, pad_fusion);
6352+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6353+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6354+      return ret;
6355+    } else {
6356+      return {};
6357+    }
6358+  } else {
6359+    return {};
6360+  }
6361+}
6362+std::vector<int8_t> ConvertPowFusion(PrimitivePtr primitive) {
6363+  if (primitive != nullptr) {
6364+    auto prim = static_cast<schema::Primitive *>(primitive);
6365+    auto value = prim->value_as_PowFusion();
6366+    if (value != nullptr) {
6367+      PowFusion pow_fusion{};
6368+      pow_fusion.scale = value->scale();
6369+      pow_fusion.shift = value->shift();
6370+      OHOS::MessageParcel data;
6371+      (void)PowFusionBlockMarshalling(data, pow_fusion);
6372+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6373+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6374+      return ret;
6375+    } else {
6376+      return {};
6377+    }
6378+  } else {
6379+    return {};
6380+  }
6381+}
6382+std::vector<int8_t> ConvertPReLUFusion(PrimitivePtr primitive) {
6383+  if (primitive != nullptr) {
6384+    auto prim = static_cast<schema::Primitive *>(primitive);
6385+    auto value = prim->value_as_PReLUFusion();
6386+    if (value != nullptr) {
6387+      PReLUFusion p_re_l_u_fusion{};
6388+      p_re_l_u_fusion.channelShared = value->channel_shared();
6389+      OHOS::MessageParcel data;
6390+      (void)PReLUFusionBlockMarshalling(data, p_re_l_u_fusion);
6391+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6392+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6393+      return ret;
6394+    } else {
6395+      return {};
6396+    }
6397+  } else {
6398+    return {};
6399+  }
6400+}
6401+std::vector<int8_t> ConvertQuantDTypeCast(PrimitivePtr primitive) {
6402+  if (primitive != nullptr) {
6403+    auto prim = static_cast<schema::Primitive *>(primitive);
6404+    auto value = prim->value_as_QuantDTypeCast();
6405+    if (value != nullptr) {
6406+      QuantDTypeCast quant_d_type_cast{};
6407+      quant_d_type_cast.srcT = value->src_t();
6408+      quant_d_type_cast.dstT = value->dst_t();
6409+      OHOS::MessageParcel data;
6410+      (void)QuantDTypeCastBlockMarshalling(data, quant_d_type_cast);
6411+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6412+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6413+      return ret;
6414+    } else {
6415+      return {};
6416+    }
6417+  } else {
6418+    return {};
6419+  }
6420+}
6421+std::vector<int8_t> ConvertReduceFusion(PrimitivePtr primitive) {
6422+  if (primitive != nullptr) {
6423+    auto prim = static_cast<schema::Primitive *>(primitive);
6424+    auto value = prim->value_as_ReduceFusion();
6425+    if (value != nullptr) {
6426+      ReduceFusion reduce_fusion{};
6427+      reduce_fusion.keepDims = value->keep_dims();
6428+      reduce_fusion.mode = static_cast<HDI::Nnrt::V1_0::ReduceMode>(value->mode());
6429+      reduce_fusion.reduceToEnd = value->reduce_to_end();
6430+      reduce_fusion.coeff = value->coeff();
6431+      OHOS::MessageParcel data;
6432+      (void)ReduceFusionBlockMarshalling(data, reduce_fusion);
6433+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6434+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6435+      return ret;
6436+    } else {
6437+      return {};
6438+    }
6439+  } else {
6440+    return {};
6441+  }
6442+}
6443+std::vector<int8_t> ConvertReshape(PrimitivePtr primitive) {
6444+  if (primitive != nullptr) {
6445+    auto prim = static_cast<schema::Primitive *>(primitive);
6446+    auto value = prim->value_as_Reshape();
6447+    if (value != nullptr) {
6448+      Reshape reshape{};
6449+      OHOS::MessageParcel data;
6450+      (void)ReshapeBlockMarshalling(data, reshape);
6451+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6452+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6453+      return ret;
6454+    } else {
6455+      return {};
6456+    }
6457+  } else {
6458+    return {};
6459+  }
6460+}
6461+
6462+std::vector<int8_t> ConvertResize(PrimitivePtr primitive) {
6463+  if (primitive != nullptr) {
6464+    auto prim = static_cast<schema::Primitive *>(primitive);
6465+    auto value = prim->value_as_Resize();
6466+    if (value != nullptr) {
6467+      Resize resize{};
6468+      resize.method = static_cast<HDI::Nnrt::V1_0::ResizeMethod>(value->method());
6469+      resize.newHeight = value->new_height();
6470+      resize.newWidth = value->new_width();
6471+      resize.preserveAspectRatio = value->preserve_aspect_ratio();
6472+      resize.coordinateTransformMode =
6473+        static_cast<HDI::Nnrt::V1_0::CoordinateTransformMode>(value->coordinate_transform_mode());
6474+      resize.cubicCoeff = value->cubic_coeff();
6475+      resize.excludeOutside = value->exclude_outside();
6476+      resize.extrapolationValue = value->extrapolation_value();
6477+      resize.nearestMode = static_cast<HDI::Nnrt::V1_0::NearestMode>(value->nearest_mode());
6478+      OHOS::MessageParcel data;
6479+      (void)ResizeBlockMarshalling(data, resize);
6480+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6481+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6482+      return ret;
6483+    } else {
6484+      return {};
6485+    }
6486+  } else {
6487+    return {};
6488+  }
6489+}
6490+std::vector<int8_t> ConvertRsqrt(PrimitivePtr primitive) {
6491+  if (primitive != nullptr) {
6492+    auto prim = static_cast<schema::Primitive *>(primitive);
6493+    auto value = prim->value_as_Rsqrt();
6494+    if (value != nullptr) {
6495+      Rsqrt rsqrt{};
6496+      OHOS::MessageParcel data;
6497+      (void)RsqrtBlockMarshalling(data, rsqrt);
6498+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6499+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6500+      return ret;
6501+    } else {
6502+      return {};
6503+    }
6504+  } else {
6505+    return {};
6506+  }
6507+}
6508+std::vector<int8_t> ConvertScaleFusion(PrimitivePtr primitive) {
6509+  if (primitive != nullptr) {
6510+    auto prim = static_cast<schema::Primitive *>(primitive);
6511+    auto value = prim->value_as_ScaleFusion();
6512+    if (value != nullptr) {
6513+      ScaleFusion scale_fusion{};
6514+      scale_fusion.axis = value->axis();
6515+      scale_fusion.activationType = static_cast<HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
6516+      OHOS::MessageParcel data;
6517+      (void)ScaleFusionBlockMarshalling(data, scale_fusion);
6518+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6519+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6520+      return ret;
6521+    } else {
6522+      return {};
6523+    }
6524+  } else {
6525+    return {};
6526+  }
6527+}
6528+std::vector<int8_t> ConvertShape(PrimitivePtr primitive) {
6529+  if (primitive != nullptr) {
6530+    auto prim = static_cast<schema::Primitive *>(primitive);
6531+    auto value = prim->value_as_Shape();
6532+    if (value != nullptr) {
6533+      Shape shape{};
6534+      OHOS::MessageParcel data;
6535+      (void)ShapeBlockMarshalling(data, shape);
6536+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6537+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6538+      return ret;
6539+    } else {
6540+      return {};
6541+    }
6542+  } else {
6543+    return {};
6544+  }
6545+}
6546+std::vector<int8_t> ConvertSliceFusion(PrimitivePtr primitive) {
6547+  if (primitive != nullptr) {
6548+    auto prim = static_cast<schema::Primitive *>(primitive);
6549+    auto value = prim->value_as_SliceFusion();
6550+    if (value != nullptr) {
6551+      SliceFusion slice_fusion{};
6552+      std::vector<int64_t> axes;
6553+      if (value->axes() == nullptr) {
6554+        axes = {1, 2, 3, 4, 5, 6, 7};
6555+      } else {
6556+        axes = std::vector<int64_t>(value->axes()->begin(), value->axes()->end());
6557+      }
6558+      slice_fusion.axes = axes;
6559+      OHOS::MessageParcel data;
6560+      (void)SliceFusionBlockMarshalling(data, slice_fusion);
6561+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6562+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6563+      return ret;
6564+    } else {
6565+      return {};
6566+    }
6567+  } else {
6568+    return {};
6569+  }
6570+}
6571+std::vector<int8_t> ConvertSoftmax(PrimitivePtr primitive) {
6572+  if (primitive != nullptr) {
6573+    auto prim = static_cast<schema::Primitive *>(primitive);
6574+    auto value = prim->value_as_Softmax();
6575+    if (value != nullptr) {
6576+      Softmax softmax{};
6577+      std::vector<int64_t> axis;
6578+      if (value->axis() == nullptr) {
6579+        axis = {};
6580+      } else {
6581+        axis = std::vector<int64_t>(value->axis()->begin(), value->axis()->end());
6582+      }
6583+      softmax.axis = axis;
6584+      OHOS::MessageParcel data;
6585+      (void)SoftmaxBlockMarshalling(data, softmax);
6586+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6587+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6588+      return ret;
6589+    } else {
6590+      return {};
6591+    }
6592+  } else {
6593+    return {};
6594+  }
6595+}
6596+std::vector<int8_t> ConvertSpaceToBatchND(PrimitivePtr primitive) {
6597+  if (primitive != nullptr) {
6598+    auto prim = static_cast<schema::Primitive *>(primitive);
6599+    auto value = prim->value_as_SpaceToBatchND();
6600+    if (value != nullptr) {
6601+      SpaceToBatchND space_to_batch_n_d{};
6602+      std::vector<int64_t> blockShape;
6603+      blockShape.reserve(kNumTwo);
6604+      if (value->block_shape() == nullptr || value->block_shape()->size() < kNumTwo) {
6605+        blockShape = {0, 0};
6606+      } else {
6607+        blockShape = std::vector<int64_t>(value->block_shape()->begin(), value->block_shape()->end());
6608+      }
6609+      space_to_batch_n_d.blockShape = blockShape;
6610+      auto paddings = value->paddings();
6611+      std::vector<std::vector<int64_t>> paddings_vec2d;
6612+      if (paddings == nullptr || paddings->data() == nullptr || paddings->data()->size() == 0 ||
6613+          *(paddings->data()->begin()) == nullptr || (*(paddings->data()->begin()))->data() == nullptr) {
6614+        paddings_vec2d = {};
6615+      } else {
6616+        paddings_vec2d.reserve(paddings->data()->size());
6617+        for (size_t i = 0; i < paddings->data()->size(); i++) {
6618+          auto vet = paddings->data()->Get(i);
6619+          paddings_vec2d.emplace_back(std::vector<int64_t>(vet->data()->begin(), vet->data()->end()));
6620+        }
6621+      }
6622+      space_to_batch_n_d.paddings = paddings_vec2d;
6623+      OHOS::MessageParcel data;
6624+      (void)SpaceToBatchNDBlockMarshalling(data, space_to_batch_n_d);
6625+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6626+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6627+      return ret;
6628+    } else {
6629+      return {};
6630+    }
6631+  } else {
6632+    return {};
6633+  }
6634+}
6635+std::vector<int8_t> ConvertSplit(PrimitivePtr primitive) {
6636+  if (primitive != nullptr) {
6637+    auto prim = static_cast<schema::Primitive *>(primitive);
6638+    auto value = prim->value_as_Split();
6639+    if (value != nullptr) {
6640+      Split split{};
6641+      split.outputNum = value->output_num();
6642+      std::vector<int64_t> sizeSplits;
6643+      sizeSplits.reserve(split.outputNum);
6644+      if (value->size_splits() == nullptr || value->size_splits()->size() <= static_cast<uint32_t>(split.outputNum)) {
6645+        sizeSplits = {};
6646+      } else {
6647+        sizeSplits = std::vector<int64_t>(value->size_splits()->begin(), value->size_splits()->end());
6648+      }
6649+      split.sizeSplits = sizeSplits;
6650+      split.axis = value->axis();
6651+      OHOS::MessageParcel data;
6652+      (void)SplitBlockMarshalling(data, split);
6653+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6654+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6655+      return ret;
6656+    } else {
6657+      return {};
6658+    }
6659+  } else {
6660+    return {};
6661+  }
6662+}
6663+std::vector<int8_t> ConvertSqrt(PrimitivePtr primitive) {
6664+  if (primitive != nullptr) {
6665+    auto prim = static_cast<schema::Primitive *>(primitive);
6666+    auto value = prim->value_as_Sqrt();
6667+    if (value != nullptr) {
6668+      Sqrt sqrt{};
6669+      OHOS::MessageParcel data;
6670+      (void)SqrtBlockMarshalling(data, sqrt);
6671+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6672+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6673+      return ret;
6674+    } else {
6675+      return {};
6676+    }
6677+  } else {
6678+    return {};
6679+  }
6680+}
6681+std::vector<int8_t> ConvertSquaredDifference(PrimitivePtr primitive) {
6682+  if (primitive != nullptr) {
6683+    auto prim = static_cast<schema::Primitive *>(primitive);
6684+    auto value = prim->value_as_SquaredDifference();
6685+    if (value != nullptr) {
6686+      SquaredDifference squared_difference{};
6687+      OHOS::MessageParcel data;
6688+      (void)SquaredDifferenceBlockMarshalling(data, squared_difference);
6689+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6690+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6691+      return ret;
6692+    } else {
6693+      return {};
6694+    }
6695+  } else {
6696+    return {};
6697+  }
6698+}
6699+std::vector<int8_t> ConvertSqueeze(PrimitivePtr primitive) {
6700+  if (primitive != nullptr) {
6701+    auto prim = static_cast<schema::Primitive *>(primitive);
6702+    auto value = prim->value_as_Squeeze();
6703+    if (value != nullptr) {
6704+      Squeeze squeeze{};
6705+      std::vector<int64_t> axis;
6706+      if (value->axis() == nullptr) {
6707+        axis = {};
6708+      } else {
6709+        axis = std::vector<int64_t>(value->axis()->begin(), value->axis()->end());
6710+      }
6711+      squeeze.axis = axis;
6712+      OHOS::MessageParcel data;
6713+      (void)SqueezeBlockMarshalling(data, squeeze);
6714+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6715+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6716+      return ret;
6717+    } else {
6718+      return {};
6719+    }
6720+  } else {
6721+    return {};
6722+  }
6723+}
6724+std::vector<int8_t> ConvertStack(PrimitivePtr primitive) {
6725+  if (primitive != nullptr) {
6726+    auto prim = static_cast<schema::Primitive *>(primitive);
6727+    auto value = prim->value_as_Stack();
6728+    if (value != nullptr) {
6729+      Stack stack{};
6730+      stack.axis = value->axis();
6731+      OHOS::MessageParcel data;
6732+      (void)StackBlockMarshalling(data, stack);
6733+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6734+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6735+      return ret;
6736+    } else {
6737+      return {};
6738+    }
6739+  } else {
6740+    return {};
6741+  }
6742+}
6743+std::vector<int8_t> ConvertStridedSlice(PrimitivePtr primitive) {
6744+  if (primitive != nullptr) {
6745+    auto prim = static_cast<schema::Primitive *>(primitive);
6746+    auto value = prim->value_as_StridedSlice();
6747+    if (value != nullptr) {
6748+      StridedSlice strided_slice{};
6749+      strided_slice.beginMask = value->begin_mask();
6750+      strided_slice.endMask = value->end_mask();
6751+      strided_slice.ellipsisMask = value->ellipsis_mask();
6752+      strided_slice.newAxisMask = value->new_axis_mask();
6753+      strided_slice.shrinkAxisMask = value->shrink_axis_mask();
6754+      OHOS::MessageParcel data;
6755+      (void)StridedSliceBlockMarshalling(data, strided_slice);
6756+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6757+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6758+      return ret;
6759+    } else {
6760+      return {};
6761+    }
6762+  } else {
6763+    return {};
6764+  }
6765+}
6766+std::vector<int8_t> ConvertSubFusion(PrimitivePtr primitive) {
6767+  if (primitive != nullptr) {
6768+    auto prim = static_cast<schema::Primitive *>(primitive);
6769+    auto value = prim->value_as_SubFusion();
6770+    if (value != nullptr) {
6771+      SubFusion sub_fusion{};
6772+      sub_fusion.activationType = static_cast<HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
6773+      OHOS::MessageParcel data;
6774+      (void)SubFusionBlockMarshalling(data, sub_fusion);
6775+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6776+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6777+      return ret;
6778+    } else {
6779+      return {};
6780+    }
6781+  } else {
6782+    return {};
6783+  }
6784+}
6785+std::vector<int8_t> ConvertTileFusion(PrimitivePtr primitive) {
6786+  if (primitive != nullptr) {
6787+    auto prim = static_cast<schema::Primitive *>(primitive);
6788+    auto value = prim->value_as_TileFusion();
6789+    if (value != nullptr) {
6790+      TileFusion tile_fusion{};
6791+      std::vector<int64_t> dims;
6792+      dims.reserve(kNumEight);
6793+      if (value->dims() == nullptr) {
6794+        dims = {0, 0, 0, 0, 0, 0, 0, 0};
6795+      } else {
6796+        dims = std::vector<int64_t>(value->dims()->begin(), value->dims()->end());
6797+      }
6798+      tile_fusion.dims = dims;
6799+      OHOS::MessageParcel data;
6800+      (void)TileFusionBlockMarshalling(data, tile_fusion);
6801+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6802+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6803+      return ret;
6804+    } else {
6805+      return {};
6806+    }
6807+  } else {
6808+    return {};
6809+  }
6810+}
6811+std::vector<int8_t> ConvertTopKFusion(PrimitivePtr primitive) {
6812+  if (primitive != nullptr) {
6813+    auto prim = static_cast<schema::Primitive *>(primitive);
6814+    auto value = prim->value_as_TopKFusion();
6815+    if (value != nullptr) {
6816+      TopKFusion top_k_fusion{};
6817+      top_k_fusion.sorted = value->sorted();
6818+      top_k_fusion.axis = value->axis();
6819+      OHOS::MessageParcel data;
6820+      (void)TopKFusionBlockMarshalling(data, top_k_fusion);
6821+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6822+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6823+      return ret;
6824+    } else {
6825+      return {};
6826+    }
6827+  } else {
6828+    return {};
6829+  }
6830+}
6831+std::vector<int8_t> ConvertTranspose(PrimitivePtr primitive) {
6832+  if (primitive != nullptr) {
6833+    auto prim = static_cast<schema::Primitive *>(primitive);
6834+    auto value = prim->value_as_Transpose();
6835+    if (value != nullptr) {
6836+      Transpose transpose{};
6837+      OHOS::MessageParcel data;
6838+      (void)TransposeBlockMarshalling(data, transpose);
6839+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6840+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6841+      return ret;
6842+    } else {
6843+      return {};
6844+    }
6845+  } else {
6846+    return {};
6847+  }
6848+}
6849+std::vector<int8_t> ConvertUnsqueeze(PrimitivePtr primitive) {
6850+  if (primitive != nullptr) {
6851+    auto prim = static_cast<schema::Primitive *>(primitive);
6852+    auto value = prim->value_as_Unsqueeze();
6853+    if (value != nullptr) {
6854+      Unsqueeze unsqueeze{};
6855+      std::vector<int64_t> axis;
6856+      axis.reserve(kNumEight);
6857+      if (value->axis() == nullptr) {
6858+        axis = {0, 0, 0, 0};
6859+      } else {
6860+        axis = std::vector<int64_t>(value->axis()->begin(), value->axis()->end());
6861+      }
6862+      unsqueeze.axis = axis;
6863+      OHOS::MessageParcel data;
6864+      (void)UnsqueezeBlockMarshalling(data, unsqueeze);
6865+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
6866+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
6867+      return ret;
6868+    } else {
6869+      return {};
6870+    }
6871+  } else {
6872+    return {};
6873+  }
6874+}
6875+
6876+std::vector<int8_t> Convert(NodeType type, PrimitivePtr primitive) {
6877+  switch (type) {
6878+    case NODE_TYPE_ACTIVATION:
6879+      return ConvertActivation(primitive);
6880+      break;
6881+    case NODE_TYPE_ADD_FUSION:
6882+      return ConvertAddFusion(primitive);
6883+      break;
6884+    case NODE_TYPE_ARGMAX_FUSION:
6885+      return ConvertArgMaxFusion(primitive);
6886+      break;
6887+    case NODE_TYPE_AVG_POOL_FUSION:
6888+      return ConvertAvgPoolFusion(primitive);
6889+      break;
6890+    case NODE_TYPE_BATCH_TO_SPACE_ND:
6891+      return ConvertBatchToSpaceND(primitive);
6892+      break;
6893+    case NODE_TYPE_BIAS_ADD:
6894+      return ConvertBiasAdd(primitive);
6895+      break;
6896+    case NODE_TYPE_CAST:
6897+      return ConvertCast(primitive);
6898+      break;
6899+    case NODE_TYPE_CONCAT:
6900+      return ConvertConcat(primitive);
6901+      break;
6902+    case NODE_TYPE_CONV2D_FUSION:
6903+      return ConvertConv2DFusion(primitive);
6904+      break;
6905+    case NODE_TYPE_CONV2D_TRANSPOSE_FUSION:
6906+      return ConvertConv2dTransposeFusion(primitive);
6907+      break;
6908+    case NODE_TYPE_DIV_FUSION:
6909+      return ConvertDivFusion(primitive);
6910+      break;
6911+    case NODE_TYPE_ELTWISE:
6912+      return ConvertEltwise(primitive);
6913+      break;
6914+    case NODE_TYPE_EXPAND_DIMS:
6915+      return ConvertExpandDims(primitive);
6916+      break;
6917+    case NODE_TYPE_FILL:
6918+      return ConvertFill(primitive);
6919+      break;
6920+    case NODE_TYPE_FULL_CONNECTION:
6921+      return ConvertFullConnection(primitive);
6922+      break;
6923+    case NODE_TYPE_FUSED_BATCH_NORM:
6924+      return ConvertFusedBatchNorm(primitive);
6925+      break;
6926+    case NODE_TYPE_GATHER:
6927+      return ConvertGather(primitive);
6928+      break;
6929+    case NODE_TYPE_LAYER_NORM_FUSION:
6930+      return ConvertLayerNormFusion(primitive);
6931+      break;
6932+    case NODE_TYPE_LESS_EQUAL:
6933+      return ConvertLessEqual(primitive);
6934+      break;
6935+    case NODE_TYPE_MATMUL_FUSION:
6936+      return ConvertMatMulFusion(primitive);
6937+      break;
6938+    case NODE_TYPE_MAXIMUM:
6939+      return ConvertMaximum(primitive);
6940+      break;
6941+    case NODE_TYPE_MAX_POOL_FUSION:
6942+      return ConvertMaxPoolFusion(primitive);
6943+      break;
6944+    case NODE_TYPE_MUL_FUSION:
6945+      return ConvertMulFusion(primitive);
6946+      break;
6947+    case NODE_TYPE_ONE_HOT:
6948+      return ConvertOneHot(primitive);
6949+      break;
6950+    case NODE_TYPE_PAD_FUSION:
6951+      return ConvertPadFusion(primitive);
6952+      break;
6953+    case NODE_TYPE_POW_FUSION:
6954+      return ConvertPowFusion(primitive);
6955+      break;
6956+    case NODE_TYPE_PRELU_FUSION:
6957+      return ConvertPReLUFusion(primitive);
6958+      break;
6959+    case NODE_TYPE_QUANT_DTYPE_CAST:
6960+      return ConvertQuantDTypeCast(primitive);
6961+      break;
6962+    case NODE_TYPE_REDUCE_FUSION:
6963+      return ConvertReduceFusion(primitive);
6964+      break;
6965+    case NODE_TYPE_RESHAPE:
6966+      return ConvertReshape(primitive);
6967+      break;
6968+    case NODE_TYPE_RESIZE:
6969+      return ConvertResize(primitive);
6970+      break;
6971+    case NODE_TYPE_RSQRT:
6972+      return ConvertRsqrt(primitive);
6973+      break;
6974+    case NODE_TYPE_SCALE_FUSION:
6975+      return ConvertScaleFusion(primitive);
6976+      break;
6977+    case NODE_TYPE_SHAPE:
6978+      return ConvertShape(primitive);
6979+      break;
6980+    case NODE_TYPE_SLICE_FUSION:
6981+      return ConvertSliceFusion(primitive);
6982+      break;
6983+    case NODE_TYPE_SOFTMAX:
6984+      return ConvertSoftmax(primitive);
6985+      break;
6986+    case NODE_TYPE_SPACE_TO_BATCH_ND:
6987+      return ConvertSpaceToBatchND(primitive);
6988+      break;
6989+    case NODE_TYPE_SPLIT:
6990+      return ConvertSplit(primitive);
6991+      break;
6992+    case NODE_TYPE_SQRT:
6993+      return ConvertSqrt(primitive);
6994+      break;
6995+    case NODE_TYPE_SQUARED_DIFFERENCE:
6996+      return ConvertSquaredDifference(primitive);
6997+      break;
6998+    case NODE_TYPE_SQUEEZE:
6999+      return ConvertSqueeze(primitive);
7000+      break;
7001+    case NODE_TYPE_STACK:
7002+      return ConvertStack(primitive);
7003+      break;
7004+    case NODE_TYPE_STRIDED_SLICE:
7005+      return ConvertStridedSlice(primitive);
7006+      break;
7007+    case NODE_TYPE_SUB_FUSION:
7008+      return ConvertSubFusion(primitive);
7009+      break;
7010+    case NODE_TYPE_TILE_FUSION:
7011+      return ConvertTileFusion(primitive);
7012+      break;
7013+    case NODE_TYPE_TOPK_FUSION:
7014+      return ConvertTopKFusion(primitive);
7015+      break;
7016+    case NODE_TYPE_TRANSPOSE:
7017+      return ConvertTranspose(primitive);
7018+      break;
7019+    case NODE_TYPE_UNSQUEEZE:
7020+      return ConvertUnsqueeze(primitive);
7021+      break;
7022+    default:
7023+      return {};
7024+  }
7025+}
7026+
7027+}  // namespace lite
7028+}  // namespace mindspore
7029\ No newline at end of file
7030diff --git a/mindspore/lite/mindir/src/mindir_tensor.cc b/mindspore/lite/mindir/src/mindir_tensor.cc
7031new file mode 100644
7032index 00000000..a62ec257
7033--- /dev/null
7034+++ b/mindspore/lite/mindir/src/mindir_tensor.cc
7035@@ -0,0 +1,389 @@
7036+/**
7037+ * Copyright 2021 Huawei Technologies Co., Ltd
7038+ *
7039+ * Licensed under the Apache License, Version 2.0 (the "License");
7040+ * you may not use this file except in compliance with the License.
7041+ * You may obtain a copy of the License at
7042+ *
7043+ * http://www.apache.org/licenses/LICENSE-2.0
7044+ *
7045+ * Unless required by applicable law or agreed to in writing, software
7046+ * distributed under the License is distributed on an "AS IS" BASIS,
7047+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
7048+ * See the License for the specific language governing permissions and
7049+ * limitations under the License.
7050+ */
7051+#include "src/common/log.h"
7052+#include "mindir.h"
7053+#include "utils.h"
7054+#include "mindir_memory_manager.h"
7055+#include "nnrt/v1_0/nnrt_types.h"
7056+
7057+using namespace OHOS::HDI::Nnrt::V1_0;
7058+
7059+namespace mindspore {
7060+namespace lite {
7061+// ********** Tensor **********
7062+TensorPtr MindIR_Tensor_Create() {
7063+  flatbuffers::FlatBufferBuilder fbb;
7064+  std::vector<int32_t> dims(1, 0);
7065+  std::vector<uint8_t> data(1, 0);
7066+  std::vector<QuantParam> quant_params(1, {0, 0, 8});
7067+  std::string name = " ";
7068+  auto ops_offset = schema::CreateTensor(fbb, 0, DataType::DATA_TYPE_INT32, 0, schema::Format::Format_NCHW, 0, 0, 0, 0,
7069+                                         0, fbb.CreateString(name.c_str(), name.size()));
7070+  fbb.Finish(ops_offset);
7071+  auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, nullptr);
7072+  auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
7073+  return ret_value;
7074+}
7075+
7076+TensorPtr MindIR_Tensor_Create(const std::string &name, DataType data_type, const std::vector<int32_t> &dims,
7077+                               Format format, const std::vector<uint8_t> &data,
7078+                               const std::vector<QuantParam> &quant_params) {
7079+  flatbuffers::FlatBufferBuilder fbb;
7080+
7081+  auto ops_offset =
7082+    schema::CreateTensor(fbb, 0, data_type, fbb.CreateVector(dims.data(), dims.size()),
7083+                         static_cast<schema::Format>(format), 0, 0, fbb.CreateVector(data.data(), data.size()),
7084+                         ConvertQuantParams(fbb, quant_params), 0, fbb.CreateString(name.c_str(), name.size()));
7085+  fbb.Finish(ops_offset);
7086+  auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, nullptr);
7087+  auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
7088+  return ret_value;
7089+}
7090+
7091+std::string MindIR_Tensor_GetName(ConstTensorPtr tensor) {
7092+  if (tensor != nullptr) {
7093+    auto value = static_cast<const schema::Tensor *>(tensor);
7094+    if (value != nullptr) {
7095+      return value->name()->str();
7096+    } else {
7097+      return "";
7098+    }
7099+  } else {
7100+    return "";
7101+  }
7102+}
7103+
7104+void MindIR_Tensor_SetName(TensorPtr *tensor, const std::string &name) {
7105+  if (tensor != nullptr && *tensor != nullptr) {
7106+    auto value = static_cast<schema::Tensor *>(*tensor);
7107+    if (value != nullptr) {
7108+      flatbuffers::FlatBufferBuilder fbb;
7109+      flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims;
7110+      if (value->dims() == nullptr || value->dims()->size() <= 0) {
7111+        dims = 0;
7112+      } else {
7113+        dims = fbb.CreateVector(value->dims()->data(), value->dims()->size());
7114+      }
7115+      flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data;
7116+      if (value->data() == nullptr || value->data()->size() <= 0) {
7117+        data = 0;
7118+      } else {
7119+        data = fbb.CreateVector(value->data()->data(), value->data()->size());
7120+      }
7121+      auto ops_offset = schema::CreateTensor(
7122+        fbb, 0, value->dataType(), dims, static_cast<schema::Format>(value->format()), 0, 0, data,
7123+        ConvertQuantParams(fbb, value->quantParams()), 0, fbb.CreateString(name.c_str(), name.size()));
7124+      fbb.Finish(ops_offset);
7125+      auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value);
7126+      auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
7127+      *tensor = ret_value;
7128+    }
7129+  }
7130+}
7131+DataType MindIR_Tensor_GetDataType(ConstTensorPtr tensor) {
7132+  if (tensor != nullptr) {
7133+    auto value = static_cast<const schema::Tensor *>(tensor);
7134+    if (value != nullptr) {
7135+      return static_cast<DataType>(value->dataType());
7136+    } else {
7137+      DataType en = DATA_TYPE_INT32;
7138+      return en;
7139+    }
7140+  } else {
7141+    DataType en = DATA_TYPE_INT32;
7142+    return en;
7143+  }
7144+}
7145+
7146+void MindIR_Tensor_SetDataType(TensorPtr *tensor, DataType data_type) {
7147+  if (tensor != nullptr && *tensor != nullptr) {
7148+    auto value = static_cast<schema::Tensor *>(*tensor);
7149+    if (value != nullptr) {
7150+      flatbuffers::FlatBufferBuilder fbb;
7151+      flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims;
7152+      if (value->dims() == nullptr || value->dims()->size() <= 0) {
7153+        dims = 0;
7154+      } else {
7155+        dims = fbb.CreateVector(value->dims()->data(), value->dims()->size());
7156+      }
7157+      flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data;
7158+      if (value->data() == nullptr || value->data()->size() <= 0) {
7159+        data = 0;
7160+      } else {
7161+        data = fbb.CreateVector(value->data()->data(), value->data()->size());
7162+      }
7163+      flatbuffers::Offset<flatbuffers::String> name;
7164+      if (value->name() == nullptr || value->name()->size() <= 0) {
7165+        name = 0;
7166+      } else {
7167+        name = fbb.CreateString(value->name()->c_str(), value->name()->size());
7168+      }
7169+      auto ops_offset =
7170+        schema::CreateTensor(fbb, 0, value->dataType(), dims, static_cast<schema::Format>(value->format()), 0, 0, data,
7171+                             ConvertQuantParams(fbb, value->quantParams()), 0, name);
7172+      fbb.Finish(ops_offset);
7173+      auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value);
7174+      auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
7175+      *tensor = ret_value;
7176+    }
7177+  }
7178+}
7179+
7180+std::vector<int32_t> MindIR_Tensor_GetDims(ConstTensorPtr tensor) {
7181+  if (tensor != nullptr) {
7182+    auto value = static_cast<const schema::Tensor *>(tensor);
7183+    if (value != nullptr) {
7184+      std::vector<int32_t> result;
7185+      auto src = value->dims();
7186+      if (src == nullptr) {
7187+        return {};
7188+      }
7189+      result.resize(src->size());
7190+      std::transform(src->begin(), src->end(), result.begin(), [](int32_t item) { return item; });
7191+      return result;
7192+    } else {
7193+      return {};
7194+    }
7195+  } else {
7196+    return {};
7197+  }
7198+}
7199+
7200+void MindIR_Tensor_SetDims(TensorPtr *tensor, const std::vector<int32_t> &dims) {
7201+  if (tensor != nullptr && *tensor != nullptr) {
7202+    auto value = static_cast<schema::Tensor *>(*tensor);
7203+    if (value != nullptr) {
7204+      flatbuffers::FlatBufferBuilder fbb;
7205+      flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data;
7206+      if (value->data() == nullptr || value->data()->size() <= 0) {
7207+        data = 0;
7208+      } else {
7209+        data = fbb.CreateVector(value->data()->data(), value->data()->size());
7210+      }
7211+      flatbuffers::Offset<flatbuffers::String> name;
7212+      if (value->name() == nullptr || value->name()->size() <= 0) {
7213+        name = 0;
7214+      } else {
7215+        name = fbb.CreateString(value->name()->c_str(), value->name()->size());
7216+      }
7217+      auto ops_offset = schema::CreateTensor(fbb, 0, value->dataType(), fbb.CreateVector(dims.data(), dims.size()),
7218+                                             static_cast<schema::Format>(value->format()), 0, 0, data,
7219+                                             ConvertQuantParams(fbb, value->quantParams()), 0, name);
7220+      fbb.Finish(ops_offset);
7221+      auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value);
7222+      auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
7223+      *tensor = ret_value;
7224+    }
7225+  }
7226+}
7227+Format MindIR_Tensor_GetFormat(ConstTensorPtr tensor) {
7228+  if (tensor != nullptr) {
7229+    auto value = static_cast<const schema::Tensor *>(tensor);
7230+    if (value != nullptr) {
7231+      return static_cast<Format>(value->format());
7232+    } else {
7233+      Format en = FORMAT_NCHW;
7234+      return en;
7235+    }
7236+  } else {
7237+    Format en = FORMAT_NCHW;
7238+    return en;
7239+  }
7240+}
7241+
7242+void MindIR_Tensor_SetFormat(TensorPtr *tensor, Format format) {
7243+  if (tensor != nullptr && *tensor != nullptr) {
7244+    auto value = static_cast<schema::Tensor *>(*tensor);
7245+    if (value != nullptr) {
7246+      flatbuffers::FlatBufferBuilder fbb;
7247+      flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims;
7248+      if (value->dims() == nullptr || value->dims()->size() <= 0) {
7249+        dims = 0;
7250+      } else {
7251+        dims = fbb.CreateVector(value->dims()->data(), value->dims()->size());
7252+      }
7253+      flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data;
7254+      if (value->data() == nullptr || value->data()->size() <= 0) {
7255+        data = 0;
7256+      } else {
7257+        data = fbb.CreateVector(value->data()->data(), value->data()->size());
7258+      }
7259+      flatbuffers::Offset<flatbuffers::String> name;
7260+      if (value->name() == nullptr || value->name()->size() <= 0) {
7261+        name = 0;
7262+      } else {
7263+        name = fbb.CreateString(value->name()->c_str(), value->name()->size());
7264+      }
7265+      auto ops_offset = schema::CreateTensor(fbb, 0, value->dataType(), dims, static_cast<schema::Format>(format), 0, 0,
7266+                                             data, ConvertQuantParams(fbb, value->quantParams()), 0, name);
7267+      fbb.Finish(ops_offset);
7268+      auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value);
7269+      auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
7270+      *tensor = ret_value;
7271+    }
7272+  }
7273+}
7274+
7275+SharedBuffer MindIR_Tensor_GetData(ConstTensorPtr tensor, const SharedBuffer &buffer_templete, uint8_t *mmap_ptr,
7276+                                   unsigned int offset) {
7277+  if (tensor != nullptr) {
7278+    auto value = static_cast<const schema::Tensor *>(tensor);
7279+    if (value != nullptr) {
7280+      SharedBuffer result{};
7281+
7282+      if (value->data() == nullptr || value->data()->size() == 0) {
7283+        result.fd = -1;
7284+        result.bufferSize = buffer_templete.bufferSize;
7285+        result.offset = offset;
7286+        result.dataSize = 0;
7287+        return result;
7288+      }
7289+      if (mmap_ptr == nullptr) {
7290+        MS_LOG(ERROR) << "Tensor GetData failed, mmap pointer should not be nullptr";
7291+        return {-1, 0, offset, 0};
7292+      }
7293+      result.fd = buffer_templete.fd;
7294+      result.bufferSize = buffer_templete.bufferSize;
7295+      //      MS_LOG(ERROR) << "offset:" << offset << ",src->size():" << value->data()->size();
7296+      memcpy(mmap_ptr + offset, value->data()->data(), value->data()->size());
7297+      result.offset = offset;
7298+      result.dataSize = value->data()->size();
7299+      return result;
7300+    } else {
7301+      MS_LOG(WARNING) << "Tensor GetData failed, schema tensor should not be nullptr";
7302+      return {-1, 0, offset, 0};
7303+    }
7304+  } else {
7305+    return {-1, 0, offset, 0};
7306+  }
7307+}
7308+
7309+std::vector<uint8_t> MindIR_Tensor_GetData(ConstTensorPtr tensor) {
7310+  if (tensor != nullptr) {
7311+    auto value = static_cast<const schema::Tensor *>(tensor);
7312+    if (value != nullptr) {
7313+      std::vector<uint8_t> result;
7314+      auto src = value->data();
7315+      if (src == nullptr) {
7316+        return {};
7317+      }
7318+      result.resize(src->size());
7319+      std::transform(src->begin(), src->end(), result.begin(), [](uint8_t item) { return item; });
7320+      return result;
7321+    } else {
7322+      return {};
7323+    }
7324+  } else {
7325+    return {};
7326+  }
7327+}
7328+
7329+void MindIR_Tensor_SetData(TensorPtr *tensor, const std::vector<uint8_t> &data) {
7330+  if (tensor != nullptr && *tensor != nullptr) {
7331+    auto value = static_cast<schema::Tensor *>(*tensor);
7332+    if (value != nullptr) {
7333+      flatbuffers::FlatBufferBuilder fbb;
7334+      flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims;
7335+      if (value->dims() == nullptr || value->dims()->size() <= 0) {
7336+        dims = 0;
7337+      } else {
7338+        dims = fbb.CreateVector(value->dims()->data(), value->dims()->size());
7339+      }
7340+      flatbuffers::Offset<flatbuffers::String> name;
7341+      if (value->name() == nullptr || value->name()->size() <= 0) {
7342+        name = 0;
7343+      } else {
7344+        name = fbb.CreateString(value->name()->c_str(), value->name()->size());
7345+      }
7346+      auto ops_offset = schema::CreateTensor(
7347+        fbb, 0, value->dataType(), dims, static_cast<schema::Format>(value->format()), 0, 0,
7348+        fbb.CreateVector(data.data(), data.size()), ConvertQuantParams(fbb, value->quantParams()), 0, name);
7349+      fbb.Finish(ops_offset);
7350+      auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value);
7351+      auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
7352+      *tensor = ret_value;
7353+    }
7354+  }
7355+}
7356+std::vector<QuantParam> MindIR_Tensor_GetQuantParams(ConstTensorPtr tensor) {
7357+  if (tensor != nullptr) {
7358+    auto value = static_cast<const schema::Tensor *>(tensor);
7359+    if (value != nullptr) {
7360+      std::vector<QuantParam> result;
7361+      auto src = value->quantParams();
7362+      if (src == nullptr) {
7363+        return {};
7364+      }
7365+      size_t size = src->size();
7366+      result.reserve(src->size());
7367+      for (size_t i = 0; i < size; i++) {
7368+        auto tmp = src->Get(i);
7369+        QuantParam q{tmp->zeroPoint(), tmp->scale(), tmp->numBits()};
7370+        result.emplace_back(q);
7371+      }
7372+      return result;
7373+    } else {
7374+      return {};
7375+    }
7376+  } else {
7377+    return {};
7378+  }
7379+}
7380+
7381+void MindIR_Tensor_SetQuantParams(TensorPtr *tensor, const std::vector<QuantParam> &quant_params) {
7382+  if (tensor != nullptr && *tensor != nullptr) {
7383+    auto value = static_cast<schema::Tensor *>(*tensor);
7384+    if (value != nullptr) {
7385+      flatbuffers::FlatBufferBuilder fbb;
7386+      flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims;
7387+      if (value->dims() == nullptr || value->dims()->size() <= 0) {
7388+        dims = 0;
7389+      } else {
7390+        dims = fbb.CreateVector(value->dims()->data(), value->dims()->size());
7391+      }
7392+      flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data;
7393+      if (value->data() == nullptr || value->data()->size() <= 0) {
7394+        data = 0;
7395+      } else {
7396+        data = fbb.CreateVector(value->data()->data(), value->data()->size());
7397+      }
7398+      flatbuffers::Offset<flatbuffers::String> name;
7399+      if (value->name() == nullptr || value->name()->size() <= 0) {
7400+        name = 0;
7401+      } else {
7402+        name = fbb.CreateString(value->name()->c_str(), value->name()->size());
7403+      }
7404+      auto ops_offset =
7405+        schema::CreateTensor(fbb, 0, value->dataType(), dims, static_cast<schema::Format>(value->format()), 0, 0, data,
7406+                             ConvertQuantParams(fbb, quant_params), 0, name);
7407+      fbb.Finish(ops_offset);
7408+      auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value);
7409+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
7410+      *tensor = ret_value;
7411+    }
7412+  }
7413+}
7414+
7415+void MindIR_Tensor_Destroy(TensorPtr *tensor) {
7416+  if (tensor != nullptr && *tensor != nullptr) {
7417+    auto schema = static_cast<schema::Tensor *>(*tensor);
7418+    MindIRMemoryManager::GetInstance()->DeleteTensor(schema);
7419+    *tensor = nullptr;
7420+  }
7421+  *tensor = nullptr;
7422+}
7423+}  // namespace lite
7424+}  // namespace mindspore
7425diff --git a/mindspore/lite/mindir/src/utils.cc b/mindspore/lite/mindir/src/utils.cc
7426new file mode 100644
7427index 00000000..ca5f7f4b
7428--- /dev/null
7429+++ b/mindspore/lite/mindir/src/utils.cc
7430@@ -0,0 +1,96 @@
7431+/**
7432+ * Copyright 2021 Huawei Technologies Co., Ltd
7433+ *
7434+ * Licensed under the Apache License, Version 2.0 (the "License");
7435+ * you may not use this file except in compliance with the License.
7436+ * You may obtain a copy of the License at
7437+ *
7438+ * http://www.apache.org/licenses/LICENSE-2.0
7439+ *
7440+ * Unless required by applicable law or agreed to in writing, software
7441+ * distributed under the License is distributed on an "AS IS" BASIS,
7442+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
7443+ * See the License for the specific language governing permissions and
7444+ * limitations under the License.
7445+ */
7446+#include "utils.h"
7447+#include "src/common/log.h"
7448+#include "mindir_memory_manager.h"
7449+namespace mindspore {
7450+namespace lite {
7451+
7452+// ********** PrimitiveBase **********
7453+NodeType MindIR_Primitive_GetType(PrimitivePtr primitive) {
7454+  auto prim = flatbuffers::GetMutableRoot<schema::Primitive>(primitive);
7455+  auto type = prim->value_type();
7456+  return static_cast<NodeType>(type);
7457+}
7458+
7459+void MindIR_Primitive_Destroy(PrimitivePtr *primitive) {
7460+  if (primitive != nullptr && *primitive != nullptr) {
7461+    auto schema = static_cast<schema::Primitive *>(*primitive);
7462+    MS_LOG(ERROR) << "schema=" << schema->value_type();
7463+    MindIRMemoryManager::GetInstance()->DeletePrimitive(schema);
7464+    *primitive = nullptr;
7465+  }
7466+}
7467+PrimitivePtr MindIR_CreatePrimitiveFromBuilder(flatbuffers::FlatBufferBuilder &fbb) {
7468+  auto buff = reinterpret_cast<uint8_t *>(malloc(fbb.GetSize()));
7469+  if (buff == nullptr) {
7470+    MS_LOG(ERROR) << "malloc memory for primitive failed!";
7471+    fbb.Clear();
7472+    return nullptr;
7473+  }
7474+  memcpy(buff, fbb.GetBufferPointer(), fbb.GetSize());
7475+  fbb.Clear();
7476+  return buff;
7477+}
7478+flatbuffers::Offset<schema::Vec2D> CreateVec2D(flatbuffers::FlatBufferBuilder &fbb,
7479+                                               const std::vector<std::vector<int64_t>> &data) {
7480+  std::vector<flatbuffers::Offset<schema::Vec>> vet2d;
7481+  vet2d.reserve(data.size());
7482+  for (const auto &data_one : data) {
7483+    vet2d.emplace_back(schema::CreateVec(fbb, fbb.CreateVector<int64_t>(data_one)));
7484+  }
7485+  flatbuffers::Offset<schema::Vec2D> v2d = schema::CreateVec2D(fbb, fbb.CreateVector(vet2d));
7486+  return v2d;
7487+}
7488+flatbuffers::Offset<schema::Vec2D> CreateVec2D(flatbuffers::FlatBufferBuilder &fbb,
7489+                                               const mindspore::schema::Vec2D *data) {
7490+  auto data_inner = data->data();
7491+  std::vector<flatbuffers::Offset<schema::Vec>> vet2d;
7492+  vet2d.reserve(data_inner->size());
7493+  for (const auto data_one : *data_inner) {
7494+    vet2d.emplace_back(schema::CreateVec(fbb, fbb.CreateVector(data_one->data()->data(), data_one->data()->size())));
7495+  }
7496+  flatbuffers::Offset<schema::Vec2D> v2d = schema::CreateVec2D(fbb, fbb.CreateVector(vet2d));
7497+  return v2d;
7498+}
7499+
7500+flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<schema::QuantParam>>> ConvertQuantParams(
7501+  flatbuffers::FlatBufferBuilder &fbb, const std::vector<QuantParam> &quant_params) {
7502+  std::vector<flatbuffers::Offset<mindspore::schema::QuantParam>> tmp_vec;
7503+  tmp_vec.reserve(quant_params.size());
7504+  for (auto q_param : quant_params) {
7505+    tmp_vec.emplace_back(schema::CreateQuantParam(fbb, q_param.scale, q_param.zeroPoint, 0, 0, true, q_param.numBits));
7506+  }
7507+  flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<schema::QuantParam>>> ret_quant_param =
7508+    fbb.CreateVector(tmp_vec.data(), tmp_vec.size());
7509+  return ret_quant_param;
7510+}
7511+
7512+flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<schema::QuantParam>>> ConvertQuantParams(
7513+  flatbuffers::FlatBufferBuilder &fbb,
7514+  const flatbuffers::Vector<flatbuffers::Offset<mindspore::schema::QuantParam>> *quant_params) {
7515+  std::vector<flatbuffers::Offset<mindspore::schema::QuantParam>> tmp_vec;
7516+  tmp_vec.reserve(quant_params->size());
7517+  for (auto q_param : *quant_params) {
7518+    tmp_vec.emplace_back(
7519+      schema::CreateQuantParam(fbb, q_param->scale(), q_param->zeroPoint(), 0, 0, true, q_param->numBits()));
7520+  }
7521+  flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<schema::QuantParam>>> ret_quant_param =
7522+    fbb.CreateVector(tmp_vec.data(), tmp_vec.size());
7523+  return ret_quant_param;
7524+}
7525+}  // namespace lite
7526+}  // namespace mindspore
7527\ No newline at end of file
7528diff --git a/mindspore/lite/mindir/tests/BUILD.gn b/mindspore/lite/mindir/tests/BUILD.gn
7529new file mode 100644
7530index 00000000..de1902fe
7531--- /dev/null
7532+++ b/mindspore/lite/mindir/tests/BUILD.gn
7533@@ -0,0 +1,35 @@
7534+# Copyright 2022 Huawei Technologies Co., Ltd
7535+#
7536+# Licensed under the Apache License, Version 2.0 (the "License");
7537+# you may not use this file except in compliance with the License.
7538+# You may obtain a copy of the License at
7539+#
7540+# http://www.apache.org/licenses/LICENSE-2.0
7541+#
7542+# Unless required by applicable law or agreed to in writing, software
7543+# distributed under the License is distributed on an "AS IS" BASIS,
7544+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
7545+# See the License for the specific language governing permissions and
7546+# limitations under the License.
7547+# ============================================================================
7548+
7549+import("//build/ohos.gni")
7550+
7551+ohos_executable("mindir_test") {
7552+  sources = [ "mindir_test.cc" ]
7553+
7554+  include_dirs = [
7555+    "./",
7556+    "../include",
7557+    "../include/inner",
7558+    "../../",
7559+    "//third_party/flatbuffers/include",
7560+  ]
7561+  remove_configs = [ "//build/config/compiler:no_rtti" ]
7562+
7563+  deps = [ "../:mindir" ]
7564+
7565+  output_name = "mindir_test"
7566+  install_enable = true
7567+  part_name = "mindspore"
7568+}
7569diff --git a/mindspore/lite/mindir/tests/mindir_test.cc b/mindspore/lite/mindir/tests/mindir_test.cc
7570new file mode 100644
7571index 00000000..09ef7090
7572--- /dev/null
7573+++ b/mindspore/lite/mindir/tests/mindir_test.cc
7574@@ -0,0 +1,51 @@
7575+#include "mindir.h"
7576+#include "mindir_memory_manager.h"
7577+#include <iostream>
7578+#include <string>
7579+using namespace mindspore::lite;
7580+int main() {
7581+  int loop = 0;
7582+  int all = 100;
7583+  while (loop < all) {
7584+    auto tensor = MindIR_Tensor_Create();
7585+    auto str = MindIR_Tensor_GetName(tensor);
7586+    DataType dataType = DATA_TYPE_UINT16;
7587+    MindIR_Tensor_SetDataType(&tensor, dataType);
7588+    std::cout << "set data type" << std::endl;
7589+    DataType d = MindIR_Tensor_GetDataType(tensor);
7590+    std::cout << "loop = " << loop << ",data type = " << (int)d << std::endl;
7591+    PrimitivePtr ret = nullptr;
7592+    ret = MindIR_SquaredDifference_CreatePrimitive();
7593+    std::cout << "MindIR_SquaredDifference_CreatePrimitive 1,PrimitivePtr = " << ret << std::endl;
7594+    ret = MindIR_SquaredDifference_CreatePrimitive();
7595+    std::cout << "MindIR_SquaredDifference_CreatePrimitive 1,PrimitivePtr = " << ret << std::endl;
7596+    ret = MindIR_SubFusion_CreatePrimitive(ACTIVATION_TYPE_RELU6);
7597+    std::cout << "MindIR_SubFusion_CreatePrimitive 2,PrimitivePtr = " << ret << std::endl;
7598+    ret = MindIR_Activation_CreatePrimitive(ACTIVATION_TYPE_RELU6, .5, 0, 1, true);
7599+    std::cout << "MindIR_Activation_CreatePrimitive 3,PrimitivePtr = " << ret << std::endl;
7600+    MindIR_Primitive_Destroy(&ret);
7601+    std::cout << "MindIR_Primitive_Destroy,PrimitivePtr = " << ret << std::endl;
7602+    TensorPtr t_ret = nullptr;
7603+    t_ret = MindIR_Tensor_Create();
7604+    std::cout << "MindIR_Tensor_Create 3,TensorPtr = " << t_ret << std::endl;
7605+    MindIR_Tensor_Destroy(&t_ret);
7606+    std::cout << "MindIR_Tensor_Destroy,Tensor = " << t_ret << std::endl;
7607+    ret = MindIR_SpaceToBatchND_CreatePrimitive({2, 2}, {{0}, {0}, {0}, {0}});
7608+    auto blockshape = MindIR_SpaceToBatchND_GetBlockShape(ret);
7609+    std::string bs_("");
7610+    for (int i = 0; i < 2; i++) {
7611+      bs_.append(std::to_string(blockshape[i]).c_str());
7612+    }
7613+    std::cout << "MindIR_SpaceToBatchND_GetBlockShape,blockshape = " << bs_ << std::endl;
7614+    auto paddings = MindIR_SpaceToBatchND_GetPaddings(ret);
7615+    std::string pad_("");
7616+    for (auto item : paddings) {
7617+      pad_.append(std::to_string(item[0]).c_str());
7618+    }
7619+    std::cout << "MindIR_SpaceToBatchND_GetPaddings,Paddings = " << pad_ << std::endl;
7620+    loop++;
7621+  }
7622+  MindIRMemoryManager::GetInstance()->ClearAllMemory();
7623+  std::cout << "MindIRMemoryManager::GetInstance()->ClearAllMemory()" << std::endl;
7624+  loop++;
7625+}
7626\ No newline at end of file
7627diff --git a/mindspore/lite/src/runtime/delegate/nnrt/CMakeLists.txt b/mindspore/lite/src/runtime/delegate/nnrt/CMakeLists.txt
7628new file mode 100644
7629index 00000000..70aa63f3
7630--- /dev/null
7631+++ b/mindspore/lite/src/runtime/delegate/nnrt/CMakeLists.txt
7632@@ -0,0 +1,30 @@
7633+include_directories(${DDK_PATH})
7634+include_directories(${CCSRC_DIR}/plugin/device/cpu/kernel)
7635+
7636+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
7637+#include_directories(/home/tony/wty/workspace/ohos/third_party/mindspore/mindspore/lite/mindir/include/inner)
7638+#include_directories(/home/tony/wty/workspace/ohos/third_party/mindspore/mindspore/lite/mindir/include)
7639+file(GLOB_RECURSE NNRT_SRC
7640+        ${CMAKE_CURRENT_SOURCE_DIR}/*.cc
7641+)
7642+
7643+#add_library(hiai SHARED IMPORTED)
7644+#set_target_properties(hiai PROPERTIES IMPORTED_LOCATION
7645+#        ${DDK_LIB_PATH}/libhiai.so)
7646+#add_library(hiai_ir SHARED IMPORTED)
7647+#set_target_properties(hiai_ir PROPERTIES IMPORTED_LOCATION
7648+#        ${DDK_LIB_PATH}/libhiai_ir.so)
7649+#add_library(hiai_ir_build SHARED IMPORTED)
7650+#set_target_properties(hiai_ir_build PROPERTIES IMPORTED_LOCATION
7651+#        ${DDK_LIB_PATH}/libhiai_ir_build.so)
7652+#add_library(npu_kernel_mid OBJECT ${NPU_RUNTIME_SRC})
7653+#add_dependencies(npu_kernel_mid fbs_src)
7654+#target_link_libraries(
7655+#        npu_kernel_mid
7656+#        hiai
7657+#        hiai_ir
7658+#        hiai_ir_build
7659+#)
7660+
7661+file(GLOB convert_source checker/*.cc)
7662+add_library(nnr_mid OBJECT ${NNRT_SRC} ${convert_source} )
7663\ No newline at end of file
7664diff --git a/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.cc b/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.cc
7665new file mode 100644
7666index 00000000..a647796c
7667--- /dev/null
7668+++ b/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.cc
7669@@ -0,0 +1,187 @@
7670+#include <string>
7671+#include <vector>
7672+#include "primitive_check.h"
7673+#include "dtype/type_id.h"
7674+#include "src/runtime/weight_decoder.h"
7675+#include "src/common/log.h"
7676+#include "src/common/utils.h"
7677+namespace mindspore {
7678+namespace lite {
7679+
7680+Status CheckPrimitiveSupported(const schema::Primitive *primitive) {
7681+  if (primitive != nullptr) {
7682+    auto prim = primitive;
7683+    auto type = prim->value_type();
7684+    switch (type) {
7685+      case schema::PrimitiveType_Activation:
7686+        return mindspore::kSuccess;
7687+      case schema::PrimitiveType_AddFusion:
7688+        return mindspore::kSuccess;
7689+      case schema::PrimitiveType_ArgMaxFusion:
7690+        return mindspore::kSuccess;
7691+      case schema::PrimitiveType_AvgPoolFusion:
7692+        return mindspore::kSuccess;
7693+      case schema::PrimitiveType_BatchToSpaceND:
7694+        return mindspore::kSuccess;
7695+      case schema::PrimitiveType_BiasAdd:
7696+        return mindspore::kSuccess;
7697+      case schema::PrimitiveType_Cast:
7698+        return mindspore::kSuccess;
7699+      case schema::PrimitiveType_Concat:
7700+        return mindspore::kSuccess;
7701+      case schema::PrimitiveType_Conv2DFusion:
7702+        return mindspore::kSuccess;
7703+      case schema::PrimitiveType_Conv2dTransposeFusion:
7704+        return mindspore::kSuccess;
7705+      case schema::PrimitiveType_DivFusion:
7706+        return mindspore::kSuccess;
7707+      case schema::PrimitiveType_Eltwise:
7708+        return mindspore::kSuccess;
7709+      case schema::PrimitiveType_ExpandDims:
7710+        return mindspore::kSuccess;
7711+      case schema::PrimitiveType_Fill:
7712+        return mindspore::kSuccess;
7713+      case schema::PrimitiveType_FullConnection:
7714+        return mindspore::kSuccess;
7715+      case schema::PrimitiveType_FusedBatchNorm:
7716+        return mindspore::kSuccess;
7717+      case schema::PrimitiveType_Gather:
7718+        return mindspore::kSuccess;
7719+      case schema::PrimitiveType_LayerNormFusion:
7720+        return mindspore::kSuccess;
7721+      case schema::PrimitiveType_LessEqual:
7722+        return mindspore::kSuccess;
7723+      case schema::PrimitiveType_MatMulFusion:
7724+        return mindspore::kSuccess;
7725+      case schema::PrimitiveType_Maximum:
7726+        return mindspore::kSuccess;
7727+      case schema::PrimitiveType_MaxPoolFusion:
7728+        return mindspore::kSuccess;
7729+      case schema::PrimitiveType_MulFusion:
7730+        return mindspore::kSuccess;
7731+      case schema::PrimitiveType_OneHot:
7732+        return mindspore::kSuccess;
7733+      case schema::PrimitiveType_PadFusion:
7734+        return mindspore::kSuccess;
7735+      case schema::PrimitiveType_PowFusion:
7736+        return mindspore::kSuccess;
7737+      case schema::PrimitiveType_PReLUFusion:
7738+        return mindspore::kSuccess;
7739+      case schema::PrimitiveType_QuantDTypeCast:
7740+        return mindspore::kSuccess;
7741+      case schema::PrimitiveType_ReduceFusion:
7742+        return mindspore::kSuccess;
7743+      case schema::PrimitiveType_Reshape:
7744+        return mindspore::kSuccess;
7745+      case schema::PrimitiveType_Resize:
7746+        return mindspore::kSuccess;
7747+      case schema::PrimitiveType_Rsqrt:
7748+        return mindspore::kSuccess;
7749+      case schema::PrimitiveType_ScaleFusion:
7750+        return mindspore::kSuccess;
7751+      case schema::PrimitiveType_Shape:
7752+        return mindspore::kSuccess;
7753+      case schema::PrimitiveType_SliceFusion:
7754+        return mindspore::kSuccess;
7755+      case schema::PrimitiveType_Softmax:
7756+        return mindspore::kSuccess;
7757+      case schema::PrimitiveType_SpaceToBatchND:
7758+        return mindspore::kSuccess;
7759+      case schema::PrimitiveType_Split:
7760+        return mindspore::kSuccess;
7761+      case schema::PrimitiveType_Sqrt:
7762+        return mindspore::kSuccess;
7763+      case schema::PrimitiveType_SquaredDifference:
7764+        return mindspore::kSuccess;
7765+      case schema::PrimitiveType_Squeeze:
7766+        return mindspore::kSuccess;
7767+      case schema::PrimitiveType_Stack:
7768+        return mindspore::kSuccess;
7769+      case schema::PrimitiveType_StridedSlice:
7770+        return mindspore::kSuccess;
7771+      case schema::PrimitiveType_SubFusion:
7772+        return mindspore::kSuccess;
7773+      case schema::PrimitiveType_TileFusion:
7774+        return mindspore::kSuccess;
7775+      case schema::PrimitiveType_TopKFusion:
7776+        return mindspore::kSuccess;
7777+      case schema::PrimitiveType_Transpose:
7778+        return mindspore::kSuccess;
7779+      case schema::PrimitiveType_Unsqueeze:
7780+        return mindspore::kSuccess;
7781+      default: {
7782+        MS_LOG(WARNING) << "No primitive type :" << (int)(type);
7783+        return mindspore::kLiteSuccessExit;
7784+      }
7785+    }
7786+    return mindspore::kSuccess;
7787+  } else {
7788+    MS_LOG(ERROR) << "primitive is nullptr.";
7789+    return mindspore::kLiteError;
7790+  }
7791+}
7792+namespace {
7793+bool NeedBitUppackCheck(const schema::Tensor &src_tensor) {
7794+  if (src_tensor.enableHuffmanCode()) {
7795+    return true;
7796+  }
7797+  bool need_bit_unpack = src_tensor.quantParams() != nullptr && src_tensor.quantParams()->size() > 0 &&
7798+                         src_tensor.quantParams()->Get(0) != nullptr;
7799+  if (need_bit_unpack) {
7800+    auto num_bits = src_tensor.quantParams()->Get(0)->numBits();
7801+    need_bit_unpack = ((num_bits >= kBitNum1 && num_bits < kBitNum8) || (num_bits > kBitNum8 && num_bits < kBitNum16));
7802+  }
7803+
7804+  return need_bit_unpack;
7805+}
7806+int DecompressTensor(const schema::Tensor &src_tensor) {
7807+  if (src_tensor.weightQunatCompressType() == schema::WeightQunatCompressType_FSE ||
7808+      src_tensor.weightQunatCompressType() == schema::WeightQunatCompressType_INDEXING ||
7809+      src_tensor.weightQunatCompressType() == schema::WeightQunatCompressType_SPARSE) {
7810+    return RET_NOT_SUPPORT;
7811+  }
7812+  if (!NeedBitUppackCheck(src_tensor)) {
7813+    return RET_NO_CHANGE;
7814+  }
7815+  MS_LOG(ERROR) << "DecompressTensor Error.";
7816+  return RET_ERROR;
7817+}
7818+}  // namespace
7819+
7820+Status CheckTensorSupported(const schema::Tensor *primitive) {
7821+  if (primitive == nullptr) {
7822+    MS_LOG(ERROR) << "primitive is nullptr, which type is Tensor.";
7823+    return mindspore::kLiteSuccessExit;
7824+  }
7825+
7826+  int32_t data_type = primitive->dataType();
7827+  if (data_type <= kTypeUnknown || data_type >= kMonadTypeEnd) {
7828+    MS_LOG(ERROR) << "invalid data type. " << data_type;
7829+    return mindspore::kLiteSuccessExit;
7830+  }
7831+
7832+  if (primitive->dims() == nullptr) {
7833+    MS_LOG(DEBUG) << "Dims of tensor is nullptr";
7834+  }
7835+
7836+  if (data_type == kObjectTypeTensorType) {
7837+    MS_LOG(ERROR) << "Not support TensorList.";
7838+    return mindspore::kLiteNotSupport;
7839+  }
7840+
7841+  if (primitive->data() == nullptr || primitive->data()->size() <= 0) {
7842+    MS_LOG(DEBUG) << "No valid data converted.";
7843+    return mindspore::kSuccess;
7844+  } else {
7845+    auto ret = DecompressTensor(*primitive);
7846+    if (ret == RET_NO_CHANGE) {
7847+    } else {
7848+      MS_LOG(ERROR) << "Not support Decompress Tensor.";
7849+      return mindspore::kLiteNotSupport;
7850+    }
7851+  }
7852+  return mindspore::kSuccess;
7853+  ;
7854+}
7855+}  // namespace lite
7856+}  // namespace mindspore
7857diff --git a/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.h b/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.h
7858new file mode 100644
7859index 00000000..dbdd812c
7860--- /dev/null
7861+++ b/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.h
7862@@ -0,0 +1,12 @@
7863+#ifndef OHOS_HDI_NNRT_V1_0_CPP_H
7864+#define OHOS_HDI_NNRT_V1_0_CPP_H
7865+#include "schema/model_generated.h"
7866+#include "include/api/status.h"
7867+namespace mindspore {
7868+namespace lite {
7869+Status CheckPrimitiveSupported(const schema::Primitive *primitive);
7870+Status CheckTensorSupported(const schema::Tensor *primitive);
7871+}  // namespace lite
7872+}  // namespace mindspore
7873+
7874+#endif  // OHOS_HDI_NNRT_V1_0_CPP_H
7875\ No newline at end of file
7876diff --git a/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.cc b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.cc
7877new file mode 100644
7878index 00000000..34897331
7879--- /dev/null
7880+++ b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.cc
7881@@ -0,0 +1,360 @@
7882+/**
7883+ * Copyright 2022 Huawei Technologies Co., Ltd
7884+ *
7885+ * Licensed under the Apache License, Version 2.0 (the "License");
7886+ * you may not use this file except in compliance with the License.
7887+ * You may obtain a copy of the License at
7888+ *
7889+ * http://www.apache.org/licenses/LICENSE-2.0
7890+ *
7891+ * Unless required by applicable law or agreed to in writing, software
7892+ * distributed under the License is distributed on an "AS IS" BASIS,
7893+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
7894+ * See the License for the specific language governing permissions and
7895+ * limitations under the License.
7896+ */
7897+#include "nnrt_delegate.h"
7898+#include "checker/primitive_check.h"
7899+#include "src/common/log_adapter.h"
7900+#include "interfaces/kits/c/neural_network_runtime.h"
7901+#include "interfaces/innerkits/c/neural_network_runtime_inner.h"
7902+#include "nnrt_model_kernel.h"
7903+
7904+mindspore::Status mindspore::NNRTDelegate::Build(DelegateModel<schema::Primitive> *model) {
7905+  if (this->nnrt_lite_graph == nullptr) {
7906+    MS_LOG(ERROR) << "nnrt_lite_graph is nullptr.";
7907+    return mindspore::kLiteError;
7908+  }
7909+  if (this->nnrt_lite_graph->sub_graphs_.empty()) {
7910+    // must have at least one subgraph
7911+    MS_LOG(ERROR) << "must have at least one subgraph";
7912+    return mindspore::kLiteError;
7913+  }
7914+  OH_NN_ReturnCode ret_code;
7915+  OH_NNModel *oh_nnmodel = OH_NNModel_Construct();
7916+  if (oh_nnmodel == nullptr) {
7917+    MS_LOG(ERROR) << "Construct NNModel failed, oh_nnmodel is nullptr.";
7918+    return mindspore::kLiteError;
7919+  }
7920+
7921+  ret_code = OH_NNModel_BuildFromLiteGraph(oh_nnmodel, this->nnrt_lite_graph);
7922+  if (ret_code != OH_NN_SUCCESS) {
7923+    MS_LOG(ERROR) << "Build NNModel failed, OH_NN_ReturnCode = " << ret_code;
7924+    OH_NNModel_Destroy(&oh_nnmodel);
7925+    return mindspore::kLiteError;
7926+  }
7927+  MS_LOG(INFO) << "NNRTDelegate creates NNModel success.";
7928+
7929+  OH_NNCompilation *oh_nn_compilation = nullptr;
7930+  oh_nn_compilation = OH_NNCompilation_Construct(oh_nnmodel);
7931+
7932+  if (oh_nn_compilation == nullptr) {
7933+    MS_LOG(ERROR) << "Construct NNCompilation failed";
7934+    OH_NNModel_Destroy(&oh_nnmodel);
7935+    return mindspore::kLiteError;
7936+  }
7937+  MS_LOG(INFO) << "NNRTDelegate creates NNCompilation success.";
7938+
7939+  const size_t *allDevicesID = nullptr;
7940+  uint32_t device_count = 0;
7941+  ret_code = OH_NNDevice_GetAllDevicesID(&allDevicesID, &device_count);
7942+  if (ret_code != OH_NN_SUCCESS) {
7943+    MS_LOG(ERROR) << "NNModel GetAllDevicesID failed, OH_NN_ReturnCode = " << ret_code;
7944+    OH_NNCompilation_Destroy(&oh_nn_compilation);
7945+    OH_NNModel_Destroy(&oh_nnmodel);
7946+    return mindspore::kLiteError;
7947+  }
7948+
7949+  if (device_count <= 0) {
7950+    MS_LOG(WARNING) << "No NNRt Device found, fall back to CPU. ";
7951+    // OH_NNCompilation_Destroy(&oh_nn_compilation);
7952+    // OH_NNModel_Destroy(&oh_nnmodel);
7953+    return mindspore::kSuccess;
7954+  }
7955+  MS_LOG(INFO) << "NNRTDelegate GetAllDevicesID success.";
7956+
7957+  // check if model ops are supported
7958+  const bool *issupported = nullptr;
7959+  uint32_t op_count = 0;
7960+  ret_code = OH_NNModel_GetAvailableOperations(oh_nnmodel, allDevicesID[0], &issupported, &op_count);
7961+  if (ret_code != OH_NN_SUCCESS) {
7962+    MS_LOG(ERROR) << "NNModel GetAvailableOperations failed, OH_NN_ReturnCode = " << ret_code
7963+                  << ", maybe due to dataParcel data length limitaion. Fall back to CPU.";
7964+    OH_NNCompilation_Destroy(&oh_nn_compilation);
7965+    OH_NNModel_Destroy(&oh_nnmodel);
7966+    return mindspore::kSuccess;
7967+  }
7968+  uint32_t supported_op_count = 0;
7969+  for (uint32_t i = 0; i < op_count; i++) {
7970+    if (issupported[i]) {
7971+      supported_op_count++;
7972+    }
7973+  }
7974+  if (op_count != supported_op_count) {
7975+    MS_LOG(WARNING) << "this model has " << op_count << "ops, but NNRT only support " << supported_op_count
7976+                    << " ops, fall back to CPU.";
7977+    // must support all ops, else fall back to CPU
7978+    OH_NNCompilation_Destroy(&oh_nn_compilation);
7979+    OH_NNModel_Destroy(&oh_nnmodel);
7980+    return mindspore::kSuccess;
7981+  }
7982+  MS_LOG(INFO) << "NNRtDelegate supports all op in this model.";
7983+
7984+  ret_code = OH_NNCompilation_SetDevice(oh_nn_compilation, allDevicesID[0]);
7985+
7986+  if (ret_code != OH_NN_SUCCESS) {
7987+    MS_LOG(ERROR) << "NNCompilation SetDevice failed, OH_NN_ReturnCode = " << ret_code;
7988+    OH_NNCompilation_Destroy(&oh_nn_compilation);
7989+    OH_NNModel_Destroy(&oh_nnmodel);
7990+    return mindspore::kLiteError;
7991+  }
7992+
7993+  ret_code = OH_NNCompilation_Build(oh_nn_compilation);
7994+
7995+  if (ret_code != OH_NN_SUCCESS) {
7996+    MS_LOG(ERROR) << "Build NNCompilation failed, OH_NN_ReturnCode = " << ret_code;
7997+    OH_NNCompilation_Destroy(&oh_nn_compilation);
7998+    OH_NNModel_Destroy(&oh_nnmodel);
7999+    return mindspore::kLiteError;
8000+  }
8001+
8002+  MS_LOG(DEBUG) << "NNRTDelegate SetDevice success.";
8003+
8004+  OH_NNExecutor *oh_nn_executor = nullptr;
8005+  oh_nn_executor = OH_NNExecutor_Construct(oh_nn_compilation);
8006+  if (oh_nn_executor == nullptr) {
8007+    MS_LOG(ERROR) << "Construct NNCompilation SetDevice failed, OH_NN_ReturnCode = " << ret_code;
8008+    OH_NNCompilation_Destroy(&oh_nn_compilation);
8009+    OH_NNModel_Destroy(&oh_nnmodel);
8010+    return mindspore::kLiteError;
8011+  }
8012+  MS_LOG(DEBUG) << "NNRTDelegate creates NNExecutor success.";
8013+  mindspore::Status prepare_data_ret;
8014+  auto nnr_model_kernel = new (std::nothrow) NNRTModelKernel(oh_nn_executor, model->inputs(), model->outputs());
8015+  if (nnr_model_kernel == nullptr) {
8016+    MS_LOG(ERROR) << "new NNRTModelKernel failed";
8017+    return mindspore::kLiteError;
8018+  }
8019+  OH_NNCompilation_Destroy(&oh_nn_compilation);
8020+  OH_NNModel_Destroy(&oh_nnmodel);
8021+  KernelIter from = model->BeginKernelIterator();
8022+  KernelIter end = model->EndKernelIterator();
8023+  model->Replace(from, end, nnr_model_kernel);
8024+
8025+  MS_LOG(INFO) << "NNRTDelegate build  success.";
8026+  return mindspore::kSuccess;
8027+}
8028+
8029+mindspore::Status mindspore::NNRTDelegate::Init() {
8030+  MS_LOG(DEBUG) << "NNRTDelegate init success.";
8031+  return mindspore::kSuccess;
8032+}
8033+mindspore::Status mindspore::NNRTDelegate::PrepareInputs(DelegateModel<schema::Primitive> *model,
8034+                                                         OH_NNExecutor *oh_nn_executor) {
8035+  auto input_tensors = model->inputs();
8036+  for (size_t i = 0; i < input_tensors.size(); i++) {
8037+    auto tensor = input_tensors[i];
8038+    auto tensor_shape = tensor.Shape();
8039+    auto tmp_quant_param = tensor.QuantParams();
8040+    OH_NN_QuantParam *quant_param = nullptr;
8041+    std::vector<uint32_t> bit_num;
8042+    std::vector<double> scale;
8043+    std::vector<int32_t> zero_point;
8044+    if (!tmp_quant_param.empty()) {
8045+      quant_param = new (std::nothrow) OH_NN_QuantParam;
8046+      if (quant_param == nullptr) {
8047+        MS_LOG(ERROR) << "new OH_NN_QuantParam failed.";
8048+        return mindspore::kLiteError;
8049+      }
8050+      for (auto qparam : tmp_quant_param) {
8051+        bit_num.emplace_back(qparam.bit_num);
8052+        scale.emplace_back(qparam.scale);
8053+        zero_point.emplace_back(qparam.zero_point);
8054+      }
8055+      quant_param->quantCount = tmp_quant_param.size();
8056+      quant_param->numBits = bit_num.data();
8057+      quant_param->scale = scale.data();
8058+      quant_param->zeroPoint = zero_point.data();
8059+    }
8060+    auto oprend = new (std::nothrow) OH_NN_Tensor;
8061+    if (oprend == nullptr) {
8062+      MS_LOG(ERROR) << "new OH_NN_Tensor Failed";
8063+      return mindspore::kLiteError;
8064+    }
8065+    oprend->dataType = ConvertDataType(tensor.DataType());
8066+    oprend->dimensionCount = tensor_shape.size();
8067+
8068+    std::vector<int32_t> dimensions_list;
8069+    for (auto shape : tensor_shape) {
8070+      if (shape < INT32_MAX) {
8071+        dimensions_list.emplace_back(static_cast<int32_t>(shape));
8072+      } else {
8073+        MS_LOG(ERROR) << "NNExecutor SetInput failed,tensor dimension is is too large, max dim = " << INT32_MAX
8074+                      << ", but get dimension = " << shape;
8075+        return mindspore::kLiteError;
8076+      }
8077+    }
8078+    oprend->dimensions = dimensions_list.data();
8079+    oprend->quantParam = quant_param;
8080+    oprend->type = OH_NN_TENSOR;
8081+    OH_NN_ReturnCode ret_code =
8082+      OH_NNExecutor_SetInput(oh_nn_executor, i, oprend, tensor.MutableData(), tensor.DataSize());
8083+    delete (oprend);
8084+
8085+    if (!tmp_quant_param.empty()) {
8086+      delete (quant_param);
8087+      quant_param = nullptr;
8088+    }
8089+
8090+    if (ret_code != OH_NN_SUCCESS) {
8091+      MS_LOG(ERROR) << "NNExecutor SetInput failed, current input tensor is" << tensor.Name()
8092+                    << "OH_NN_ReturnCode = " << ret_code;
8093+      return mindspore::kLiteError;
8094+    }
8095+  }
8096+
8097+  return mindspore::kSuccess;
8098+}
8099+OH_NN_DataType mindspore::NNRTDelegate::ConvertDataType(mindspore::DataType data_type) {  // map a MindSpore Lite dtype onto the NNRT C enum
8100+  OH_NN_DataType oh_data_type;
8101+  switch (data_type) {
8102+    case mindspore::DataType::kTypeUnknown:
8103+    case mindspore::DataType::kObjectTypeString:
8104+    case mindspore::DataType::kObjectTypeList:
8105+    case mindspore::DataType::kObjectTypeTuple:
8106+    case mindspore::DataType::kObjectTypeTensorType:
8107+    case mindspore::DataType::kNumberTypeBegin:
8108+    case mindspore::DataType::kNumberTypeEnd:
8109+    case mindspore::DataType::kInvalidType:
8110+      oh_data_type = OH_NN_UNKNOWN;  // non-numeric / sentinel types have no NNRT equivalent
8111+      break;
8112+    case mindspore::DataType::kNumberTypeBool:
8113+      oh_data_type = OH_NN_BOOL;
8114+      break;
8115+    case mindspore::DataType::kNumberTypeInt8:
8116+      oh_data_type = OH_NN_INT8;
8117+      break;
8118+    case mindspore::DataType::kNumberTypeInt16:
8119+      oh_data_type = OH_NN_INT16;
8120+      break;
8121+    case mindspore::DataType::kNumberTypeInt32:
8122+      oh_data_type = OH_NN_INT32;
8123+      break;
8124+    case mindspore::DataType::kNumberTypeInt64:
8125+      oh_data_type = OH_NN_INT64;
8126+      break;
8127+    case mindspore::DataType::kNumberTypeUInt8:
8128+      oh_data_type = OH_NN_UINT8;
8129+      break;
8130+    case mindspore::DataType::kNumberTypeUInt16:
8131+      oh_data_type = OH_NN_UINT16;
8132+      break;
8133+    case mindspore::DataType::kNumberTypeUInt32:
8134+      oh_data_type = OH_NN_UINT32;
8135+      break;
8136+    case mindspore::DataType::kNumberTypeUInt64:
8137+      oh_data_type = OH_NN_UINT64;
8138+      break;
8139+    case mindspore::DataType::kNumberTypeFloat16:
8140+      oh_data_type = OH_NN_FLOAT16;
8141+      break;
8142+    case mindspore::DataType::kNumberTypeFloat32:
8143+      oh_data_type = OH_NN_FLOAT32;
8144+      break;
8145+    case mindspore::DataType::kNumberTypeFloat64:
8146+      oh_data_type = OH_NN_FLOAT64;
8147+      break;
8148+    default: {  // any other (future) dtype value also degrades to OH_NN_UNKNOWN
8149+      oh_data_type = OH_NN_UNKNOWN;
8150+    }
8151+  }
8152+  return oh_data_type;
8153+}
8154+
8155+mindspore::Status mindspore::NNRTDelegate::PrepareOutputs(DelegateModel<schema::Primitive> *model,
8156+                                                          OH_NNExecutor *oh_nn_executor) {  // bind each model output buffer to the NNRT executor
8157+  auto output_tensors = model->outputs();
8158+  for (size_t i = 0; i < output_tensors.size(); i++) {
8159+    auto tensor = output_tensors[i];  // NOTE(review): copies the tensor handle each iteration — confirm MSTensor copy is cheap
8160+    OH_NN_ReturnCode ret_code = OH_NNExecutor_SetOutput(oh_nn_executor, i, tensor.MutableData(), tensor.DataSize());
8161+    if (ret_code != OH_NN_SUCCESS) {
8162+      MS_LOG(ERROR) << "NNExecutor SetOutput failed, current out tensor is" << tensor.Name()
8163+                    << ", OH_NN_ReturnCode = " << ret_code;
8164+      return mindspore::kLiteError;
8165+    }
8166+  }
8167+  return mindspore::kSuccess;
8168+}
8169+
8170+void mindspore::NNRTDelegate::ShallowCopyLiteGraph(const mindspore::lite::LiteGraph &lite_graph) {  // duplicate node/subgraph structure into nnrt_lite_graph; tensors and primitives stay shared with the source graph
8171+  Status ret;
8172+  for (auto node : lite_graph.all_nodes_) {  // reject the whole graph early if any primitive is unsupported by NNRT
8173+    ret = lite::CheckPrimitiveSupported(static_cast<const schema::Primitive *>(node->primitive_));
8174+    if (ret == mindspore::kLiteError) {
8175+      MS_LOG(ERROR) << " primitive supported check failed.";
8176+      return;
8177+    }
8178+  }
8179+  std::vector<LiteGraph::Node *> node_list;
8180+  node_list.reserve(lite_graph.all_nodes_.size());
8181+  // copy node
8182+  for (auto node : lite_graph.all_nodes_) {
8183+    auto new_node = new (std::nothrow) LiteGraph::Node;
8184+    if (new_node == nullptr) {
8185+      MS_LOG(ERROR) << " new LiteGraph::Node failed.";
8186+      return;  // NOTE(review): nodes already pushed into node_list leak on this path — confirm and free
8187+    }
8188+    new_node->name_ = node->name_;
8189+    new_node->op_type_ = node->op_type_;
8190+    new_node->node_type_ = node->node_type_;
8191+    new_node->primitive_ = node->primitive_;  // pointer copy — the primitive remains owned by the source graph
8192+    new_node->base_operator_ = node->base_operator_;
8193+    new_node->input_indices_ = node->input_indices_;
8194+    new_node->output_indices_ = node->output_indices_;
8195+    new_node->quant_type_ = node->quant_type_;
8196+    new_node->device_type_ = node->device_type_;
8197+    node_list.emplace_back(new_node);
8198+  }
8199+  // copy subgraph
8200+  std::vector<LiteGraph::SubGraph *> subgraph_list;
8201+  for (auto subgraph : lite_graph.sub_graphs_) {
8202+    auto new_subgraph = new (std::nothrow) LiteGraph::SubGraph;
8203+    if (new_subgraph == nullptr) {
8204+      MS_LOG(ERROR) << "new LiteGraph::Subgraph failed.";
8205+      return;  // NOTE(review): node_list and any earlier subgraphs leak on this path
8206+    }
8207+    new_subgraph->name_ = subgraph->name_;
8208+    new_subgraph->input_indices_ = subgraph->input_indices_;
8209+    new_subgraph->output_indices_ = subgraph->output_indices_;
8210+    new_subgraph->node_indices_ = subgraph->node_indices_;
8211+    subgraph_list.emplace_back(new_subgraph);
8212+  }
8213+  for (auto tensor : lite_graph.all_tensors_) {
8214+    ret = lite::CheckTensorSupported(static_cast<const schema::Tensor *>(tensor));
8215+    if (ret == mindspore::kLiteError) {
8216+      MS_LOG(ERROR) << "tensor supported check failed.";
8217+      return;  // NOTE(review): node_list and subgraph_list leak on this path
8218+    }
8219+  }
8220+
8221+  nnrt_lite_graph = new (std::nothrow) lite::LiteGraph();
8222+  if (nnrt_lite_graph == nullptr) {
8223+    MS_LOG(ERROR) << "new LiteGraph failed.";
8224+    return;  // NOTE(review): node_list and subgraph_list leak on this path too
8225+  }
8226+
8227+  nnrt_lite_graph->name_ = lite_graph.name_;
8228+  nnrt_lite_graph->version_ = lite_graph.version_;
8229+  nnrt_lite_graph->input_indices_ = lite_graph.input_indices_;
8230+  nnrt_lite_graph->output_indices_ = lite_graph.output_indices_;
8231+  nnrt_lite_graph->all_tensors_ = lite_graph.all_tensors_;  // shallow: tensor objects remain owned by the source graph
8232+  nnrt_lite_graph->all_nodes_ = node_list;
8233+  nnrt_lite_graph->sub_graphs_ = subgraph_list;
8234+  MS_LOG(INFO) << "ShallowCopyLiteGraph success.";
8235+}
8236+
8237+mindspore::NNRTDelegate::~NNRTDelegate() {
8238+  if (this->nnrt_lite_graph != nullptr) {
8239+    // NOTE(review): logged at ERROR although destruction is a normal event; more importantly,
8240+    // nnrt_lite_graph and the nodes/subgraphs copied in ShallowCopyLiteGraph are never freed here —
8241+    // looks like a leak, but all_tensors_/primitives are shared with the source graph, so confirm
8242+    // LiteGraph's destructor semantics before adding a delete (risk of double-free).
8239+    MS_LOG(ERROR) << "Delete NNRTDelegate.";
8240+  }
8241+};
8242diff --git a/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.h b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.h
8243new file mode 100644
8244index 00000000..1be08119
8245--- /dev/null
8246+++ b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.h
8247@@ -0,0 +1,52 @@
8248+/**
8249+ * Copyright 2022 Huawei Technologies Co., Ltd
8250+ *
8251+ * Licensed under the Apache License, Version 2.0 (the "License");
8252+ * you may not use this file except in compliance with the License.
8253+ * You may obtain a copy of the License at
8254+ *
8255+ * http://www.apache.org/licenses/LICENSE-2.0
8256+ *
8257+ * Unless required by applicable law or agreed to in writing, software
8258+ * distributed under the License is distributed on an "AS IS" BASIS,
8259+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
8260+ * See the License for the specific language governing permissions and
8261+ * limitations under the License.
8262+ */
8263+#ifndef MINDSPORE_NNR_DELEGATE_H
8264+#define MINDSPORE_NNR_DELEGATE_H
8265+#include <vector>
8266+#include <map>
8267+#include "include/api/delegate.h"
8268+#include "include/context.h"
8269+#include "include/model.h"
8270+#include "interfaces/kits/c/neural_network_runtime_type.h"
8271+namespace mindspore {
8272+
8273+using namespace lite;  // NOTE(review): a using-directive in a public header leaks lite:: names into every includer — prefer explicit qualification
8274+
8275+class NNRTDelegate : public Delegate {  // delegate that offloads a whole LiteGraph to the OpenHarmony NNRT runtime
8276+ public:
8277+  NNRTDelegate() : Delegate(){};
8278+
8279+  ~NNRTDelegate() override;
8280+
8281+  Status Init() override;
8282+
8283+  Status Build(DelegateModel<schema::Primitive> *model) override;
8284+
8285+  void ShallowCopyLiteGraph(const lite::LiteGraph &liteGraph);  // copies nodes/subgraphs, shares tensors/primitives — see .cc for ownership caveats
8286+
8287+ protected:
8288+  LiteGraph *nnrt_lite_graph = nullptr;  // shallow copy produced by ShallowCopyLiteGraph; lifetime owned by this delegate
8289+
8290+ private:
8291+  //  static LiteGraph* CreateLiteGraph(const LiteGraph &liteGraph);
8292+  Status PrepareInputs(DelegateModel<schema::Primitive> *model, OH_NNExecutor *oh_nn_executor);
8293+  Status PrepareOutputs(DelegateModel<schema::Primitive> *model, OH_NNExecutor *oh_nn_executor);
8294+  OH_NN_DataType ConvertDataType(mindspore::DataType data_type);
8295+};
8296+
8297+}  // namespace mindspore
8298+
8299+#endif  // MINDSPORE_NNR_DELEGATE_H
8300diff --git a/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.cc b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.cc
8301new file mode 100644
8302index 00000000..5acf2e9a
8303--- /dev/null
8304+++ b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.cc
8305@@ -0,0 +1,175 @@
8306+/**
8307+ * Copyright 2022 Huawei Technologies Co., Ltd
8308+ *
8309+ * Licensed under the Apache License, Version 2.0 (the "License");
8310+ * you may not use this file except in compliance with the License.
8311+ * You may obtain a copy of the License at
8312+ *
8313+ * http://www.apache.org/licenses/LICENSE-2.0
8314+ *
8315+ * Unless required by applicable law or agreed to in writing, software
8316+ * distributed under the License is distributed on an "AS IS" BASIS,
8317+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
8318+ * See the License for the specific language governing permissions and
8319+ * limitations under the License.
8320+ */
8321+#include <include/errorcode.h>
8322+#include "nnrt_model_kernel.h"
8323+int mindspore::NNRTModelKernel::Prepare() { return 0; }  // no-op: the NNRT executor is fully built before this kernel is constructed
8324+int mindspore::NNRTModelKernel::Execute() {  // bind I/O buffers to the executor, then run the whole delegated model
8325+  lite::STATUS ret_val = PrepareInputs();
8326+  if (ret_val != lite::RET_OK) {
8327+    MS_LOG(ERROR) << "NNRTModelKernel PrepareInputs failed, STATUS is " << ret_val;
8328+    return ret_val;
8329+  }
8330+  ret_val = TransferOutputs();  // binds output buffers *before* Run — NNRT writes results into them during OH_NNExecutor_Run
8331+  if (ret_val != lite::RET_OK) {
8332+    MS_LOG(ERROR) << "NNRTModelKernel TransferOutputs failed, STATUS is " << ret_val;
8333+    return ret_val;
8334+  }
8335+  MS_LOG(INFO) << "Running NNRtModel Kernel...";
8336+  OH_NN_ReturnCode ret_code;
8337+  ret_code = OH_NNExecutor_Run(this->oh_nn_executor);
8338+
8339+  if (ret_code != OH_NN_SUCCESS) {
8340+    MS_LOG(ERROR) << "NNExecutor Run failed, OH_NN_ReturnCode = " << ret_code;
8341+    return lite::RET_ERROR;
8342+  }
8343+  MS_LOG(INFO) << "Run NNRtModel Kernel success.";
8344+
8345+  return lite::RET_OK;
8346+}
8347+
8348+OH_NN_DataType mindspore::NNRTModelKernel::ConvertDataType(mindspore::DataType data_type) {  // NOTE(review): duplicates NNRTDelegate::ConvertDataType — consider one shared helper
8349+  OH_NN_DataType oh_data_type;
8350+  switch (data_type) {
8351+    case DataType::kTypeUnknown:
8352+    case DataType::kObjectTypeString:
8353+    case DataType::kObjectTypeList:
8354+    case DataType::kObjectTypeTuple:
8355+    case DataType::kObjectTypeTensorType:
8356+    case DataType::kNumberTypeBegin:
8357+    case DataType::kNumberTypeEnd:
8358+    case DataType::kInvalidType:
8359+      oh_data_type = OH_NN_UNKNOWN;  // non-numeric / sentinel types have no NNRT equivalent
8360+      break;
8361+    case DataType::kNumberTypeBool:
8362+      oh_data_type = OH_NN_BOOL;
8363+      break;
8364+    case DataType::kNumberTypeInt8:
8365+      oh_data_type = OH_NN_INT8;
8366+      break;
8367+    case DataType::kNumberTypeInt16:
8368+      oh_data_type = OH_NN_INT16;
8369+      break;
8370+    case DataType::kNumberTypeInt32:
8371+      oh_data_type = OH_NN_INT32;
8372+      break;
8373+    case DataType::kNumberTypeInt64:
8374+      oh_data_type = OH_NN_INT64;
8375+      break;
8376+    case DataType::kNumberTypeUInt8:
8377+      oh_data_type = OH_NN_UINT8;
8378+      break;
8379+    case DataType::kNumberTypeUInt16:
8380+      oh_data_type = OH_NN_UINT16;
8381+      break;
8382+    case DataType::kNumberTypeUInt32:
8383+      oh_data_type = OH_NN_UINT32;
8384+      break;
8385+    case DataType::kNumberTypeUInt64:
8386+      oh_data_type = OH_NN_UINT64;
8387+      break;
8388+    case DataType::kNumberTypeFloat16:
8389+      oh_data_type = OH_NN_FLOAT16;
8390+      break;
8391+    case DataType::kNumberTypeFloat32:
8392+      oh_data_type = OH_NN_FLOAT32;
8393+      break;
8394+    case DataType::kNumberTypeFloat64:
8395+      oh_data_type = OH_NN_FLOAT64;
8396+      break;
8397+    default: {  // any other (future) dtype value also degrades to OH_NN_UNKNOWN
8398+      oh_data_type = OH_NN_UNKNOWN;
8399+    }
8400+  }
8401+  return oh_data_type;
8402+}
8403+int mindspore::NNRTModelKernel::PrepareInputs() {  // bind every model input (data, shape, dtype, quant params) to the NNRT executor
8404+  auto input_tensors = this->inputs();
8405+  for (size_t i = 0; i < input_tensors.size(); i++) {  // size_t: avoid signed/unsigned comparison with .size()
8406+    auto tensor = input_tensors[i];
8407+    auto tensor_shape = tensor.Shape();
8408+    auto tmp_quant_param = tensor.QuantParams();
8409+    OH_NN_QuantParam *quant_param = nullptr;
8410+    // backing storage for the quant-param arrays; must stay alive until OH_NNExecutor_SetInput returns
8411+    std::vector<uint32_t> bit_num;
8412+    std::vector<double> scale;
8413+    std::vector<int32_t> zero_point;
8414+    if (!tmp_quant_param.empty()) {
8415+      quant_param = (new (std::nothrow) OH_NN_QuantParam);
8416+      if (quant_param == nullptr) {
8417+        MS_LOG(ERROR) << "new OH_NN_QuantParam failed.";
8418+        return lite::RET_NULL_PTR;
8419+      }
8420+      for (auto qparam : tmp_quant_param) {
8421+        bit_num.emplace_back(qparam.bit_num);
8422+        scale.emplace_back(qparam.scale);
8423+        zero_point.emplace_back(qparam.zero_point);
8424+      }
8425+      quant_param->quantCount = tmp_quant_param.size();
8426+      quant_param->numBits = bit_num.data();
8427+      quant_param->scale = scale.data();
8428+      quant_param->zeroPoint = zero_point.data();
8429+    }
8430+    auto oprend = new (std::nothrow) OH_NN_Tensor;
8431+    if (oprend == nullptr) {
8432+      MS_LOG(ERROR) << "new OH_NN_Tensor Failed";
8433+      delete quant_param; return lite::RET_ERROR;  // fix: quant_param was leaked on this path (delete nullptr is a no-op)
8434+    }
8435+    oprend->dataType = ConvertDataType(tensor.DataType());
8436+    oprend->dimensionCount = tensor_shape.size();
8437+
8438+    std::vector<int32_t> dimensions_list;
8439+    for (auto shape : tensor_shape) {
8440+      if (shape < INT32_MAX) {
8441+        dimensions_list.emplace_back(static_cast<int32_t>(shape));
8442+      } else {
8443+        MS_LOG(ERROR) << "NNExecutor SetInput failed, tensor dimension is too large, max dim = " << INT32_MAX
8444+                      << ", but get dimension = " << shape;
8445+        delete oprend; delete quant_param; return lite::RET_ERROR;  // fix: both were leaked on this error path
8446+      }
8447+    }
8448+    oprend->dimensions = dimensions_list.data();
8449+    oprend->quantParam = quant_param;
8450+    oprend->type = OH_NN_TENSOR;
8451+    OH_NN_ReturnCode ret_code =
8452+      OH_NNExecutor_SetInput(oh_nn_executor, i, oprend, tensor.MutableData(), tensor.DataSize());
8453+    delete (oprend);
8454+
8455+    if (!tmp_quant_param.empty()) {
8456+      delete quant_param;  // fix: was free() on memory obtained with new — mismatched allocator/deallocator is UB; the delegate-side copy already uses delete
8457+      quant_param = nullptr;
8458+    }
8459+
8460+    if (ret_code != OH_NN_SUCCESS) {
8461+      MS_LOG(ERROR) << "NNExecutor SetInput failed, current input tensor is " << tensor.Name()
8462+                    << ", OH_NN_ReturnCode = " << ret_code;
8463+      return lite::RET_ERROR;
8464+    }
8465+  }
8466+
8467+  return lite::RET_OK;
8468+}
8468+int mindspore::NNRTModelKernel::TransferOutputs() {  // bind output buffers to the executor (called before OH_NNExecutor_Run, despite the name)
8469+  auto output_tensors = this->outputs();
8470+  for (size_t i = 0; i < output_tensors.size(); i++) {
8471+    auto tensor = output_tensors[i];  // NOTE(review): copies the tensor handle each iteration — confirm MSTensor copy is cheap
8472+    OH_NN_ReturnCode ret_code = OH_NNExecutor_SetOutput(oh_nn_executor, i, tensor.MutableData(), tensor.DataSize());
8473+    if (ret_code != OH_NN_SUCCESS) {
8474+      MS_LOG(ERROR) << "NNExecutor SetOutput failed, current out tensor is" << tensor.Name()
8475+                    << ", OH_NN_ReturnCode = " << ret_code;
8476+      return lite::RET_ERROR;
8477+    }
8478+  }
8479+  return lite::RET_OK;
8480+}
8481diff --git a/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.h b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.h
8482new file mode 100644
8483index 00000000..cf9481df
8484--- /dev/null
8485+++ b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.h
8486@@ -0,0 +1,57 @@
8487+/**
8488+ * Copyright 2022 Huawei Technologies Co., Ltd
8489+ *
8490+ * Licensed under the Apache License, Version 2.0 (the "License");
8491+ * you may not use this file except in compliance with the License.
8492+ * You may obtain a copy of the License at
8493+ *
8494+ * http://www.apache.org/licenses/LICENSE-2.0
8495+ *
8496+ * Unless required by applicable law or agreed to in writing, software
8497+ * distributed under the License is distributed on an "AS IS" BASIS,
8498+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
8499+ * See the License for the specific language governing permissions and
8500+ * limitations under the License.
8501+ */
8502+#ifndef LITE_NNRT_MODEL_KERNEL_H
8503+#define LITE_NNRT_MODEL_KERNEL_H
8504+#include <vector>
8505+#include <queue>
8506+#include <map>
8507+#include <utility>
8508+#include "include/api/kernel.h"
8509+#include "interfaces/kits/c/neural_network_runtime.h"
8510+#include "src/common/log_adapter.h"
8511+#include "include/errorcode.h"
8512+
8513+namespace mindspore {
8514+
8515+class NNRTModelKernel : public kernel::Kernel {
8516+  /**
8517+   * NNRT cannot run a single op, only a whole compiled model, so the entire delegated model is wrapped into this one kernel.
8518+   * */
8519+ public:
8520+  NNRTModelKernel(OH_NNExecutor *oh_nn_executor, const std::vector<mindspore::MSTensor> &inputs,
8521+                  const std::vector<mindspore::MSTensor> &outputs)
8522+      : kernel::Kernel(inputs, outputs, nullptr, nullptr), oh_nn_executor(oh_nn_executor) {}
8523+  int Prepare() override;
8524+  int Execute() override;
8525+  int ReSize() override {
8526+    MS_LOG(ERROR) << "NNRT does not support the resize function temporarily.";
8527+    return lite::RET_ERROR;
8528+  };
8529+  OH_NN_DataType ConvertDataType(mindspore::DataType data_type);
8530+  int PrepareInputs();
8531+  int TransferOutputs();
8532+  ~NNRTModelKernel() override {
8533+    MS_LOG(INFO) << "start NNExecutor Destroy.";
8534+    OH_NNExecutor_Destroy(&oh_nn_executor);
8535+    MS_LOG(INFO) << "start NNExecutor Finish.";
8536+  }
8537+
8538+ protected:
8539+  OH_NNExecutor *oh_nn_executor = nullptr;  // owned: released in the destructor via OH_NNExecutor_Destroy
8540+};
8541+}  // namespace mindspore
8542+
8543+#endif  // LITE_NNRT_MODEL_KERNEL_H
8544--
85452.34.1
8546
8547