From 5907d68ca0fe549b5fed290f05978db5c2034865 Mon Sep 17 00:00:00 2001 From: Zhu Guodong Date: Mon, 6 Mar 2023 16:05:44 +0800 Subject: [PATCH 3/4] implement mindir module and support nnrt delegate --- mindspore/lite/mindir/BUILD.gn | 52 + mindspore/lite/mindir/CMakeLists.txt | 31 + mindspore/lite/mindir/include/mindir.h | 423 ++ .../lite/mindir/include/mindir_lite_graph.h | 57 + .../lite/mindir/include/mindir_primitive.h | 15 + mindspore/lite/mindir/include/mindir_tensor.h | 45 + mindspore/lite/mindir/include/mindir_types.h | 210 + .../lite/mindir/inner_headers/lite_graph.h | 27 + .../inner_headers/mindir_memory_manager.h | 33 + mindspore/lite/mindir/inner_headers/utils.h | 28 + mindspore/lite/mindir/src/mindir.cc | 4258 +++++++++++++++++ .../lite/mindir/src/mindir_memory_manager.cc | 122 + .../lite/mindir/src/mindir_nnrt_lite_graph.cc | 87 + .../src/mindir_nnrt_lite_graph_to_model.cc | 1496 ++++++ mindspore/lite/mindir/src/mindir_tensor.cc | 389 ++ mindspore/lite/mindir/src/utils.cc | 96 + mindspore/lite/mindir/tests/BUILD.gn | 35 + mindspore/lite/mindir/tests/mindir_test.cc | 51 + .../src/runtime/delegate/nnrt/CMakeLists.txt | 30 + .../delegate/nnrt/checker/primitive_check.cc | 187 + .../delegate/nnrt/checker/primitive_check.h | 12 + .../runtime/delegate/nnrt/nnrt_delegate.cc | 360 ++ .../src/runtime/delegate/nnrt/nnrt_delegate.h | 52 + .../delegate/nnrt/nnrt_model_kernel.cc | 175 + .../runtime/delegate/nnrt/nnrt_model_kernel.h | 57 + 25 files changed, 8328 insertions(+) create mode 100644 mindspore/lite/mindir/BUILD.gn create mode 100644 mindspore/lite/mindir/CMakeLists.txt create mode 100644 mindspore/lite/mindir/include/mindir.h create mode 100644 mindspore/lite/mindir/include/mindir_lite_graph.h create mode 100644 mindspore/lite/mindir/include/mindir_primitive.h create mode 100644 mindspore/lite/mindir/include/mindir_tensor.h create mode 100644 mindspore/lite/mindir/include/mindir_types.h create mode 100644 mindspore/lite/mindir/inner_headers/lite_graph.h create mode 100644 mindspore/lite/mindir/inner_headers/mindir_memory_manager.h create mode 100644 mindspore/lite/mindir/inner_headers/utils.h create mode 100644 mindspore/lite/mindir/src/mindir.cc create mode 100644 mindspore/lite/mindir/src/mindir_memory_manager.cc create mode 100644 mindspore/lite/mindir/src/mindir_nnrt_lite_graph.cc create mode 100644 mindspore/lite/mindir/src/mindir_nnrt_lite_graph_to_model.cc create mode 100644 mindspore/lite/mindir/src/mindir_tensor.cc create mode 100644 mindspore/lite/mindir/src/utils.cc create mode 100644 mindspore/lite/mindir/tests/BUILD.gn create mode 100644 mindspore/lite/mindir/tests/mindir_test.cc create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/CMakeLists.txt create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.cc create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.h create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.cc create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.h create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.cc create mode 100644 mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.h diff --git a/mindspore/lite/mindir/BUILD.gn b/mindspore/lite/mindir/BUILD.gn new file mode 100644 index 00000000..2ef8225d --- /dev/null +++ b/mindspore/lite/mindir/BUILD.gn @@ -0,0 +1,52 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +import("//build/ohos.gni") + +# ohos_group("mindir_test") { +# deps = [ +# "tests:mindir_test", +# ] +# } +ohos_shared_library("mindir_lib") { + include_dirs = [ + "../", + "../../core", + "include", + "inner_headers", + "//third_party/flatbuffers/include", + ] + sources = [ + "../src/common/log.cc", + "src/mindir.cc", + "src/mindir_memory_manager.cc", + "src/mindir_nnrt_lite_graph.cc", + "src/mindir_nnrt_lite_graph_to_model.cc", + "src/mindir_tensor.cc", + "src/utils.cc", + ] + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "hdf_core:libhdi", + "hilog_native:libhilog", + "ipc:ipc_core", + ] + configs = ["../:disable_android"] + defines = [ "MS_COMPILE_OHOS" ] + deps = [ "//drivers/interface/nnrt/v1_0:nnrt_idl_headers" ] + output_name = "mindir" + innerapi_tags = [ "platformsdk_indirect"] + part_name = "mindspore" +} diff --git a/mindspore/lite/mindir/CMakeLists.txt b/mindspore/lite/mindir/CMakeLists.txt new file mode 100644 index 00000000..42b89711 --- /dev/null +++ b/mindspore/lite/mindir/CMakeLists.txt @@ -0,0 +1,31 @@ +#set(CMAKE_TOOLCHAIN_FILE /heaven/wty/devtools/ohos_sdk/native/build/cmake/ohos.toolchain.cmake) +set(OHOS_ARCH arm64-v8a) +set(OHOS_STL c++_static) +set(OHOS_PLATFORM rk3568) +set(CMAKE_CXX_COMPILER /usr/bin/g++) +project(mindir) +cmake_minimum_required(VERSION 3.18) + +file(GLOB source src/*.cc) +file(GLOB convert_source src/converter/*.cpp) +set(mindir_source ../src/common/log.cc) +include_directories(include) +include_directories(inner_headers) +include_directories(../) +include_directories(../../../../../out/rk3568/gen/drivers/interface) +include_directories(../../../../../third_party/flatbuffers/include) +include_directories(../../core) + +include_directories(../../foundation/communication/ipc/interfaces/innerkits/ipc_core/include) +include_directories(../../../../../utils/native/base/include) +add_compile_definitions(MINDIR_INTERFACE) +add_library(mindir SHARED ${source} ${convert_source} ${mindir_source}) +target_link_libraries(mindir ../../../../../out/rk3568/hdf/drivers_interface_nnrt/libnnrt_proxy_1.0.z.so + ../../../../../out/rk3568/commonlibrary/c_utils/libutils.z.so + hilog_ndk.z + ../../../../../out/rk3568/communication/ipc/libipc_core.z.so + ) +file(GLOB test_sources tests/*.cc) + +add_executable(mindir_test ${test_sources}) +target_link_libraries(mindir_test mindir) diff --git a/mindspore/lite/mindir/include/mindir.h b/mindspore/lite/mindir/include/mindir.h new file mode 100644 index 00000000..73cd6898 --- /dev/null +++ b/mindspore/lite/mindir/include/mindir.h @@ -0,0 +1,423 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_MINDIR_H +#define MINDSPORE_LITE_MINDIR_H +#include "mindir_types.h" +#include "mindir_lite_graph.h" +#include "mindir_tensor.h" +#include "mindir_primitive.h" +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +struct Model; +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS + +namespace mindspore { +namespace lite { + +// ********** Model ********** +OHOS::HDI::Nnrt::V1_0::Model *MindIR_LiteGraph_To_Model(const LiteGraph *lite_graph, + const OHOS::HDI::Nnrt::V1_0::SharedBuffer &buffer); +void MindIR_Model_Destroy(OHOS::HDI::Nnrt::V1_0::Model **model); + +// ********** Activation ********** +PrimitivePtr MindIR_Activation_CreatePrimitive(ActivationType activation_type, float alpha, float min_val, + float max_val, bool approximate); +ActivationType MindIR_Activation_GetActivationType(ConstPrimitivePtr primitive); +void MindIR_Activation_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type); +float MindIR_Activation_GetAlpha(ConstPrimitivePtr primitive); +void MindIR_Activation_SetAlpha(PrimitivePtr *primitive, float alpha); +float MindIR_Activation_GetMinVal(ConstPrimitivePtr primitive); +void MindIR_Activation_SetMinVal(PrimitivePtr *primitive, float min_val); +float MindIR_Activation_GetMaxVal(ConstPrimitivePtr primitive); +void MindIR_Activation_SetMaxVal(PrimitivePtr *primitive, float max_val); +bool MindIR_Activation_GetApproximate(ConstPrimitivePtr primitive); +void MindIR_Activation_SetApproximate(PrimitivePtr *primitive, bool approximate); + +// ********** AddFusion ********** +PrimitivePtr MindIR_AddFusion_CreatePrimitive(ActivationType activation_type); +ActivationType MindIR_AddFusion_GetActivationType(ConstPrimitivePtr primitive); +void MindIR_AddFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type); + +// ********** ArgMaxFusion ********** +PrimitivePtr MindIR_ArgMaxFusion_CreatePrimitive(int64_t axis, int64_t top_k, bool keep_dims, bool out_max_value); +int64_t MindIR_ArgMaxFusion_GetAxis(ConstPrimitivePtr primitive); +void MindIR_ArgMaxFusion_SetAxis(PrimitivePtr *primitive, int64_t axis); +int64_t MindIR_ArgMaxFusion_GetTopK(ConstPrimitivePtr primitive); +void MindIR_ArgMaxFusion_SetTopK(PrimitivePtr *primitive, int64_t top_k); +bool MindIR_ArgMaxFusion_GetKeepDims(ConstPrimitivePtr primitive); +void MindIR_ArgMaxFusion_SetKeepDims(PrimitivePtr *primitive, bool keep_dims); +bool MindIR_ArgMaxFusion_GetOutMaxValue(ConstPrimitivePtr primitive); +void MindIR_ArgMaxFusion_SetOutMaxValue(PrimitivePtr *primitive, bool out_max_value); + +// ********** AvgPoolFusion ********** +PrimitivePtr MindIR_AvgPoolFusion_CreatePrimitive(const std::vector &kernel_size, + const std::vector &strides, const std::vector &pad, + PadMode pad_mode, RoundMode round_mode, Format format, bool global, + ActivationType activation_type); +std::vector MindIR_AvgPoolFusion_GetKernelSize(ConstPrimitivePtr primitive); +void MindIR_AvgPoolFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector &kernel_size); +std::vector 
MindIR_AvgPoolFusion_GetStrides(ConstPrimitivePtr primitive); +void MindIR_AvgPoolFusion_SetStrides(PrimitivePtr *primitive, const std::vector &strides); +std::vector MindIR_AvgPoolFusion_GetPad(ConstPrimitivePtr primitive); +void MindIR_AvgPoolFusion_SetPad(PrimitivePtr *primitive, const std::vector &pad); +PadMode MindIR_AvgPoolFusion_GetPadMode(ConstPrimitivePtr primitive); +void MindIR_AvgPoolFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode); +RoundMode MindIR_AvgPoolFusion_GetRoundMode(ConstPrimitivePtr primitive); +void MindIR_AvgPoolFusion_SetRoundMode(PrimitivePtr *primitive, RoundMode round_mode); +Format MindIR_AvgPoolFusion_GetFormat(ConstPrimitivePtr primitive); +void MindIR_AvgPoolFusion_SetFormat(PrimitivePtr *primitive, Format format); +bool MindIR_AvgPoolFusion_GetGlobal(ConstPrimitivePtr primitive); +void MindIR_AvgPoolFusion_SetGlobal(PrimitivePtr *primitive, bool global); +ActivationType MindIR_AvgPoolFusion_GetActivationType(ConstPrimitivePtr primitive); +void MindIR_AvgPoolFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type); + +// ********** BatchToSpaceND ********** +PrimitivePtr MindIR_BatchToSpaceND_CreatePrimitive(const std::vector &block_shape, + const std::vector> &crops); +std::vector MindIR_BatchToSpaceND_GetBlockShape(ConstPrimitivePtr primitive); +void MindIR_BatchToSpaceND_SetBlockShape(PrimitivePtr *primitive, const std::vector &block_shape); +std::vector> MindIR_BatchToSpaceND_GetCrops(ConstPrimitivePtr primitive); +void MindIR_BatchToSpaceND_SetCrops(PrimitivePtr *primitive, const std::vector> &crops); + +// ********** BiasAdd ********** +PrimitivePtr MindIR_BiasAdd_CreatePrimitive(); + +// ********** Cast ********** +PrimitivePtr MindIR_Cast_CreatePrimitive(); + +// ********** Concat ********** +PrimitivePtr MindIR_Concat_CreatePrimitive(int64_t axis); +int64_t MindIR_Concat_GetAxis(ConstPrimitivePtr primitive); +void MindIR_Concat_SetAxis(PrimitivePtr *primitive, int64_t axis); + +// ********** Conv2DFusion ********** +PrimitivePtr MindIR_Conv2DFusion_CreatePrimitive(const std::vector &kernel_size, + const std::vector &stride, + const std::vector &dilation, PadMode pad_mode, + const std::vector &pad_list, int64_t group, + int64_t in_channel, int64_t out_channel, + ActivationType activation_type); +std::vector MindIR_Conv2DFusion_GetKernelSize(ConstPrimitivePtr primitive); +void MindIR_Conv2DFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector &kernel_size); +std::vector MindIR_Conv2DFusion_GetStride(ConstPrimitivePtr primitive); +void MindIR_Conv2DFusion_SetStride(PrimitivePtr *primitive, const std::vector &stride); +std::vector MindIR_Conv2DFusion_GetDilation(ConstPrimitivePtr primitive); +void MindIR_Conv2DFusion_SetDilation(PrimitivePtr *primitive, const std::vector &dilation); +PadMode MindIR_Conv2DFusion_GetPadMode(ConstPrimitivePtr primitive); +void MindIR_Conv2DFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode); +std::vector MindIR_Conv2DFusion_GetPadList(ConstPrimitivePtr primitive); +void MindIR_Conv2DFusion_SetPadList(PrimitivePtr *primitive, const std::vector &pad_list); +int64_t MindIR_Conv2DFusion_GetGroup(ConstPrimitivePtr primitive); +void MindIR_Conv2DFusion_SetGroup(PrimitivePtr *primitive, int64_t group); +int64_t MindIR_Conv2DFusion_GetInChannel(ConstPrimitivePtr primitive); +void MindIR_Conv2DFusion_SetInChannel(PrimitivePtr *primitive, int64_t in_channel); +int64_t MindIR_Conv2DFusion_GetOutChannel(ConstPrimitivePtr primitive); +void 
MindIR_Conv2DFusion_SetOutChannel(PrimitivePtr *primitive, int64_t out_channel); +ActivationType MindIR_Conv2DFusion_GetActivationType(ConstPrimitivePtr primitive); +void MindIR_Conv2DFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type); + +// ********** Conv2dTransposeFusion ********** +PrimitivePtr MindIR_Conv2dTransposeFusion_CreatePrimitive( + const std::vector &kernel_size, const std::vector &stride, const std::vector &dilation, + PadMode pad_mode, const std::vector &pad_list, int64_t group, int64_t in_channel, int64_t out_channel, + ActivationType activation_type, const std::vector &output_paddings); +std::vector MindIR_Conv2dTransposeFusion_GetKernelSize(ConstPrimitivePtr primitive); +void MindIR_Conv2dTransposeFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector &kernel_size); +std::vector MindIR_Conv2dTransposeFusion_GetStride(ConstPrimitivePtr primitive); +void MindIR_Conv2dTransposeFusion_SetStride(PrimitivePtr *primitive, const std::vector &stride); +std::vector MindIR_Conv2dTransposeFusion_GetDilation(ConstPrimitivePtr primitive); +void MindIR_Conv2dTransposeFusion_SetDilation(PrimitivePtr *primitive, const std::vector &dilation); +PadMode MindIR_Conv2dTransposeFusion_GetPadMode(ConstPrimitivePtr primitive); +void MindIR_Conv2dTransposeFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode); +std::vector MindIR_Conv2dTransposeFusion_GetPadList(ConstPrimitivePtr primitive); +void MindIR_Conv2dTransposeFusion_SetPadList(PrimitivePtr *primitive, const std::vector &pad_list); +int64_t MindIR_Conv2dTransposeFusion_GetGroup(ConstPrimitivePtr primitive); +void MindIR_Conv2dTransposeFusion_SetGroup(PrimitivePtr *primitive, int64_t group); +int64_t MindIR_Conv2dTransposeFusion_GetInChannel(ConstPrimitivePtr primitive); +void MindIR_Conv2dTransposeFusion_SetInChannel(PrimitivePtr *primitive, int64_t in_channel); +int64_t MindIR_Conv2dTransposeFusion_GetOutChannel(ConstPrimitivePtr primitive); +void MindIR_Conv2dTransposeFusion_SetOutChannel(PrimitivePtr *primitive, int64_t out_channel); +ActivationType MindIR_Conv2dTransposeFusion_GetActivationType(ConstPrimitivePtr primitive); +void MindIR_Conv2dTransposeFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type); +std::vector MindIR_Conv2dTransposeFusion_GetOutputPaddings(ConstPrimitivePtr primitive); +void MindIR_Conv2dTransposeFusion_SetOutputPaddings(PrimitivePtr *primitive, + const std::vector &output_paddings); + +// ********** DivFusion ********** +PrimitivePtr MindIR_DivFusion_CreatePrimitive(ActivationType activation_type); +ActivationType MindIR_DivFusion_GetActivationType(ConstPrimitivePtr primitive); +void MindIR_DivFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type); + +// ********** Eltwise ********** +PrimitivePtr MindIR_Eltwise_CreatePrimitive(EltwiseMode mode); +EltwiseMode MindIR_Eltwise_GetMode(ConstPrimitivePtr primitive); +void MindIR_Eltwise_SetMode(PrimitivePtr *primitive, EltwiseMode mode); + +// ********** ExpandDims ********** +PrimitivePtr MindIR_ExpandDims_CreatePrimitive(); + +// ********** Fill ********** +PrimitivePtr MindIR_Fill_CreatePrimitive(); + +// ********** FullConnection ********** +PrimitivePtr MindIR_FullConnection_CreatePrimitive(bool has_bias, bool use_axis, int64_t axis, + ActivationType activation_type); +bool MindIR_FullConnection_GetHasBias(ConstPrimitivePtr primitive); +void MindIR_FullConnection_SetHasBias(PrimitivePtr *primitive, bool has_bias); +bool 
MindIR_FullConnection_GetUseAxis(ConstPrimitivePtr primitive); +void MindIR_FullConnection_SetUseAxis(PrimitivePtr *primitive, bool use_axis); +int64_t MindIR_FullConnection_GetAxis(ConstPrimitivePtr primitive); +void MindIR_FullConnection_SetAxis(PrimitivePtr *primitive, int64_t axis); +ActivationType MindIR_FullConnection_GetActivationType(ConstPrimitivePtr primitive); +void MindIR_FullConnection_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type); + +// ********** FusedBatchNorm ********** +PrimitivePtr MindIR_FusedBatchNorm_CreatePrimitive(float epsilon); +float MindIR_FusedBatchNorm_GetEpsilon(ConstPrimitivePtr primitive); +void MindIR_FusedBatchNorm_SetEpsilon(PrimitivePtr *primitive, float epsilon); + +// ********** Gather ********** +PrimitivePtr MindIR_Gather_CreatePrimitive(); + +// ********** LayerNormFusion ********** +PrimitivePtr MindIR_LayerNormFusion_CreatePrimitive(int64_t begin_norm_axis, float epsilon, bool elementwise_affine, + int64_t begin_params_axis); +int64_t MindIR_LayerNormFusion_GetBeginNormAxis(ConstPrimitivePtr primitive); +void MindIR_LayerNormFusion_SetBeginNormAxis(PrimitivePtr *primitive, int64_t begin_norm_axis); +float MindIR_LayerNormFusion_GetEpsilon(ConstPrimitivePtr primitive); +void MindIR_LayerNormFusion_SetEpsilon(PrimitivePtr *primitive, float epsilon); +bool MindIR_LayerNormFusion_GetElementwiseAffine(ConstPrimitivePtr primitive); +void MindIR_LayerNormFusion_SetElementwiseAffine(PrimitivePtr *primitive, bool elementwise_affine); +int64_t MindIR_LayerNormFusion_GetBeginParamsAxis(ConstPrimitivePtr primitive); +void MindIR_LayerNormFusion_SetBeginParamsAxis(PrimitivePtr *primitive, int64_t begin_params_axis); + +// ********** LessEqual ********** +PrimitivePtr MindIR_LessEqual_CreatePrimitive(); + +// ********** MatMulFusion ********** +PrimitivePtr MindIR_MatMulFusion_CreatePrimitive(bool transpose_a, bool transpose_b, ActivationType activation_type); +bool MindIR_MatMulFusion_GetTransposeA(ConstPrimitivePtr primitive); +void MindIR_MatMulFusion_SetTransposeA(PrimitivePtr *primitive, bool transpose_a); +bool MindIR_MatMulFusion_GetTransposeB(ConstPrimitivePtr primitive); +void MindIR_MatMulFusion_SetTransposeB(PrimitivePtr *primitive, bool transpose_b); +ActivationType MindIR_MatMulFusion_GetActivationType(ConstPrimitivePtr primitive); +void MindIR_MatMulFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type); + +// ********** Maximum ********** +PrimitivePtr MindIR_Maximum_CreatePrimitive(); + +// ********** MaxPoolFusion ********** +PrimitivePtr MindIR_MaxPoolFusion_CreatePrimitive(const std::vector &kernel_size, + const std::vector &strides, const std::vector &pad, + PadMode pad_mode, Format format, bool global, + ActivationType activation_type); +std::vector MindIR_MaxPoolFusion_GetKernelSize(ConstPrimitivePtr primitive); +void MindIR_MaxPoolFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector &kernel_size); +std::vector MindIR_MaxPoolFusion_GetStrides(ConstPrimitivePtr primitive); +void MindIR_MaxPoolFusion_SetStrides(PrimitivePtr *primitive, const std::vector &strides); +std::vector MindIR_MaxPoolFusion_GetPad(ConstPrimitivePtr primitive); +void MindIR_MaxPoolFusion_SetPad(PrimitivePtr *primitive, const std::vector &pad); +PadMode MindIR_MaxPoolFusion_GetPadMode(ConstPrimitivePtr primitive); +void MindIR_MaxPoolFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode); +Format MindIR_MaxPoolFusion_GetFormat(ConstPrimitivePtr primitive); +void 
MindIR_MaxPoolFusion_SetFormat(PrimitivePtr *primitive, Format format); +bool MindIR_MaxPoolFusion_GetGlobal(ConstPrimitivePtr primitive); +void MindIR_MaxPoolFusion_SetGlobal(PrimitivePtr *primitive, bool global); +ActivationType MindIR_MaxPoolFusion_GetActivationType(ConstPrimitivePtr primitive); +void MindIR_MaxPoolFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type); + +// ********** MulFusion ********** +PrimitivePtr MindIR_MulFusion_CreatePrimitive(ActivationType activation_type); +ActivationType MindIR_MulFusion_GetActivationType(ConstPrimitivePtr primitive); +void MindIR_MulFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type); + +// ********** OneHot ********** +PrimitivePtr MindIR_OneHot_CreatePrimitive(int64_t axis); +int64_t MindIR_OneHot_GetAxis(ConstPrimitivePtr primitive); +void MindIR_OneHot_SetAxis(PrimitivePtr *primitive, int64_t axis); + +// ********** PadFusion ********** +PrimitivePtr MindIR_PadFusion_CreatePrimitive(const std::vector> &paddings, + PaddingMode padding_mode, float constant_value); +std::vector> MindIR_PadFusion_GetPaddings(ConstPrimitivePtr primitive); +void MindIR_PadFusion_SetPaddings(PrimitivePtr *primitive, const std::vector> &paddings); +PaddingMode MindIR_PadFusion_GetPaddingMode(ConstPrimitivePtr primitive); +void MindIR_PadFusion_SetPaddingMode(PrimitivePtr *primitive, PaddingMode padding_mode); +float MindIR_PadFusion_GetConstantValue(ConstPrimitivePtr primitive); +void MindIR_PadFusion_SetConstantValue(PrimitivePtr *primitive, float constant_value); + +// ********** PowFusion ********** +PrimitivePtr MindIR_PowFusion_CreatePrimitive(float scale, float shift); +float MindIR_PowFusion_GetScale(ConstPrimitivePtr primitive); +void MindIR_PowFusion_SetScale(PrimitivePtr *primitive, float scale); +float MindIR_PowFusion_GetShift(ConstPrimitivePtr primitive); +void MindIR_PowFusion_SetShift(PrimitivePtr *primitive, float shift); + +// ********** PReLUFusion ********** +PrimitivePtr MindIR_PReLUFusion_CreatePrimitive(bool channel_shared); +bool MindIR_PReLUFusion_GetChannelShared(ConstPrimitivePtr primitive); +void MindIR_PReLUFusion_SetChannelShared(PrimitivePtr *primitive, bool channel_shared); + +// ********** QuantDTypeCast ********** +PrimitivePtr MindIR_QuantDTypeCast_CreatePrimitive(int64_t src_t, int64_t dst_t); +int64_t MindIR_QuantDTypeCast_GetSrcT(ConstPrimitivePtr primitive); +void MindIR_QuantDTypeCast_SetSrcT(PrimitivePtr *primitive, int64_t src_t); +int64_t MindIR_QuantDTypeCast_GetDstT(ConstPrimitivePtr primitive); +void MindIR_QuantDTypeCast_SetDstT(PrimitivePtr *primitive, int64_t dst_t); + +// ********** ReduceFusion ********** +PrimitivePtr MindIR_ReduceFusion_CreatePrimitive(bool keep_dims, ReduceMode mode, bool reduce_to_end, float coeff); +bool MindIR_ReduceFusion_GetKeepDims(ConstPrimitivePtr primitive); +void MindIR_ReduceFusion_SetKeepDims(PrimitivePtr *primitive, bool keep_dims); +ReduceMode MindIR_ReduceFusion_GetMode(ConstPrimitivePtr primitive); +void MindIR_ReduceFusion_SetMode(PrimitivePtr *primitive, ReduceMode mode); +bool MindIR_ReduceFusion_GetReduceToEnd(ConstPrimitivePtr primitive); +void MindIR_ReduceFusion_SetReduceToEnd(PrimitivePtr *primitive, bool reduce_to_end); +float MindIR_ReduceFusion_GetCoeff(ConstPrimitivePtr primitive); +void MindIR_ReduceFusion_SetCoeff(PrimitivePtr *primitive, float coeff); + +// ********** Reshape ********** +PrimitivePtr MindIR_Reshape_CreatePrimitive(); + +// ********** Resize ********** +PrimitivePtr 
MindIR_Resize_CreatePrimitive(ResizeMethod method, int64_t new_height, int64_t new_width, + bool preserve_aspect_ratio, + CoordinateTransformMode coordinate_transform_mode, float cubic_coeff, + int64_t exclude_outside, float extrapolation_value, + NearestMode nearest_mode); +ResizeMethod MindIR_Resize_GetMethod(ConstPrimitivePtr primitive); +void MindIR_Resize_SetMethod(PrimitivePtr *primitive, ResizeMethod method); +int64_t MindIR_Resize_GetNewHeight(ConstPrimitivePtr primitive); +void MindIR_Resize_SetNewHeight(PrimitivePtr *primitive, int64_t new_height); +int64_t MindIR_Resize_GetNewWidth(ConstPrimitivePtr primitive); +void MindIR_Resize_SetNewWidth(PrimitivePtr *primitive, int64_t new_width); +bool MindIR_Resize_GetPreserveAspectRatio(ConstPrimitivePtr primitive); +void MindIR_Resize_SetPreserveAspectRatio(PrimitivePtr *primitive, bool preserve_aspect_ratio); +CoordinateTransformMode MindIR_Resize_GetCoordinateTransformMode(ConstPrimitivePtr primitive); +void MindIR_Resize_SetCoordinateTransformMode(PrimitivePtr *primitive, + CoordinateTransformMode coordinate_transform_mode); +float MindIR_Resize_GetCubicCoeff(ConstPrimitivePtr primitive); +void MindIR_Resize_SetCubicCoeff(PrimitivePtr *primitive, float cubic_coeff); +int64_t MindIR_Resize_GetExcludeOutside(ConstPrimitivePtr primitive); +void MindIR_Resize_SetExcludeOutside(PrimitivePtr *primitive, int64_t exclude_outside); +float MindIR_Resize_GetExtrapolationValue(ConstPrimitivePtr primitive); +void MindIR_Resize_SetExtrapolationValue(PrimitivePtr *primitive, float extrapolation_value); +NearestMode MindIR_Resize_GetNearestMode(ConstPrimitivePtr primitive); +void MindIR_Resize_SetNearestMode(PrimitivePtr *primitive, NearestMode nearest_mode); + +// ********** Rsqrt ********** +PrimitivePtr MindIR_Rsqrt_CreatePrimitive(); + +// ********** ScaleFusion ********** +PrimitivePtr MindIR_ScaleFusion_CreatePrimitive(int64_t axis, ActivationType activation_type); +int64_t MindIR_ScaleFusion_GetAxis(ConstPrimitivePtr primitive); +void MindIR_ScaleFusion_SetAxis(PrimitivePtr *primitive, int64_t axis); +ActivationType MindIR_ScaleFusion_GetActivationType(ConstPrimitivePtr primitive); +void MindIR_ScaleFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type); + +// ********** Shape ********** +PrimitivePtr MindIR_Shape_CreatePrimitive(); + +// ********** SliceFusion ********** +PrimitivePtr MindIR_SliceFusion_CreatePrimitive(const std::vector &axes); +std::vector MindIR_SliceFusion_GetAxes(ConstPrimitivePtr primitive); +void MindIR_SliceFusion_SetAxes(PrimitivePtr *primitive, const std::vector &axes); + +// ********** Softmax ********** +PrimitivePtr MindIR_Softmax_CreatePrimitive(const std::vector &axis); +std::vector MindIR_Softmax_GetAxis(ConstPrimitivePtr primitive); +void MindIR_Softmax_SetAxis(PrimitivePtr *primitive, const std::vector &axis); + +// ********** SpaceToBatchND ********** +PrimitivePtr MindIR_SpaceToBatchND_CreatePrimitive(const std::vector &block_shape, + const std::vector> &paddings); +std::vector MindIR_SpaceToBatchND_GetBlockShape(ConstPrimitivePtr primitive); +void MindIR_SpaceToBatchND_SetBlockShape(PrimitivePtr *primitive, const std::vector &block_shape); +std::vector> MindIR_SpaceToBatchND_GetPaddings(ConstPrimitivePtr primitive); +void MindIR_SpaceToBatchND_SetPaddings(PrimitivePtr *primitive, const std::vector> &paddings); + +// ********** Split ********** +PrimitivePtr MindIR_Split_CreatePrimitive(int64_t output_num, const std::vector &size_splits, int64_t axis); +int64_t 
MindIR_Split_GetOutputNum(ConstPrimitivePtr primitive); +void MindIR_Split_SetOutputNum(PrimitivePtr *primitive, int64_t output_num); +std::vector MindIR_Split_GetSizeSplits(ConstPrimitivePtr primitive); +void MindIR_Split_SetSizeSplits(PrimitivePtr *primitive, const std::vector &size_splits); +int64_t MindIR_Split_GetAxis(ConstPrimitivePtr primitive); +void MindIR_Split_SetAxis(PrimitivePtr *primitive, int64_t axis); + +// ********** Sqrt ********** +PrimitivePtr MindIR_Sqrt_CreatePrimitive(); + +// ********** SquaredDifference ********** +PrimitivePtr MindIR_SquaredDifference_CreatePrimitive(); + +// ********** Squeeze ********** +PrimitivePtr MindIR_Squeeze_CreatePrimitive(const std::vector &axis); +std::vector MindIR_Squeeze_GetAxis(ConstPrimitivePtr primitive); +void MindIR_Squeeze_SetAxis(PrimitivePtr *primitive, const std::vector &axis); + +// ********** Stack ********** +PrimitivePtr MindIR_Stack_CreatePrimitive(int64_t axis); +int64_t MindIR_Stack_GetAxis(ConstPrimitivePtr primitive); +void MindIR_Stack_SetAxis(PrimitivePtr *primitive, int64_t axis); + +// ********** StridedSlice ********** +PrimitivePtr MindIR_StridedSlice_CreatePrimitive(int64_t begin_mask, int64_t end_mask, int64_t ellipsis_mask, + int64_t new_axis_mask, int64_t shrink_axis_mask); +int64_t MindIR_StridedSlice_GetBeginMask(ConstPrimitivePtr primitive); +void MindIR_StridedSlice_SetBeginMask(PrimitivePtr *primitive, int64_t begin_mask); +int64_t MindIR_StridedSlice_GetEndMask(ConstPrimitivePtr primitive); +void MindIR_StridedSlice_SetEndMask(PrimitivePtr *primitive, int64_t end_mask); +int64_t MindIR_StridedSlice_GetEllipsisMask(ConstPrimitivePtr primitive); +void MindIR_StridedSlice_SetEllipsisMask(PrimitivePtr *primitive, int64_t ellipsis_mask); +int64_t MindIR_StridedSlice_GetNewAxisMask(ConstPrimitivePtr primitive); +void MindIR_StridedSlice_SetNewAxisMask(PrimitivePtr *primitive, int64_t new_axis_mask); +int64_t MindIR_StridedSlice_GetShrinkAxisMask(ConstPrimitivePtr primitive); +void MindIR_StridedSlice_SetShrinkAxisMask(PrimitivePtr *primitive, int64_t shrink_axis_mask); + +// ********** SubFusion ********** +PrimitivePtr MindIR_SubFusion_CreatePrimitive(ActivationType activation_type); +ActivationType MindIR_SubFusion_GetActivationType(ConstPrimitivePtr primitive); +void MindIR_SubFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type); + +// ********** TileFusion ********** +PrimitivePtr MindIR_TileFusion_CreatePrimitive(const std::vector &dims); +std::vector MindIR_TileFusion_GetDims(ConstPrimitivePtr primitive); +void MindIR_TileFusion_SetDims(PrimitivePtr *primitive, const std::vector &dims); + +// ********** TopKFusion ********** +PrimitivePtr MindIR_TopKFusion_CreatePrimitive(bool sorted, int64_t axis); +bool MindIR_TopKFusion_GetSorted(ConstPrimitivePtr primitive); +void MindIR_TopKFusion_SetSorted(PrimitivePtr *primitive, bool sorted); +int64_t MindIR_TopKFusion_GetAxis(ConstPrimitivePtr primitive); +void MindIR_TopKFusion_SetAxis(PrimitivePtr *primitive, int64_t axis); + +// ********** Transpose ********** +PrimitivePtr MindIR_Transpose_CreatePrimitive(); + +// ********** Unsqueeze ********** +PrimitivePtr MindIR_Unsqueeze_CreatePrimitive(const std::vector &axis); +std::vector MindIR_Unsqueeze_GetAxis(ConstPrimitivePtr primitive); +void MindIR_Unsqueeze_SetAxis(PrimitivePtr *primitive, const std::vector &axis); + +} // namespace lite +} // namespace mindspore +#endif diff --git a/mindspore/lite/mindir/include/mindir_lite_graph.h 
b/mindspore/lite/mindir/include/mindir_lite_graph.h new file mode 100644 index 00000000..24684914 --- /dev/null +++ b/mindspore/lite/mindir/include/mindir_lite_graph.h @@ -0,0 +1,57 @@ +#ifndef LITE_NNRT_NNRT_LITE_GRAPH_H +#define LITE_NNRT_NNRT_LITE_GRAPH_H +#include +#include +#include +namespace mindspore { +namespace lite { + +typedef void *PrimitivePtr; +typedef const void *ConstPrimitivePtr; + +typedef void *TensorPtr; +typedef const void *ConstTensorPtr; + +struct LiteGraph { + struct Node { + std::string name_; + std::string op_type_; // hnn no use + int node_type_; // hnn no use + PrimitivePtr primitive_ = nullptr; + std::shared_ptr base_operator_ = nullptr; // hnn no use + std::vector input_indices_; + std::vector output_indices_; + int quant_type_; + int device_type_ = -1; // hnn no use + }; + + struct SubGraph { + std::string name_; + std::vector input_indices_; + std::vector output_indices_; + std::vector node_indices_; + std::vector tensor_indices_; // hnn no use + }; + + std::string name_; + std::string version_; // hnn no use + std::vector input_indices_; + std::vector output_indices_; + std::vector all_tensors_; + std::vector all_nodes_; + std::vector sub_graphs_; +#ifdef ENABLE_MODEL_OBF + std::vector all_prims_type_; // hnn no use + std::vector all_nodes_stat_; // hnn no use + bool model_obfuscated_ = false; // hnn no use + std::vector deobf_prims_; // hnn no use +#endif +}; + +void MindIR_LiteGraph_Destroy(LiteGraph **lite_graph); +size_t MindIR_LiteGraph_GetConstTensorSize(const LiteGraph *lite_graph); + +} // namespace lite +} // namespace mindspore + +#endif // LITE_NNRT_NNRT_LITE_GRAPH_H diff --git a/mindspore/lite/mindir/include/mindir_primitive.h b/mindspore/lite/mindir/include/mindir_primitive.h new file mode 100644 index 00000000..b67c608a --- /dev/null +++ b/mindspore/lite/mindir/include/mindir_primitive.h @@ -0,0 +1,15 @@ +#ifndef MINDIR_MINDIR_PRIMITIVE_H +#define MINDIR_MINDIR_PRIMITIVE_H +#include "mindir_lite_graph.h" +#include "mindir_types.h" + +namespace mindspore { +namespace lite { + +// ********** PrimitiveBase ********** +NodeType MindIR_Primitive_GetType(PrimitivePtr primitive); +void MindIR_Primitive_Destroy(PrimitivePtr *primitive); + +} // namespace lite +} // namespace mindspore +#endif // MINDIR_MINDIR_PRIMITIVE_H diff --git a/mindspore/lite/mindir/include/mindir_tensor.h b/mindspore/lite/mindir/include/mindir_tensor.h new file mode 100644 index 00000000..ce1b24dc --- /dev/null +++ b/mindspore/lite/mindir/include/mindir_tensor.h @@ -0,0 +1,45 @@ +#ifndef LITE_TENSOR_H +#define LITE_TENSOR_H +#include "mindir_lite_graph.h" +#include "mindir_types.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +struct SharedBuffer; +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS + +namespace mindspore { +namespace lite { + +// ********** Tensor ********** +TensorPtr MindIR_Tensor_Create(); +TensorPtr MindIR_Tensor_Create(const std::string &name, DataType data_type, const std::vector &dims, + Format format, const std::vector &data, + const std::vector &quant_params); +std::string MindIR_Tensor_GetName(ConstTensorPtr tensor); +void MindIR_Tensor_SetName(TensorPtr *tensor, const std::string &name); +DataType MindIR_Tensor_GetDataType(ConstTensorPtr tensor); +void MindIR_Tensor_SetDataType(TensorPtr *tensor, DataType data_type); +std::vector MindIR_Tensor_GetDims(ConstTensorPtr tensor); +void MindIR_Tensor_SetDims(TensorPtr *tensor, const std::vector &dims); +Format 
MindIR_Tensor_GetFormat(ConstTensorPtr tensor); +void MindIR_Tensor_SetFormat(TensorPtr *tensor, Format format); +OHOS::HDI::Nnrt::V1_0::SharedBuffer MindIR_Tensor_GetData(ConstTensorPtr tensor, + const OHOS::HDI::Nnrt::V1_0::SharedBuffer &buffer, + uint8_t *mmap_ptr, unsigned int offset); +void MindIR_Tensor_SetData(TensorPtr *tensor, const std::vector &data); +std::vector MindIR_Tensor_GetData(ConstTensorPtr tensor); +std::vector MindIR_Tensor_GetQuantParams(ConstTensorPtr tensor); +void MindIR_Tensor_SetQuantParams(TensorPtr *tensor, const std::vector &quant_params); + +void MindIR_Tensor_Destroy(TensorPtr *tensor); + +} // namespace lite +} // namespace mindspore + +#endif // LITE_TENSOR_H diff --git a/mindspore/lite/mindir/include/mindir_types.h b/mindspore/lite/mindir/include/mindir_types.h new file mode 100644 index 00000000..8f2a9c70 --- /dev/null +++ b/mindspore/lite/mindir/include/mindir_types.h @@ -0,0 +1,210 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_TYPES_H +#define MINDSPORE_LITE_TYPES_H +#include +namespace mindspore { +namespace lite { + +struct QuantParam { + int32_t zeroPoint; + double scale; + int numBits; +}; + +enum DataType : int8_t { + DATA_TYPE_UNKNOWN = 0, + DATA_TYPE_BOOL = 30, + DATA_TYPE_INT8 = 32, + DATA_TYPE_INT16 = 33, + DATA_TYPE_INT32 = 34, + DATA_TYPE_INT64 = 35, + DATA_TYPE_UINT8 = 37, + DATA_TYPE_UINT16 = 38, + DATA_TYPE_UINT32 = 39, + DATA_TYPE_UINT64 = 40, + DATA_TYPE_FLOAT16 = 42, + DATA_TYPE_FLOAT32 = 43, + DATA_TYPE_FLOAT64 = 44, +}; + +enum Format : int8_t { + FORMAT_NCHW = 0, + FORMAT_NHWC = 1, +}; + +enum QuantType : int8_t { + QUANT_TYPE_NONE, + QUANT_TYPE_ALL, +}; + +enum NodeType : uint32_t { + NODE_TYPE_NONE = 0, + NODE_TYPE_ACTIVATION = 2, + NODE_TYPE_ADD_FUSION = 5, + NODE_TYPE_ARGMAX_FUSION = 11, + NODE_TYPE_AVG_POOL_FUSION = 17, + NODE_TYPE_BATCH_TO_SPACE_ND = 22, + NODE_TYPE_BIAS_ADD = 23, + NODE_TYPE_CAST = 28, + NODE_TYPE_CONCAT = 31, + NODE_TYPE_CONV2D_FUSION = 35, + NODE_TYPE_CONV2D_TRANSPOSE_FUSION = 36, + NODE_TYPE_DIV_FUSION = 47, + NODE_TYPE_ELTWISE = 52, + NODE_TYPE_EXPAND_DIMS = 56, + NODE_TYPE_FILL = 66, + NODE_TYPE_FULL_CONNECTION = 67, + NODE_TYPE_FUSED_BATCH_NORM = 68, + NODE_TYPE_GATHER = 69, + NODE_TYPE_LAYER_NORM_FUSION = 75, + NODE_TYPE_LESS_EQUAL = 78, + NODE_TYPE_MATMUL_FUSION = 89, + NODE_TYPE_MAXIMUM = 90, + NODE_TYPE_MAX_POOL_FUSION = 92, + NODE_TYPE_MUL_FUSION = 99, + NODE_TYPE_ONE_HOT = 105, + NODE_TYPE_PAD_FUSION = 107, + NODE_TYPE_POW_FUSION = 110, + NODE_TYPE_PRELU_FUSION = 112, + NODE_TYPE_QUANT_DTYPE_CAST = 113, + NODE_TYPE_REDUCE_FUSION = 118, + NODE_TYPE_RESHAPE = 119, + NODE_TYPE_RESIZE = 120, + NODE_TYPE_RSQRT = 126, + NODE_TYPE_SCALE_FUSION = 127, + NODE_TYPE_SHAPE = 130, + NODE_TYPE_SLICE_FUSION = 135, + NODE_TYPE_SOFTMAX = 138, + NODE_TYPE_SPACE_TO_BATCH_ND = 141, + NODE_TYPE_SPLIT = 145, + NODE_TYPE_SQRT = 146, + NODE_TYPE_SQUEEZE = 147, + NODE_TYPE_SQUARED_DIFFERENCE = 149, + NODE_TYPE_STACK = 150, + 
NODE_TYPE_STRIDED_SLICE = 151, + NODE_TYPE_SUB_FUSION = 152, + NODE_TYPE_TILE_FUSION = 160, + NODE_TYPE_TOPK_FUSION = 161, + NODE_TYPE_TRANSPOSE = 162, + NODE_TYPE_UNSQUEEZE = 165, +}; + +enum ResizeMethod : int8_t { + RESIZE_METHOD_UNKNOWN = -1, + RESIZE_METHOD_LINEAR = 0, + RESIZE_METHOD_NEAREST = 1, + RESIZE_METHOD_CUBIC = 2, +}; + +enum CoordinateTransformMode : int8_t { + COORDINATE_TRANSFORM_MODE_ASYMMETRIC = 0, + COORDINATE_TRANSFORM_MODE_ALIGN_CORNERS = 1, + COORDINATE_TRANSFORM_MODE_HALF_PIXEL = 2, +}; + +enum NearestMode : int8_t { + NEAREST_MODE_NORMAL = 0, + NEAREST_MODE_ROUND_HALF_DOWN = 1, + NEAREST_MODE_ROUND_HALF_UP = 2, + NEAREST_MODE_FLOOR = 3, + NEAREST_MODE_CEIL = 4, +}; + +enum ActivationType : int8_t { + ACTIVATION_TYPE_NO_ACTIVATION = 0, + ACTIVATION_TYPE_RELU = 1, + ACTIVATION_TYPE_SIGMOID = 2, + ACTIVATION_TYPE_RELU6 = 3, + ACTIVATION_TYPE_ELU = 4, + ACTIVATION_TYPE_LEAKY_RELU = 5, + ACTIVATION_TYPE_ABS = 6, + ACTIVATION_TYPE_RELU1 = 7, + ACTIVATION_TYPE_SOFTSIGN = 8, + ACTIVATION_TYPE_SOFTPLUS = 9, + ACTIVATION_TYPE_TANH = 10, + ACTIVATION_TYPE_SELU = 11, + ACTIVATION_TYPE_HSWISH = 12, + ACTIVATION_TYPE_HSIGMOID = 13, + ACTIVATION_TYPE_THRESHOLDRELU = 14, + ACTIVATION_TYPE_LINEAR = 15, + ACTIVATION_TYPE_HARD_TANH = 16, + ACTIVATION_TYPE_SIGN = 17, + ACTIVATION_TYPE_SWISH = 18, + ACTIVATION_TYPE_GELU = 19, + ACTIVATION_TYPE_UNKNOWN = 20, +}; + +enum ReduceMode : int8_t { + REDUCE_MODE_MEAN = 0, + REDUCE_MODE_MAX = 1, + REDUCE_MODE_MIN = 2, + REDUCE_MODE_PROD = 3, + REDUCE_MODE_SUM = 4, + REDUCE_MODE_SUM_SQUARE = 5, + REDUCE_MODE_ASUM = 6, + REDUCE_MODE_ALL = 7, +}; + +enum PoolMode : int8_t { + POOL_MODE_MAX_POOLING = 0, + POOL_MODE_MEAN_POOLING = 1, +}; + +enum EltwiseMode : int8_t { + ELTWISE_MODE_PROD = 0, + ELTWISE_MODE_SUM = 1, + ELTWISE_MODE_MAXIMUM = 2, + ELTWISE_MODE_UNKNOWN = 3, +}; + +enum PadMode : int8_t { + PAD_MODE_PAD = 0, + PAD_MODE_SAME = 1, + PAD_MODE_VALID = 2, +}; + +enum RoundMode : int8_t { + ROUND_MODE_FLOOR = 0, + ROUND_MODE_CEIL = 1, +}; + +enum PaddingMode : int8_t { + PADDING_MODE_CONSTANT = 0, + PADDING_MODE_REFLECT = 1, + PADDING_MODE_SYMMETRIC = 2, + PADDING_MODE_RESERVED = 3, +}; + +enum LshProjectionType : int8_t { + UNKNOWN = 0, + SPARSE = 1, + DENSE = 2, +}; + +enum Reduction : int8_t { + REDUCTION_SUM = 0, + MEAN = 1, + NONE = 2, +}; + +struct Attribute { + std::string name; + uint32_t data; +}; +} // namespace lite +} // namespace mindspore +#endif // MIDIR_LITE_TYPES_H diff --git a/mindspore/lite/mindir/inner_headers/lite_graph.h b/mindspore/lite/mindir/inner_headers/lite_graph.h new file mode 100644 index 00000000..f2599cc9 --- /dev/null +++ b/mindspore/lite/mindir/inner_headers/lite_graph.h @@ -0,0 +1,27 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MIDIR_LITE_LITE_GRAPH_H +#define MIDIR_LITE_LITE_GRAPH_H +#include + +namespace mindspore { +namespace lite { + +std::vector Convert(NodeType type, PrimitivePtr primitive); + +} // namespace lite +} // namespace mindspore +#endif // MIDIR_LITE_LITE_GRAPH_H diff --git a/mindspore/lite/mindir/inner_headers/mindir_memory_manager.h b/mindspore/lite/mindir/inner_headers/mindir_memory_manager.h new file mode 100644 index 00000000..29ef0b31 --- /dev/null +++ b/mindspore/lite/mindir/inner_headers/mindir_memory_manager.h @@ -0,0 +1,33 @@ +#ifndef LITE_MINDIR_MEMORY_MANAGER_H +#define LITE_MINDIR_MEMORY_MANAGER_H +#include +#include +#include +#include +#include "include/errorcode.h" +#include "schema/model_generated.h" +#include "mindir_lite_graph.h" +// using namespace OHOS::HDI::Nnrt::V1_0; + +namespace mindspore { +namespace lite { +class MindIRMemoryManager { + public: + static MindIRMemoryManager *GetInstance(); + ~MindIRMemoryManager() = default; + void *CreateTensorFromBuilder(flatbuffers::FlatBufferBuilder &fbb_new, schema::Tensor *tensor); + void DeleteTensor(schema::Tensor *tensor); + void *CreatePrimitiveFromBuilder(flatbuffers::FlatBufferBuilder &fbb_new, schema::Primitive *primitive); + void DeletePrimitive(schema::Primitive *primitive); + void ClearAllMemory(); + + private: + MindIRMemoryManager() = default; + static void *CopyFbbToNewMemory(flatbuffers::FlatBufferBuilder &fbb_new); + std::map primitive_map; + std::map tensor_map; + std::mutex mutex; +}; +} // namespace lite +} // namespace mindspore +#endif // LITE_MINDIR_MEMORY_MANAGER_H diff --git a/mindspore/lite/mindir/inner_headers/utils.h b/mindspore/lite/mindir/inner_headers/utils.h new file mode 100644 index 00000000..0e6eb35d --- /dev/null +++ b/mindspore/lite/mindir/inner_headers/utils.h @@ -0,0 +1,28 @@ +#ifndef MIDIR_LITE_UTILS_H +#define MIDIR_LITE_UTILS_H +#include "mindir_types.h" +#include "mindir_lite_graph.h" +#include "schema/model_generated.h" +namespace mindspore { +namespace lite { + +// ********** PrimitiveBase ********** +PrimitivePtr MindIR_CreatePrimitiveFromBuilder(flatbuffers::FlatBufferBuilder &fbb); + +flatbuffers::Offset CreateVec2D(flatbuffers::FlatBufferBuilder &fbb, + const std::vector> &data); +flatbuffers::Offset CreateVec2D(flatbuffers::FlatBufferBuilder &fbb, + const mindspore::schema::Vec2D *data); + +mindspore::schema::PrimitiveType MindIR_GetPrimitiveType(PrimitivePtr prim); + +flatbuffers::Offset>> ConvertQuantParams( + flatbuffers::FlatBufferBuilder &fbb, const std::vector &quant_params); + +flatbuffers::Offset>> ConvertQuantParams( + flatbuffers::FlatBufferBuilder &fbb, + const flatbuffers::Vector> *quant_params); + +} // namespace lite +} // namespace mindspore +#endif // MIDIR_LITE_UTILS_H diff --git a/mindspore/lite/mindir/src/mindir.cc b/mindspore/lite/mindir/src/mindir.cc new file mode 100644 index 00000000..c2a1cd3f --- /dev/null +++ b/mindspore/lite/mindir/src/mindir.cc @@ -0,0 +1,4258 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "mindir.h" +#include "utils.h" +#include "schema/model_generated.h" +#include "mindir_memory_manager.h" +//----TODO---write an example to run MindIRMemoryManager +namespace mindspore { +namespace lite { + +// ********** Activation ********** +PrimitivePtr MindIR_Activation_CreatePrimitive(ActivationType activation_type, float alpha, float min_val, + float max_val, bool approximate) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateActivation(fbb, static_cast(activation_type), alpha, min_val, + max_val, approximate); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ACTIVATION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +ActivationType MindIR_Activation_GetActivationType(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Activation(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->activation_type()); + } else { + ActivationType en = static_cast(0); + return en; + } + } else { + ActivationType en = static_cast(0); + return en; + } +} + +void MindIR_Activation_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Activation(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateActivation(fbb, static_cast(activation_type), value->alpha(), + value->min_val(), value->max_val(), value->approximate()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ACTIVATION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +float MindIR_Activation_GetAlpha(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Activation(); + if (prim != nullptr && value != nullptr) { + return value->alpha(); + } else { + return .0; + } + } else { + return .0; + } +} + +void MindIR_Activation_SetAlpha(PrimitivePtr *primitive, float alpha) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Activation(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateActivation(fbb, static_cast(value->activation_type()), + alpha, value->min_val(), value->max_val(), value->approximate()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ACTIVATION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +float MindIR_Activation_GetMinVal(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Activation(); + if (prim != nullptr && value != nullptr) { + return value->min_val(); + } else { + return .0; + } + } 
else { + return .0; + } +} + +void MindIR_Activation_SetMinVal(PrimitivePtr *primitive, float min_val) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Activation(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateActivation(fbb, static_cast(value->activation_type()), + value->alpha(), min_val, value->max_val(), value->approximate()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ACTIVATION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +float MindIR_Activation_GetMaxVal(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Activation(); + if (prim != nullptr && value != nullptr) { + return value->max_val(); + } else { + return .0; + } + } else { + return .0; + } +} + +void MindIR_Activation_SetMaxVal(PrimitivePtr *primitive, float max_val) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Activation(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateActivation(fbb, static_cast(value->activation_type()), + value->alpha(), value->min_val(), max_val, value->approximate()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ACTIVATION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +bool MindIR_Activation_GetApproximate(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Activation(); + if (prim != nullptr && value != nullptr) { + return value->approximate(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_Activation_SetApproximate(PrimitivePtr *primitive, bool approximate) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Activation(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateActivation(fbb, static_cast(value->activation_type()), + value->alpha(), value->min_val(), value->max_val(), approximate); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ACTIVATION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** AddFusion ********** +PrimitivePtr MindIR_AddFusion_CreatePrimitive(ActivationType activation_type) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateAddFusion(fbb, static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ADD_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +ActivationType 
MindIR_AddFusion_GetActivationType(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_AddFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->activation_type()); + } else { + ActivationType en = static_cast(0); + return en; + } + } else { + ActivationType en = static_cast(0); + return en; + } +} + +void MindIR_AddFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_AddFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateAddFusion(fbb, static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ADD_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** ArgMaxFusion ********** +PrimitivePtr MindIR_ArgMaxFusion_CreatePrimitive(int64_t axis, int64_t top_k, bool keep_dims, bool out_max_value) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateArgMaxFusion(fbb, axis, top_k, keep_dims, out_max_value); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ARGMAX_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +int64_t MindIR_ArgMaxFusion_GetAxis(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_ArgMaxFusion(); + if (prim != nullptr && value != nullptr) { + return value->axis(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_ArgMaxFusion_SetAxis(PrimitivePtr *primitive, int64_t axis) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_ArgMaxFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateArgMaxFusion(fbb, axis, value->top_k(), value->keep_dims(), value->out_max_value()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ARGMAX_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_ArgMaxFusion_GetTopK(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_ArgMaxFusion(); + if (prim != nullptr && value != nullptr) { + return value->top_k(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_ArgMaxFusion_SetTopK(PrimitivePtr *primitive, int64_t top_k) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_ArgMaxFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateArgMaxFusion(fbb, value->axis(), top_k, value->keep_dims(), value->out_max_value()); + auto prim_offset = + schema::CreatePrimitive(fbb, 
static_cast(NODE_TYPE_ARGMAX_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +bool MindIR_ArgMaxFusion_GetKeepDims(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_ArgMaxFusion(); + if (prim != nullptr && value != nullptr) { + return value->keep_dims(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_ArgMaxFusion_SetKeepDims(PrimitivePtr *primitive, bool keep_dims) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_ArgMaxFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateArgMaxFusion(fbb, value->axis(), value->top_k(), keep_dims, value->out_max_value()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ARGMAX_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +bool MindIR_ArgMaxFusion_GetOutMaxValue(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_ArgMaxFusion(); + if (prim != nullptr && value != nullptr) { + return value->out_max_value(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_ArgMaxFusion_SetOutMaxValue(PrimitivePtr *primitive, bool out_max_value) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_ArgMaxFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateArgMaxFusion(fbb, value->axis(), value->top_k(), value->keep_dims(), out_max_value); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ARGMAX_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** AvgPoolFusion ********** +PrimitivePtr MindIR_AvgPoolFusion_CreatePrimitive(const std::vector &kernel_size, + const std::vector &strides, const std::vector &pad, + PadMode pad_mode, RoundMode round_mode, Format format, bool global, + ActivationType activation_type) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateAvgPoolFusion( + fbb, fbb.CreateVector(kernel_size.data(), kernel_size.size()), fbb.CreateVector(strides.data(), strides.size()), + fbb.CreateVector(pad.data(), pad.size()), static_cast(pad_mode), + static_cast(round_mode), static_cast(format), global, + static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +std::vector MindIR_AvgPoolFusion_GetKernelSize(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = 
prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->kernel_size(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_AvgPoolFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector &kernel_size) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateAvgPoolFusion( + fbb, fbb.CreateVector(kernel_size.data(), kernel_size.size()), + fbb.CreateVector(value->strides()->data(), value->strides()->size()), + fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast(value->pad_mode()), + static_cast(value->round_mode()), static_cast(value->format()), + value->global(), static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +std::vector MindIR_AvgPoolFusion_GetStrides(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->strides(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_AvgPoolFusion_SetStrides(PrimitivePtr *primitive, const std::vector &strides) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateAvgPoolFusion( + fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(strides.data(), strides.size()), fbb.CreateVector(value->pad()->data(), value->pad()->size()), + static_cast(value->pad_mode()), static_cast(value->round_mode()), + static_cast(value->format()), value->global(), + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +std::vector MindIR_AvgPoolFusion_GetPad(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->pad(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_AvgPoolFusion_SetPad(PrimitivePtr *primitive, const std::vector &pad) { + if (primitive != nullptr && *primitive != nullptr) { + 
auto prim = static_cast(*primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateAvgPoolFusion( + fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->strides()->data(), value->strides()->size()), fbb.CreateVector(pad.data(), pad.size()), + static_cast(value->pad_mode()), static_cast(value->round_mode()), + static_cast(value->format()), value->global(), + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +PadMode MindIR_AvgPoolFusion_GetPadMode(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->pad_mode()); + } else { + PadMode en = static_cast(0); + return en; + } + } else { + PadMode en = static_cast(0); + return en; + } +} + +void MindIR_AvgPoolFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateAvgPoolFusion( + fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->strides()->data(), value->strides()->size()), + fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast(pad_mode), + static_cast(value->round_mode()), static_cast(value->format()), + value->global(), static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +RoundMode MindIR_AvgPoolFusion_GetRoundMode(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->round_mode()); + } else { + RoundMode en = static_cast(0); + return en; + } + } else { + RoundMode en = static_cast(0); + return en; + } +} + +void MindIR_AvgPoolFusion_SetRoundMode(PrimitivePtr *primitive, RoundMode round_mode) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateAvgPoolFusion( + fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->strides()->data(), value->strides()->size()), + fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast(value->pad_mode()), + static_cast(round_mode), static_cast(value->format()), value->global(), + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, 
static_cast(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +Format MindIR_AvgPoolFusion_GetFormat(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->format()); + } else { + Format en = static_cast(0); + return en; + } + } else { + Format en = static_cast(0); + return en; + } +} + +void MindIR_AvgPoolFusion_SetFormat(PrimitivePtr *primitive, Format format) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateAvgPoolFusion( + fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->strides()->data(), value->strides()->size()), + fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast(value->pad_mode()), + static_cast(value->round_mode()), static_cast(format), value->global(), + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +bool MindIR_AvgPoolFusion_GetGlobal(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + return value->global(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_AvgPoolFusion_SetGlobal(PrimitivePtr *primitive, bool global) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateAvgPoolFusion( + fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->strides()->data(), value->strides()->size()), + fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast(value->pad_mode()), + static_cast(value->round_mode()), static_cast(value->format()), global, + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +ActivationType MindIR_AvgPoolFusion_GetActivationType(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->activation_type()); + } else { + ActivationType en = static_cast(0); + return en; + } + } else { + ActivationType en = static_cast(0); + return en; + } +} + +void MindIR_AvgPoolFusion_SetActivationType(PrimitivePtr 
*primitive, ActivationType activation_type) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_AvgPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateAvgPoolFusion( + fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->strides()->data(), value->strides()->size()), + fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast(value->pad_mode()), + static_cast(value->round_mode()), static_cast(value->format()), + value->global(), static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_AVG_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** BatchToSpaceND ********** +PrimitivePtr MindIR_BatchToSpaceND_CreatePrimitive(const std::vector &block_shape, + const std::vector> &crops) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateBatchToSpaceND(fbb, fbb.CreateVector(block_shape.data(), block_shape.size()), + CreateVec2D(fbb, crops)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_BATCH_TO_SPACE_ND), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +std::vector MindIR_BatchToSpaceND_GetBlockShape(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_BatchToSpaceND(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->block_shape(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_BatchToSpaceND_SetBlockShape(PrimitivePtr *primitive, const std::vector &block_shape) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_BatchToSpaceND(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateBatchToSpaceND(fbb, fbb.CreateVector(block_shape.data(), block_shape.size()), + CreateVec2D(fbb, value->crops())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_BATCH_TO_SPACE_ND), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +std::vector> MindIR_BatchToSpaceND_GetCrops(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_BatchToSpaceND(); + if (prim != nullptr && value != nullptr) { + std::vector> out; + auto src = value->crops(); + for (auto sub_list : *src->data()) { + std::vector result_tmp; + result_tmp.resize(sub_list->data()->size()); + std::transform(sub_list->data()->begin(), sub_list->data()->end(), result_tmp.begin(), + [](int64_t item) { return item; }); + out.emplace_back(result_tmp); + } + return out; + } else { + return 
{}; + } + } else { + return {}; + } +} + +void MindIR_BatchToSpaceND_SetCrops(PrimitivePtr *primitive, const std::vector> &crops) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_BatchToSpaceND(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateBatchToSpaceND( + fbb, fbb.CreateVector(value->block_shape()->data(), value->block_shape()->size()), CreateVec2D(fbb, crops)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_BATCH_TO_SPACE_ND), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** BiasAdd ********** +PrimitivePtr MindIR_BiasAdd_CreatePrimitive() { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateBiasAdd(fbb); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_BIAS_ADD), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} + +// ********** Cast ********** +PrimitivePtr MindIR_Cast_CreatePrimitive() { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateCast(fbb); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_CAST), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} + +// ********** Concat ********** +PrimitivePtr MindIR_Concat_CreatePrimitive(int64_t axis) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConcat(fbb, axis); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_CONCAT), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +int64_t MindIR_Concat_GetAxis(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Concat(); + if (prim != nullptr && value != nullptr) { + return value->axis(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_Concat_SetAxis(PrimitivePtr *primitive, int64_t axis) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Concat(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConcat(fbb, axis); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_CONCAT), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** Conv2DFusion ********** +PrimitivePtr MindIR_Conv2DFusion_CreatePrimitive(const std::vector &kernel_size, + const std::vector &stride, + const std::vector &dilation, PadMode pad_mode, + const std::vector &pad_list, int64_t group, + int64_t in_channel, int64_t out_channel, + ActivationType activation_type) { + 
flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2DFusion( + fbb, mindspore::schema::Format_NCHW, fbb.CreateVector(kernel_size.data(), kernel_size.size()), + fbb.CreateVector(stride.data(), stride.size()), fbb.CreateVector(dilation.data(), dilation.size()), + static_cast(pad_mode), fbb.CreateVector(pad_list.data(), pad_list.size()), 0, group, in_channel, + out_channel, static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_CONV2D_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +std::vector MindIR_Conv2DFusion_GetKernelSize(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->kernel_size(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Conv2DFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector &kernel_size) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2DFusion( + fbb, mindspore::schema::Format_NCHW, fbb.CreateVector(kernel_size.data(), kernel_size.size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(), + value->out_channel(), static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_CONV2D_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +std::vector MindIR_Conv2DFusion_GetStride(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->stride(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Conv2DFusion_SetStride(PrimitivePtr *primitive, const std::vector &stride) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2DFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(stride.data(), stride.size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), + fbb.CreateVector(value->pad_list()->data(), 
value->pad_list()->size()), 0, value->group(), value->in_channel(), + value->out_channel(), static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_CONV2D_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +std::vector MindIR_Conv2DFusion_GetDilation(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->dilation(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Conv2DFusion_SetDilation(PrimitivePtr *primitive, const std::vector &dilation) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2DFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(dilation.data(), dilation.size()), static_cast(value->pad_mode()), + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(), + value->out_channel(), static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_CONV2D_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +PadMode MindIR_Conv2DFusion_GetPadMode(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->pad_mode()); + } else { + PadMode en = static_cast(0); + return en; + } + } else { + PadMode en = static_cast(0); + return en; + } +} + +void MindIR_Conv2DFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2DFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), static_cast(pad_mode), + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(), + value->out_channel(), static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_CONV2D_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = 
flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +std::vector MindIR_Conv2DFusion_GetPadList(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->pad_list(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Conv2DFusion_SetPadList(PrimitivePtr *primitive, const std::vector &pad_list) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2DFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), fbb.CreateVector(pad_list.data(), pad_list.size()), 0, + value->group(), value->in_channel(), value->out_channel(), + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_CONV2D_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_Conv2DFusion_GetGroup(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + return value->group(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_Conv2DFusion_SetGroup(PrimitivePtr *primitive, int64_t group) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2DFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, group, value->in_channel(), + value->out_channel(), static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_CONV2D_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_Conv2DFusion_GetInChannel(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + return value->in_channel(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_Conv2DFusion_SetInChannel(PrimitivePtr *primitive, int64_t 
in_channel) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2DFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), in_channel, + value->out_channel(), static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_CONV2D_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_Conv2DFusion_GetOutChannel(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + return value->out_channel(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_Conv2DFusion_SetOutChannel(PrimitivePtr *primitive, int64_t out_channel) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2DFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(), + out_channel, static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_CONV2D_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +ActivationType MindIR_Conv2DFusion_GetActivationType(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->activation_type()); + } else { + ActivationType en = static_cast(0); + return en; + } + } else { + ActivationType en = static_cast(0); + return en; + } +} + +void MindIR_Conv2DFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2DFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2DFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + 
fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(), + value->out_channel(), static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_CONV2D_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** Conv2dTransposeFusion ********** +PrimitivePtr MindIR_Conv2dTransposeFusion_CreatePrimitive( + const std::vector &kernel_size, const std::vector &stride, const std::vector &dilation, + PadMode pad_mode, const std::vector &pad_list, int64_t group, int64_t in_channel, int64_t out_channel, + ActivationType activation_type, const std::vector &output_paddings) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2dTransposeFusion( + fbb, mindspore::schema::Format_NCHW, fbb.CreateVector(kernel_size.data(), kernel_size.size()), + fbb.CreateVector(stride.data(), stride.size()), fbb.CreateVector(dilation.data(), dilation.size()), + static_cast(pad_mode), 0, fbb.CreateVector(pad_list.data(), pad_list.size()), 0, group, in_channel, + out_channel, static_cast(activation_type), + fbb.CreateVector(output_paddings.data(), output_paddings.size())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +std::vector MindIR_Conv2dTransposeFusion_GetKernelSize(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->kernel_size(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Conv2dTransposeFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector &kernel_size) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2dTransposeFusion( + fbb, mindspore::schema::Format_NCHW, fbb.CreateVector(kernel_size.data(), kernel_size.size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), 0, + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(), + value->out_channel(), static_cast(value->activation_type()), + fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size())); + auto prim_offset = schema::CreatePrimitive( + fbb, static_cast(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + 
*primitive = ret_value; + } + } +} +std::vector MindIR_Conv2dTransposeFusion_GetStride(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->stride(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Conv2dTransposeFusion_SetStride(PrimitivePtr *primitive, const std::vector &stride) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2dTransposeFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(stride.data(), stride.size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), 0, + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(), + value->out_channel(), static_cast(value->activation_type()), + fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size())); + auto prim_offset = schema::CreatePrimitive( + fbb, static_cast(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +std::vector MindIR_Conv2dTransposeFusion_GetDilation(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->dilation(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Conv2dTransposeFusion_SetDilation(PrimitivePtr *primitive, const std::vector &dilation) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2dTransposeFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(dilation.data(), dilation.size()), static_cast(value->pad_mode()), 0, + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(), + value->out_channel(), static_cast(value->activation_type()), + fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size())); + auto prim_offset = schema::CreatePrimitive( + fbb, static_cast(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + 
*primitive = ret_value; + } + } +} +PadMode MindIR_Conv2dTransposeFusion_GetPadMode(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->pad_mode()); + } else { + PadMode en = static_cast(0); + return en; + } + } else { + PadMode en = static_cast(0); + return en; + } +} + +void MindIR_Conv2dTransposeFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2dTransposeFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), static_cast(pad_mode), + 0, fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), + value->in_channel(), value->out_channel(), static_cast(value->activation_type()), + fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size())); + auto prim_offset = schema::CreatePrimitive( + fbb, static_cast(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +std::vector MindIR_Conv2dTransposeFusion_GetPadList(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->pad_list(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Conv2dTransposeFusion_SetPadList(PrimitivePtr *primitive, const std::vector &pad_list) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2dTransposeFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), 0, fbb.CreateVector(pad_list.data(), pad_list.size()), 0, + value->group(), value->in_channel(), value->out_channel(), + static_cast(value->activation_type()), + fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size())); + auto prim_offset = schema::CreatePrimitive( + fbb, static_cast(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_Conv2dTransposeFusion_GetGroup(ConstPrimitivePtr 
primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + return value->group(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_Conv2dTransposeFusion_SetGroup(PrimitivePtr *primitive, int64_t group) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2dTransposeFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), 0, + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, group, value->in_channel(), + value->out_channel(), static_cast(value->activation_type()), + fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size())); + auto prim_offset = schema::CreatePrimitive( + fbb, static_cast(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_Conv2dTransposeFusion_GetInChannel(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + return value->in_channel(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_Conv2dTransposeFusion_SetInChannel(PrimitivePtr *primitive, int64_t in_channel) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2dTransposeFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), 0, + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), in_channel, + value->out_channel(), static_cast(value->activation_type()), + fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size())); + auto prim_offset = schema::CreatePrimitive( + fbb, static_cast(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_Conv2dTransposeFusion_GetOutChannel(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + return value->out_channel(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_Conv2dTransposeFusion_SetOutChannel(PrimitivePtr *primitive, 
int64_t out_channel) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2dTransposeFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), 0, + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(), + out_channel, static_cast(value->activation_type()), + fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size())); + auto prim_offset = schema::CreatePrimitive( + fbb, static_cast(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +ActivationType MindIR_Conv2dTransposeFusion_GetActivationType(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->activation_type()); + } else { + ActivationType en = static_cast(0); + return en; + } + } else { + ActivationType en = static_cast(0); + return en; + } +} + +void MindIR_Conv2dTransposeFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2dTransposeFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), 0, + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(), + value->out_channel(), static_cast(activation_type), + fbb.CreateVector(value->output_paddings()->data(), value->output_paddings()->size())); + auto prim_offset = schema::CreatePrimitive( + fbb, static_cast(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +std::vector MindIR_Conv2dTransposeFusion_GetOutputPaddings(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->output_paddings(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Conv2dTransposeFusion_SetOutputPaddings(PrimitivePtr *primitive, + const 
std::vector &output_paddings) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Conv2dTransposeFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateConv2dTransposeFusion( + fbb, mindspore::schema::Format_NCHW, + fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->stride()->data(), value->stride()->size()), + fbb.CreateVector(value->dilation()->data(), value->dilation()->size()), + static_cast(value->pad_mode()), 0, + fbb.CreateVector(value->pad_list()->data(), value->pad_list()->size()), 0, value->group(), value->in_channel(), + value->out_channel(), static_cast(value->activation_type()), + fbb.CreateVector(output_paddings.data(), output_paddings.size())); + auto prim_offset = schema::CreatePrimitive( + fbb, static_cast(NODE_TYPE_CONV2D_TRANSPOSE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** DivFusion ********** +PrimitivePtr MindIR_DivFusion_CreatePrimitive(ActivationType activation_type) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateDivFusion(fbb, static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_DIV_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +ActivationType MindIR_DivFusion_GetActivationType(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_DivFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->activation_type()); + } else { + ActivationType en = static_cast(0); + return en; + } + } else { + ActivationType en = static_cast(0); + return en; + } +} + +void MindIR_DivFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_DivFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateDivFusion(fbb, static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_DIV_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** Eltwise ********** +PrimitivePtr MindIR_Eltwise_CreatePrimitive(EltwiseMode mode) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateEltwise(fbb, static_cast(mode)); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ELTWISE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +EltwiseMode MindIR_Eltwise_GetMode(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = 
prim->value_as_Eltwise(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->mode()); + } else { + EltwiseMode en = static_cast(0); + return en; + } + } else { + EltwiseMode en = static_cast(0); + return en; + } +} + +void MindIR_Eltwise_SetMode(PrimitivePtr *primitive, EltwiseMode mode) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Eltwise(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateEltwise(fbb, static_cast(mode)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ELTWISE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** ExpandDims ********** +PrimitivePtr MindIR_ExpandDims_CreatePrimitive() { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateExpandDims(fbb); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_EXPAND_DIMS), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} + +// ********** Fill ********** +PrimitivePtr MindIR_Fill_CreatePrimitive() { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateFill(fbb); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_FILL), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} + +// ********** FullConnection ********** +PrimitivePtr MindIR_FullConnection_CreatePrimitive(bool has_bias, bool use_axis, int64_t axis, + ActivationType activation_type) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateFullConnection(fbb, has_bias, use_axis, axis, static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_FULL_CONNECTION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +bool MindIR_FullConnection_GetHasBias(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_FullConnection(); + if (prim != nullptr && value != nullptr) { + return value->has_bias(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_FullConnection_SetHasBias(PrimitivePtr *primitive, bool has_bias) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_FullConnection(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateFullConnection(fbb, has_bias, value->use_axis(), value->axis(), + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_FULL_CONNECTION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + 
*primitive = ret_value; + } + } +} +bool MindIR_FullConnection_GetUseAxis(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_FullConnection(); + if (prim != nullptr && value != nullptr) { + return value->use_axis(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_FullConnection_SetUseAxis(PrimitivePtr *primitive, bool use_axis) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_FullConnection(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateFullConnection(fbb, value->has_bias(), use_axis, value->axis(), + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_FULL_CONNECTION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_FullConnection_GetAxis(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_FullConnection(); + if (prim != nullptr && value != nullptr) { + return value->axis(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_FullConnection_SetAxis(PrimitivePtr *primitive, int64_t axis) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_FullConnection(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateFullConnection(fbb, value->has_bias(), value->use_axis(), axis, + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_FULL_CONNECTION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +ActivationType MindIR_FullConnection_GetActivationType(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_FullConnection(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->activation_type()); + } else { + ActivationType en = static_cast(0); + return en; + } + } else { + ActivationType en = static_cast(0); + return en; + } +} + +void MindIR_FullConnection_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_FullConnection(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateFullConnection(fbb, value->has_bias(), value->use_axis(), value->axis(), + static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_FULL_CONNECTION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** FusedBatchNorm ********** +PrimitivePtr MindIR_FusedBatchNorm_CreatePrimitive(float epsilon) { + 
+  flatbuffers::FlatBufferBuilder fbb;
+  auto ops_offset = schema::CreateFusedBatchNorm(fbb, epsilon, 0.9, 0);
+  auto prim_offset =
+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_FUSED_BATCH_NORM), ops_offset.o);
+  fbb.Finish(prim_offset);
+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
+  return ret_value;
+}
+float MindIR_FusedBatchNorm_GetEpsilon(ConstPrimitivePtr primitive) {
+  if (primitive != nullptr) {
+    auto prim = static_cast<const schema::Primitive *>(primitive);
+    auto value = prim->value_as_FusedBatchNorm();
+    if (prim != nullptr && value != nullptr) {
+      return value->epsilon();
+    } else {
+      return .0;
+    }
+  } else {
+    return .0;
+  }
+}
+
+void MindIR_FusedBatchNorm_SetEpsilon(PrimitivePtr *primitive, float epsilon) {
+  if (primitive != nullptr && *primitive != nullptr) {
+    auto prim = static_cast<schema::Primitive *>(*primitive);
+    auto value = prim->value_as_FusedBatchNorm();
+    if (prim != nullptr && value != nullptr) {
+      flatbuffers::FlatBufferBuilder fbb;
+      auto ops_offset = schema::CreateFusedBatchNorm(fbb, epsilon, 0.9, 0);
+      auto prim_offset =
+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_FUSED_BATCH_NORM), ops_offset.o);
+      fbb.Finish(prim_offset);
+      auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim);
+      auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
+      *primitive = ret_value;
+    }
+  }
+}
+
+// ********** Gather **********
+PrimitivePtr MindIR_Gather_CreatePrimitive() {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto ops_offset = schema::CreateGather(fbb);
+  auto prim_offset = schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_GATHER), ops_offset.o);
+  fbb.Finish(prim_offset);
+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
+  return ret_value;
+}
+
+// ********** LayerNormFusion **********
+PrimitivePtr MindIR_LayerNormFusion_CreatePrimitive(int64_t begin_norm_axis, float epsilon, bool elementwise_affine,
+                                                    int64_t begin_params_axis) {
+  flatbuffers::FlatBufferBuilder fbb;
+  auto ops_offset = schema::CreateLayerNormFusion(fbb, begin_norm_axis, epsilon, elementwise_affine, begin_params_axis);
+  auto prim_offset =
+    schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_LAYER_NORM_FUSION), ops_offset.o);
+  fbb.Finish(prim_offset);
+  auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr);
+  auto ret_value = flatbuffers::GetMutableRoot<schema::Primitive>(new_addr);
+  return ret_value;
+}
+int64_t MindIR_LayerNormFusion_GetBeginNormAxis(ConstPrimitivePtr primitive) {
+  if (primitive != nullptr) {
+    auto prim = static_cast<const schema::Primitive *>(primitive);
+    auto value = prim->value_as_LayerNormFusion();
+    if (prim != nullptr && value != nullptr) {
+      return value->begin_norm_axis();
+    } else {
+      return 0;
+    }
+  } else {
+    return 0;
+  }
+}
+
+void MindIR_LayerNormFusion_SetBeginNormAxis(PrimitivePtr *primitive, int64_t begin_norm_axis) {
+  if (primitive != nullptr && *primitive != nullptr) {
+    auto prim = static_cast<schema::Primitive *>(*primitive);
+    auto value = prim->value_as_LayerNormFusion();
+    if (prim != nullptr && value != nullptr) {
+      flatbuffers::FlatBufferBuilder fbb;
+      auto ops_offset = schema::CreateLayerNormFusion(fbb, begin_norm_axis, value->epsilon(),
+                                                      value->elementwise_affine(), value->begin_params_axis());
+      auto prim_offset =
+        schema::CreatePrimitive(fbb, static_cast<schema::PrimitiveType>(NODE_TYPE_LAYER_NORM_FUSION), ops_offset.o);
+      fbb.Finish(prim_offset);
+      auto new_addr =
MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +float MindIR_LayerNormFusion_GetEpsilon(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_LayerNormFusion(); + if (prim != nullptr && value != nullptr) { + return value->epsilon(); + } else { + return .0; + } + } else { + return .0; + } +} + +void MindIR_LayerNormFusion_SetEpsilon(PrimitivePtr *primitive, float epsilon) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_LayerNormFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateLayerNormFusion(fbb, value->begin_norm_axis(), epsilon, + value->elementwise_affine(), value->begin_params_axis()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_LAYER_NORM_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +bool MindIR_LayerNormFusion_GetElementwiseAffine(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_LayerNormFusion(); + if (prim != nullptr && value != nullptr) { + return value->elementwise_affine(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_LayerNormFusion_SetElementwiseAffine(PrimitivePtr *primitive, bool elementwise_affine) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_LayerNormFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateLayerNormFusion(fbb, value->begin_norm_axis(), value->epsilon(), + elementwise_affine, value->begin_params_axis()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_LAYER_NORM_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_LayerNormFusion_GetBeginParamsAxis(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_LayerNormFusion(); + if (prim != nullptr && value != nullptr) { + return value->begin_params_axis(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_LayerNormFusion_SetBeginParamsAxis(PrimitivePtr *primitive, int64_t begin_params_axis) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_LayerNormFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateLayerNormFusion(fbb, value->begin_norm_axis(), value->epsilon(), + value->elementwise_affine(), begin_params_axis); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_LAYER_NORM_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; 
+ } + } +} + +// ********** LessEqual ********** +PrimitivePtr MindIR_LessEqual_CreatePrimitive() { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateLessEqual(fbb); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_LESS_EQUAL), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} + +// ********** MatMulFusion ********** +PrimitivePtr MindIR_MatMulFusion_CreatePrimitive(bool transpose_a, bool transpose_b, ActivationType activation_type) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateMatMulFusion(fbb, transpose_a, transpose_b, static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_MATMUL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +bool MindIR_MatMulFusion_GetTransposeA(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_MatMulFusion(); + if (prim != nullptr && value != nullptr) { + return value->transpose_a(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_MatMulFusion_SetTransposeA(PrimitivePtr *primitive, bool transpose_a) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_MatMulFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateMatMulFusion(fbb, transpose_a, value->transpose_b(), + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_MATMUL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +bool MindIR_MatMulFusion_GetTransposeB(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_MatMulFusion(); + if (prim != nullptr && value != nullptr) { + return value->transpose_b(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_MatMulFusion_SetTransposeB(PrimitivePtr *primitive, bool transpose_b) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_MatMulFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateMatMulFusion(fbb, value->transpose_a(), transpose_b, + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_MATMUL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +ActivationType MindIR_MatMulFusion_GetActivationType(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_MatMulFusion(); + if (prim != nullptr && value != nullptr) { + return 
static_cast(value->activation_type()); + } else { + ActivationType en = static_cast(0); + return en; + } + } else { + ActivationType en = static_cast(0); + return en; + } +} + +void MindIR_MatMulFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_MatMulFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateMatMulFusion(fbb, value->transpose_a(), value->transpose_b(), + static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_MATMUL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** Maximum ********** +PrimitivePtr MindIR_Maximum_CreatePrimitive() { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateMaximum(fbb); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_MAXIMUM), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} + +// ********** MaxPoolFusion ********** +PrimitivePtr MindIR_MaxPoolFusion_CreatePrimitive(const std::vector &kernel_size, + const std::vector &strides, const std::vector &pad, + PadMode pad_mode, Format format, bool global, + ActivationType activation_type) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateMaxPoolFusion( + fbb, fbb.CreateVector(kernel_size.data(), kernel_size.size()), fbb.CreateVector(strides.data(), strides.size()), + fbb.CreateVector(pad.data(), pad.size()), static_cast(pad_mode), + mindspore::schema::RoundMode_FLOOR, static_cast(format), global, + static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +std::vector MindIR_MaxPoolFusion_GetKernelSize(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->kernel_size(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_MaxPoolFusion_SetKernelSize(PrimitivePtr *primitive, const std::vector &kernel_size) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateMaxPoolFusion( + fbb, fbb.CreateVector(kernel_size.data(), kernel_size.size()), + fbb.CreateVector(value->strides()->data(), value->strides()->size()), + fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast(value->pad_mode()), + mindspore::schema::RoundMode_FLOOR, static_cast(value->format()), 
value->global(), + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +std::vector MindIR_MaxPoolFusion_GetStrides(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->strides(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_MaxPoolFusion_SetStrides(PrimitivePtr *primitive, const std::vector &strides) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateMaxPoolFusion( + fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(strides.data(), strides.size()), fbb.CreateVector(value->pad()->data(), value->pad()->size()), + static_cast(value->pad_mode()), mindspore::schema::RoundMode_FLOOR, + static_cast(value->format()), value->global(), + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +std::vector MindIR_MaxPoolFusion_GetPad(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->pad(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_MaxPoolFusion_SetPad(PrimitivePtr *primitive, const std::vector &pad) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateMaxPoolFusion( + fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->strides()->data(), value->strides()->size()), fbb.CreateVector(pad.data(), pad.size()), + static_cast(value->pad_mode()), mindspore::schema::RoundMode_FLOOR, + static_cast(value->format()), value->global(), + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +PadMode MindIR_MaxPoolFusion_GetPadMode(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto 
prim = static_cast(primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->pad_mode()); + } else { + PadMode en = static_cast(0); + return en; + } + } else { + PadMode en = static_cast(0); + return en; + } +} + +void MindIR_MaxPoolFusion_SetPadMode(PrimitivePtr *primitive, PadMode pad_mode) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateMaxPoolFusion( + fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->strides()->data(), value->strides()->size()), + fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast(pad_mode), + mindspore::schema::RoundMode_FLOOR, static_cast(value->format()), value->global(), + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +Format MindIR_MaxPoolFusion_GetFormat(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->format()); + } else { + Format en = static_cast(0); + return en; + } + } else { + Format en = static_cast(0); + return en; + } +} + +void MindIR_MaxPoolFusion_SetFormat(PrimitivePtr *primitive, Format format) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateMaxPoolFusion( + fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->strides()->data(), value->strides()->size()), + fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast(value->pad_mode()), + mindspore::schema::RoundMode_FLOOR, static_cast(format), value->global(), + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +bool MindIR_MaxPoolFusion_GetGlobal(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (prim != nullptr && value != nullptr) { + return value->global(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_MaxPoolFusion_SetGlobal(PrimitivePtr *primitive, bool global) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateMaxPoolFusion( + fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + 
fbb.CreateVector(value->strides()->data(), value->strides()->size()), + fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast(value->pad_mode()), + mindspore::schema::RoundMode_FLOOR, static_cast(value->format()), global, + static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +ActivationType MindIR_MaxPoolFusion_GetActivationType(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->activation_type()); + } else { + ActivationType en = static_cast(0); + return en; + } + } else { + ActivationType en = static_cast(0); + return en; + } +} + +void MindIR_MaxPoolFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateMaxPoolFusion( + fbb, fbb.CreateVector(value->kernel_size()->data(), value->kernel_size()->size()), + fbb.CreateVector(value->strides()->data(), value->strides()->size()), + fbb.CreateVector(value->pad()->data(), value->pad()->size()), static_cast(value->pad_mode()), + mindspore::schema::RoundMode_FLOOR, static_cast(value->format()), value->global(), + static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_MAX_POOL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** MulFusion ********** +PrimitivePtr MindIR_MulFusion_CreatePrimitive(ActivationType activation_type) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateMulFusion(fbb, static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_MUL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +ActivationType MindIR_MulFusion_GetActivationType(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_MulFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->activation_type()); + } else { + ActivationType en = static_cast(0); + return en; + } + } else { + ActivationType en = static_cast(0); + return en; + } +} + +void MindIR_MulFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_MulFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateMulFusion(fbb, static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, 
static_cast(NODE_TYPE_MUL_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** OneHot ********** +PrimitivePtr MindIR_OneHot_CreatePrimitive(int64_t axis) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateOneHot(fbb, axis); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ONE_HOT), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +int64_t MindIR_OneHot_GetAxis(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_OneHot(); + if (prim != nullptr && value != nullptr) { + return value->axis(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_OneHot_SetAxis(PrimitivePtr *primitive, int64_t axis) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_OneHot(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateOneHot(fbb, axis); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_ONE_HOT), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** PadFusion ********** +PrimitivePtr MindIR_PadFusion_CreatePrimitive(const std::vector> &paddings, + PaddingMode padding_mode, float constant_value) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreatePadFusion(fbb, CreateVec2D(fbb, paddings), + static_cast(padding_mode), constant_value); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_PAD_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +std::vector> MindIR_PadFusion_GetPaddings(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_PadFusion(); + if (prim != nullptr && value != nullptr) { + std::vector> out; + auto src = value->paddings(); + for (auto sub_list : *src->data()) { + std::vector result_tmp; + result_tmp.resize(sub_list->data()->size()); + std::transform(sub_list->data()->begin(), sub_list->data()->end(), result_tmp.begin(), + [](int64_t item) { return item; }); + out.emplace_back(result_tmp); + } + return out; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_PadFusion_SetPaddings(PrimitivePtr *primitive, const std::vector> &paddings) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_PadFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreatePadFusion(fbb, CreateVec2D(fbb, paddings), + static_cast(value->padding_mode()), value->constant_value()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_PAD_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + 
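+      // Hand the finished buffer to MindIRMemoryManager and retarget the caller's handle; note that
+      // padding_mode and constant_value were carried over unchanged when the table was rebuilt above.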
auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +PaddingMode MindIR_PadFusion_GetPaddingMode(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_PadFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->padding_mode()); + } else { + PaddingMode en = static_cast(0); + return en; + } + } else { + PaddingMode en = static_cast(0); + return en; + } +} + +void MindIR_PadFusion_SetPaddingMode(PrimitivePtr *primitive, PaddingMode padding_mode) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_PadFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreatePadFusion(fbb, CreateVec2D(fbb, value->paddings()), + static_cast(padding_mode), value->constant_value()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_PAD_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +float MindIR_PadFusion_GetConstantValue(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_PadFusion(); + if (prim != nullptr && value != nullptr) { + return value->constant_value(); + } else { + return .0; + } + } else { + return .0; + } +} + +void MindIR_PadFusion_SetConstantValue(PrimitivePtr *primitive, float constant_value) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_PadFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreatePadFusion(fbb, CreateVec2D(fbb, value->paddings()), + static_cast(value->padding_mode()), constant_value); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_PAD_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** PowFusion ********** +PrimitivePtr MindIR_PowFusion_CreatePrimitive(float scale, float shift) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreatePowFusion(fbb, scale, shift); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_POW_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +float MindIR_PowFusion_GetScale(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_PowFusion(); + if (prim != nullptr && value != nullptr) { + return value->scale(); + } else { + return .0; + } + } else { + return .0; + } +} + +void MindIR_PowFusion_SetScale(PrimitivePtr *primitive, float scale) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_PowFusion(); + if (prim != nullptr && value != nullptr) { + 
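+      // Rebuild PowFusion with the new scale while preserving the existing shift value.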
flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreatePowFusion(fbb, scale, value->shift()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_POW_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +float MindIR_PowFusion_GetShift(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_PowFusion(); + if (prim != nullptr && value != nullptr) { + return value->shift(); + } else { + return .0; + } + } else { + return .0; + } +} + +void MindIR_PowFusion_SetShift(PrimitivePtr *primitive, float shift) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_PowFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreatePowFusion(fbb, value->scale(), shift); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_POW_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** PReLUFusion ********** +PrimitivePtr MindIR_PReLUFusion_CreatePrimitive(bool channel_shared) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreatePReLUFusion(fbb, channel_shared); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_PRELU_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +bool MindIR_PReLUFusion_GetChannelShared(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_PReLUFusion(); + if (prim != nullptr && value != nullptr) { + return value->channel_shared(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_PReLUFusion_SetChannelShared(PrimitivePtr *primitive, bool channel_shared) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_PReLUFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreatePReLUFusion(fbb, channel_shared); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_PRELU_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** QuantDTypeCast ********** +PrimitivePtr MindIR_QuantDTypeCast_CreatePrimitive(int64_t src_t, int64_t dst_t) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateQuantDTypeCast(fbb, src_t, dst_t); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_QUANT_DTYPE_CAST), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +int64_t 
MindIR_QuantDTypeCast_GetSrcT(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_QuantDTypeCast(); + if (prim != nullptr && value != nullptr) { + return value->src_t(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_QuantDTypeCast_SetSrcT(PrimitivePtr *primitive, int64_t src_t) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_QuantDTypeCast(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateQuantDTypeCast(fbb, src_t, value->dst_t()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_QUANT_DTYPE_CAST), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_QuantDTypeCast_GetDstT(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_QuantDTypeCast(); + if (prim != nullptr && value != nullptr) { + return value->dst_t(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_QuantDTypeCast_SetDstT(PrimitivePtr *primitive, int64_t dst_t) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_QuantDTypeCast(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateQuantDTypeCast(fbb, value->src_t(), dst_t); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_QUANT_DTYPE_CAST), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** ReduceFusion ********** +PrimitivePtr MindIR_ReduceFusion_CreatePrimitive(bool keep_dims, ReduceMode mode, bool reduce_to_end, float coeff) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateReduceFusion(fbb, keep_dims, static_cast(mode), reduce_to_end, coeff); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_REDUCE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +bool MindIR_ReduceFusion_GetKeepDims(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_ReduceFusion(); + if (prim != nullptr && value != nullptr) { + return value->keep_dims(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_ReduceFusion_SetKeepDims(PrimitivePtr *primitive, bool keep_dims) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_ReduceFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateReduceFusion(fbb, keep_dims, static_cast(value->mode()), + value->reduce_to_end(), value->coeff()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_REDUCE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = 
MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +ReduceMode MindIR_ReduceFusion_GetMode(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_ReduceFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->mode()); + } else { + ReduceMode en = static_cast(0); + return en; + } + } else { + ReduceMode en = static_cast(0); + return en; + } +} + +void MindIR_ReduceFusion_SetMode(PrimitivePtr *primitive, ReduceMode mode) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_ReduceFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateReduceFusion(fbb, value->keep_dims(), static_cast(mode), + value->reduce_to_end(), value->coeff()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_REDUCE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +bool MindIR_ReduceFusion_GetReduceToEnd(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_ReduceFusion(); + if (prim != nullptr && value != nullptr) { + return value->reduce_to_end(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_ReduceFusion_SetReduceToEnd(PrimitivePtr *primitive, bool reduce_to_end) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_ReduceFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateReduceFusion( + fbb, value->keep_dims(), static_cast(value->mode()), reduce_to_end, value->coeff()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_REDUCE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +float MindIR_ReduceFusion_GetCoeff(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_ReduceFusion(); + if (prim != nullptr && value != nullptr) { + return value->coeff(); + } else { + return .0; + } + } else { + return .0; + } +} + +void MindIR_ReduceFusion_SetCoeff(PrimitivePtr *primitive, float coeff) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_ReduceFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateReduceFusion( + fbb, value->keep_dims(), static_cast(value->mode()), value->reduce_to_end(), coeff); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_REDUCE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** Reshape ********** +PrimitivePtr 
MindIR_Reshape_CreatePrimitive() { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateReshape(fbb); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_RESHAPE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} + +// ********** Resize ********** +PrimitivePtr MindIR_Resize_CreatePrimitive(ResizeMethod method, int64_t new_height, int64_t new_width, + bool preserve_aspect_ratio, + CoordinateTransformMode coordinate_transform_mode, float cubic_coeff, + int64_t exclude_outside, float extrapolation_value, + NearestMode nearest_mode) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateResize( + fbb, mindspore::schema::Format_NCHW, static_cast(method), new_height, new_width, + preserve_aspect_ratio, static_cast(coordinate_transform_mode), cubic_coeff, + exclude_outside, extrapolation_value, static_cast(nearest_mode)); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_RESIZE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +ResizeMethod MindIR_Resize_GetMethod(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->method()); + } else { + ResizeMethod en = static_cast(0); + return en; + } + } else { + ResizeMethod en = static_cast(0); + return en; + } +} + +void MindIR_Resize_SetMethod(PrimitivePtr *primitive, ResizeMethod method) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateResize(fbb, mindspore::schema::Format_NCHW, static_cast(method), + value->new_height(), value->new_width(), value->preserve_aspect_ratio(), + static_cast(value->coordinate_transform_mode()), + value->cubic_coeff(), value->exclude_outside(), value->extrapolation_value(), + static_cast(value->nearest_mode())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_RESIZE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_Resize_GetNewHeight(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + return value->new_height(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_Resize_SetNewHeight(PrimitivePtr *primitive, int64_t new_height) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateResize(fbb, mindspore::schema::Format_NCHW, static_cast(value->method()), + new_height, value->new_width(), value->preserve_aspect_ratio(), + static_cast(value->coordinate_transform_mode()), + 
value->cubic_coeff(), value->exclude_outside(), value->extrapolation_value(), + static_cast(value->nearest_mode())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_RESIZE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_Resize_GetNewWidth(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + return value->new_width(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_Resize_SetNewWidth(PrimitivePtr *primitive, int64_t new_width) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateResize(fbb, mindspore::schema::Format_NCHW, static_cast(value->method()), + value->new_height(), new_width, value->preserve_aspect_ratio(), + static_cast(value->coordinate_transform_mode()), + value->cubic_coeff(), value->exclude_outside(), value->extrapolation_value(), + static_cast(value->nearest_mode())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_RESIZE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +bool MindIR_Resize_GetPreserveAspectRatio(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + return value->preserve_aspect_ratio(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_Resize_SetPreserveAspectRatio(PrimitivePtr *primitive, bool preserve_aspect_ratio) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateResize(fbb, mindspore::schema::Format_NCHW, static_cast(value->method()), + value->new_height(), value->new_width(), preserve_aspect_ratio, + static_cast(value->coordinate_transform_mode()), + value->cubic_coeff(), value->exclude_outside(), value->extrapolation_value(), + static_cast(value->nearest_mode())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_RESIZE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +CoordinateTransformMode MindIR_Resize_GetCoordinateTransformMode(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->coordinate_transform_mode()); + } else { + CoordinateTransformMode en = static_cast(0); + return en; + } + } else { + CoordinateTransformMode en = static_cast(0); + return en; + } +} + +void MindIR_Resize_SetCoordinateTransformMode(PrimitivePtr *primitive, + 
CoordinateTransformMode coordinate_transform_mode) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateResize(fbb, mindspore::schema::Format_NCHW, static_cast(value->method()), + value->new_height(), value->new_width(), value->preserve_aspect_ratio(), + static_cast(coordinate_transform_mode), + value->cubic_coeff(), value->exclude_outside(), value->extrapolation_value(), + static_cast(value->nearest_mode())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_RESIZE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +float MindIR_Resize_GetCubicCoeff(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + return value->cubic_coeff(); + } else { + return .0; + } + } else { + return .0; + } +} + +void MindIR_Resize_SetCubicCoeff(PrimitivePtr *primitive, float cubic_coeff) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateResize(fbb, mindspore::schema::Format_NCHW, static_cast(value->method()), + value->new_height(), value->new_width(), value->preserve_aspect_ratio(), + static_cast(value->coordinate_transform_mode()), + cubic_coeff, value->exclude_outside(), value->extrapolation_value(), + static_cast(value->nearest_mode())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_RESIZE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_Resize_GetExcludeOutside(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + return value->exclude_outside(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_Resize_SetExcludeOutside(PrimitivePtr *primitive, int64_t exclude_outside) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateResize( + fbb, mindspore::schema::Format_NCHW, static_cast(value->method()), value->new_height(), + value->new_width(), value->preserve_aspect_ratio(), + static_cast(value->coordinate_transform_mode()), value->cubic_coeff(), + exclude_outside, value->extrapolation_value(), static_cast(value->nearest_mode())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_RESIZE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +float MindIR_Resize_GetExtrapolationValue(ConstPrimitivePtr primitive) { 
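+  // Same defensive pattern as the other Resize getters: validate the handle, cast to the schema
+  // primitive, and fall back to 0.0 when no Resize table is attached.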
+ if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + return value->extrapolation_value(); + } else { + return .0; + } + } else { + return .0; + } +} + +void MindIR_Resize_SetExtrapolationValue(PrimitivePtr *primitive, float extrapolation_value) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateResize( + fbb, mindspore::schema::Format_NCHW, static_cast(value->method()), value->new_height(), + value->new_width(), value->preserve_aspect_ratio(), + static_cast(value->coordinate_transform_mode()), value->cubic_coeff(), + value->exclude_outside(), extrapolation_value, static_cast(value->nearest_mode())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_RESIZE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +NearestMode MindIR_Resize_GetNearestMode(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->nearest_mode()); + } else { + NearestMode en = static_cast(0); + return en; + } + } else { + NearestMode en = static_cast(0); + return en; + } +} + +void MindIR_Resize_SetNearestMode(PrimitivePtr *primitive, NearestMode nearest_mode) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Resize(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateResize( + fbb, mindspore::schema::Format_NCHW, static_cast(value->method()), value->new_height(), + value->new_width(), value->preserve_aspect_ratio(), + static_cast(value->coordinate_transform_mode()), value->cubic_coeff(), + value->exclude_outside(), value->extrapolation_value(), static_cast(nearest_mode)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_RESIZE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** Rsqrt ********** +PrimitivePtr MindIR_Rsqrt_CreatePrimitive() { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateRsqrt(fbb); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_RSQRT), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} + +// ********** ScaleFusion ********** +PrimitivePtr MindIR_ScaleFusion_CreatePrimitive(int64_t axis, ActivationType activation_type) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateScaleFusion(fbb, axis, static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SCALE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto 
ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +int64_t MindIR_ScaleFusion_GetAxis(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_ScaleFusion(); + if (prim != nullptr && value != nullptr) { + return value->axis(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_ScaleFusion_SetAxis(PrimitivePtr *primitive, int64_t axis) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_ScaleFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateScaleFusion(fbb, axis, static_cast(value->activation_type())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SCALE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +ActivationType MindIR_ScaleFusion_GetActivationType(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_ScaleFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->activation_type()); + } else { + ActivationType en = static_cast(0); + return en; + } + } else { + ActivationType en = static_cast(0); + return en; + } +} + +void MindIR_ScaleFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_ScaleFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateScaleFusion(fbb, value->axis(), static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SCALE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** Shape ********** +PrimitivePtr MindIR_Shape_CreatePrimitive() { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateShape(fbb); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SHAPE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} + +// ********** SliceFusion ********** +PrimitivePtr MindIR_SliceFusion_CreatePrimitive(const std::vector &axes) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSliceFusion(fbb, fbb.CreateVector(axes.data(), axes.size())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SLICE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +std::vector MindIR_SliceFusion_GetAxes(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_SliceFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = 
value->axes(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_SliceFusion_SetAxes(PrimitivePtr *primitive, const std::vector &axes) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_SliceFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSliceFusion(fbb, fbb.CreateVector(axes.data(), axes.size())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SLICE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** Softmax ********** +PrimitivePtr MindIR_Softmax_CreatePrimitive(const std::vector &axis) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSoftmax(fbb, fbb.CreateVector(axis.data(), axis.size())); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SOFTMAX), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +std::vector MindIR_Softmax_GetAxis(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Softmax(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->axis(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Softmax_SetAxis(PrimitivePtr *primitive, const std::vector &axis) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Softmax(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSoftmax(fbb, fbb.CreateVector(axis.data(), axis.size())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SOFTMAX), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** SpaceToBatchND ********** +PrimitivePtr MindIR_SpaceToBatchND_CreatePrimitive(const std::vector &block_shape, + const std::vector> &paddings) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSpaceToBatchND(fbb, fbb.CreateVector(block_shape.data(), block_shape.size()), + CreateVec2D(fbb, paddings)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SPACE_TO_BATCH_ND), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +std::vector MindIR_SpaceToBatchND_GetBlockShape(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_SpaceToBatchND(); + if (prim != nullptr && value != nullptr) { 
+ std::vector result; + auto src = value->block_shape(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_SpaceToBatchND_SetBlockShape(PrimitivePtr *primitive, const std::vector &block_shape) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_SpaceToBatchND(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSpaceToBatchND(fbb, fbb.CreateVector(block_shape.data(), block_shape.size()), + CreateVec2D(fbb, value->paddings())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SPACE_TO_BATCH_ND), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +std::vector> MindIR_SpaceToBatchND_GetPaddings(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_SpaceToBatchND(); + if (prim != nullptr && value != nullptr) { + std::vector> out; + auto src = value->paddings(); + for (auto sub_list : *src->data()) { + std::vector result_tmp; + result_tmp.resize(sub_list->data()->size()); + std::transform(sub_list->data()->begin(), sub_list->data()->end(), result_tmp.begin(), + [](int64_t item) { return item; }); + out.emplace_back(result_tmp); + } + return out; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_SpaceToBatchND_SetPaddings(PrimitivePtr *primitive, const std::vector> &paddings) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_SpaceToBatchND(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSpaceToBatchND( + fbb, fbb.CreateVector(value->block_shape()->data(), value->block_shape()->size()), CreateVec2D(fbb, paddings)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SPACE_TO_BATCH_ND), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** Split ********** +PrimitivePtr MindIR_Split_CreatePrimitive(int64_t output_num, const std::vector &size_splits, int64_t axis) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateSplit(fbb, output_num, fbb.CreateVector(size_splits.data(), size_splits.size()), axis); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SPLIT), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +int64_t MindIR_Split_GetOutputNum(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Split(); + if (prim != nullptr && value != nullptr) { + return value->output_num(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_Split_SetOutputNum(PrimitivePtr *primitive, int64_t output_num) { + if (primitive != nullptr && *primitive != 
nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Split(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSplit( + fbb, output_num, fbb.CreateVector(value->size_splits()->data(), value->size_splits()->size()), value->axis()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SPLIT), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +std::vector MindIR_Split_GetSizeSplits(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Split(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->size_splits(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Split_SetSizeSplits(PrimitivePtr *primitive, const std::vector &size_splits) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Split(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSplit(fbb, value->output_num(), + fbb.CreateVector(size_splits.data(), size_splits.size()), value->axis()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SPLIT), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_Split_GetAxis(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Split(); + if (prim != nullptr && value != nullptr) { + return value->axis(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_Split_SetAxis(PrimitivePtr *primitive, int64_t axis) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Split(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSplit( + fbb, value->output_num(), fbb.CreateVector(value->size_splits()->data(), value->size_splits()->size()), axis); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SPLIT), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** Sqrt ********** +PrimitivePtr MindIR_Sqrt_CreatePrimitive() { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSqrt(fbb); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SQRT), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} + +// ********** SquaredDifference ********** +PrimitivePtr MindIR_SquaredDifference_CreatePrimitive() { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = 
schema::CreateSquaredDifference(fbb); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SQUARED_DIFFERENCE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} + +// ********** Squeeze ********** +PrimitivePtr MindIR_Squeeze_CreatePrimitive(const std::vector &axis) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSqueeze(fbb, fbb.CreateVector(axis.data(), axis.size())); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SQUEEZE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +std::vector MindIR_Squeeze_GetAxis(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Squeeze(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->axis(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Squeeze_SetAxis(PrimitivePtr *primitive, const std::vector &axis) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Squeeze(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSqueeze(fbb, fbb.CreateVector(axis.data(), axis.size())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SQUEEZE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** Stack ********** +PrimitivePtr MindIR_Stack_CreatePrimitive(int64_t axis) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateStack(fbb, axis); + auto prim_offset = schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_STACK), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +int64_t MindIR_Stack_GetAxis(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Stack(); + if (prim != nullptr && value != nullptr) { + return value->axis(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_Stack_SetAxis(PrimitivePtr *primitive, int64_t axis) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Stack(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateStack(fbb, axis); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_STACK), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** StridedSlice ********** +PrimitivePtr 
MindIR_StridedSlice_CreatePrimitive(int64_t begin_mask, int64_t end_mask, int64_t ellipsis_mask, + int64_t new_axis_mask, int64_t shrink_axis_mask) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = + schema::CreateStridedSlice(fbb, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_STRIDED_SLICE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +int64_t MindIR_StridedSlice_GetBeginMask(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_StridedSlice(); + if (prim != nullptr && value != nullptr) { + return value->begin_mask(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_StridedSlice_SetBeginMask(PrimitivePtr *primitive, int64_t begin_mask) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_StridedSlice(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateStridedSlice(fbb, begin_mask, value->end_mask(), value->ellipsis_mask(), + value->new_axis_mask(), value->shrink_axis_mask()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_STRIDED_SLICE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_StridedSlice_GetEndMask(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_StridedSlice(); + if (prim != nullptr && value != nullptr) { + return value->end_mask(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_StridedSlice_SetEndMask(PrimitivePtr *primitive, int64_t end_mask) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_StridedSlice(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateStridedSlice(fbb, value->begin_mask(), end_mask, value->ellipsis_mask(), + value->new_axis_mask(), value->shrink_axis_mask()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_STRIDED_SLICE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_StridedSlice_GetEllipsisMask(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_StridedSlice(); + if (prim != nullptr && value != nullptr) { + return value->ellipsis_mask(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_StridedSlice_SetEllipsisMask(PrimitivePtr *primitive, int64_t ellipsis_mask) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_StridedSlice(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateStridedSlice(fbb, value->begin_mask(), 
value->end_mask(), ellipsis_mask, + value->new_axis_mask(), value->shrink_axis_mask()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_STRIDED_SLICE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_StridedSlice_GetNewAxisMask(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_StridedSlice(); + if (prim != nullptr && value != nullptr) { + return value->new_axis_mask(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_StridedSlice_SetNewAxisMask(PrimitivePtr *primitive, int64_t new_axis_mask) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_StridedSlice(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateStridedSlice(fbb, value->begin_mask(), value->end_mask(), value->ellipsis_mask(), + new_axis_mask, value->shrink_axis_mask()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_STRIDED_SLICE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_StridedSlice_GetShrinkAxisMask(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_StridedSlice(); + if (prim != nullptr && value != nullptr) { + return value->shrink_axis_mask(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_StridedSlice_SetShrinkAxisMask(PrimitivePtr *primitive, int64_t shrink_axis_mask) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_StridedSlice(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateStridedSlice(fbb, value->begin_mask(), value->end_mask(), value->ellipsis_mask(), + value->new_axis_mask(), shrink_axis_mask); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_STRIDED_SLICE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** SubFusion ********** +PrimitivePtr MindIR_SubFusion_CreatePrimitive(ActivationType activation_type) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSubFusion(fbb, static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SUB_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +ActivationType MindIR_SubFusion_GetActivationType(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_SubFusion(); + if (prim != nullptr && value != nullptr) { + return static_cast(value->activation_type()); + } else { + ActivationType en = static_cast(0); + return en; + 
} + } else { + ActivationType en = static_cast(0); + return en; + } +} + +void MindIR_SubFusion_SetActivationType(PrimitivePtr *primitive, ActivationType activation_type) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_SubFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateSubFusion(fbb, static_cast(activation_type)); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_SUB_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** TileFusion ********** +PrimitivePtr MindIR_TileFusion_CreatePrimitive(const std::vector &dims) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateTileFusion(fbb, fbb.CreateVector(dims.data(), dims.size())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_TILE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +std::vector MindIR_TileFusion_GetDims(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_TileFusion(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->dims(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_TileFusion_SetDims(PrimitivePtr *primitive, const std::vector &dims) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_TileFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateTileFusion(fbb, fbb.CreateVector(dims.data(), dims.size())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_TILE_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** TopKFusion ********** +PrimitivePtr MindIR_TopKFusion_CreatePrimitive(bool sorted, int64_t axis) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateTopKFusion(fbb, sorted, axis); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_TOPK_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +bool MindIR_TopKFusion_GetSorted(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_TopKFusion(); + if (prim != nullptr && value != nullptr) { + return value->sorted(); + } else { + return false; + } + } else { + return false; + } +} + +void MindIR_TopKFusion_SetSorted(PrimitivePtr *primitive, bool sorted) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + 
auto value = prim->value_as_TopKFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateTopKFusion(fbb, sorted, value->axis()); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_TOPK_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} +int64_t MindIR_TopKFusion_GetAxis(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_TopKFusion(); + if (prim != nullptr && value != nullptr) { + return value->axis(); + } else { + return 0; + } + } else { + return 0; + } +} + +void MindIR_TopKFusion_SetAxis(PrimitivePtr *primitive, int64_t axis) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_TopKFusion(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateTopKFusion(fbb, value->sorted(), axis); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_TOPK_FUSION), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +// ********** Transpose ********** +PrimitivePtr MindIR_Transpose_CreatePrimitive() { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateTranspose(fbb); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_TRANSPOSE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} + +// ********** Unsqueeze ********** +PrimitivePtr MindIR_Unsqueeze_CreatePrimitive(const std::vector &axis) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateUnsqueeze(fbb, fbb.CreateVector(axis.data(), axis.size())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_UNSQUEEZE), ops_offset.o); + fbb.Finish(prim_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} +std::vector MindIR_Unsqueeze_GetAxis(ConstPrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Unsqueeze(); + if (prim != nullptr && value != nullptr) { + std::vector result; + auto src = value->axis(); + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int64_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Unsqueeze_SetAxis(PrimitivePtr *primitive, const std::vector &axis) { + if (primitive != nullptr && *primitive != nullptr) { + auto prim = static_cast(*primitive); + auto value = prim->value_as_Unsqueeze(); + if (prim != nullptr && value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + auto ops_offset = schema::CreateUnsqueeze(fbb, fbb.CreateVector(axis.data(), axis.size())); + auto prim_offset = + schema::CreatePrimitive(fbb, static_cast(NODE_TYPE_UNSQUEEZE), ops_offset.o); + fbb.Finish(prim_offset); + auto 
new_addr = MindIRMemoryManager::GetInstance()->CreatePrimitiveFromBuilder(fbb, prim); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *primitive = ret_value; + } + } +} + +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/mindir/src/mindir_memory_manager.cc b/mindspore/lite/mindir/src/mindir_memory_manager.cc new file mode 100644 index 00000000..c775fa70 --- /dev/null +++ b/mindspore/lite/mindir/src/mindir_memory_manager.cc @@ -0,0 +1,122 @@ +#include "mindir_memory_manager.h" +#include "src/common/log.h" +#include "utils.h" +#include +namespace mindspore { +namespace lite { +namespace { +template +void ClearMap(std::map &map) { + for (auto iter = map.begin(); iter != map.end();) { + if (iter->second != nullptr) { + free(iter->second); + map.erase(iter++); + } else { + iter++; + } + } +} +} // namespace +MindIRMemoryManager *MindIRMemoryManager::GetInstance() { + static MindIRMemoryManager instance; + return &instance; +} + +void *MindIRMemoryManager::CopyFbbToNewMemory(flatbuffers::FlatBufferBuilder &fbb) { + auto buff = reinterpret_cast(malloc(fbb.GetSize())); + if (buff == nullptr) { + MS_LOG(ERROR) << "malloc memory for primitive failed!"; + fbb.Clear(); + return nullptr; + } + memcpy(buff, fbb.GetBufferPointer(), fbb.GetSize()); + fbb.Clear(); + return buff; +} +void *MindIRMemoryManager::CreateTensorFromBuilder(flatbuffers::FlatBufferBuilder &fbb_new, schema::Tensor *tensor) { + std::lock_guard lck(mutex); + if (tensor != nullptr) { + // find primitive exist + if (tensor_map.find(tensor) != tensor_map.end()) { + // if find, then delete + void *flatbuffer_ptr = tensor_map[tensor]; + if (flatbuffer_ptr != nullptr) { + free(flatbuffer_ptr); + tensor_map[tensor] = nullptr; + tensor_map.erase(tensor_map.find(tensor)); + } + } + } + // then copy fbb + auto new_memory_ptr = CopyFbbToNewMemory(fbb_new); + auto tensor_root = flatbuffers::GetMutableRoot(new_memory_ptr); + tensor_map[tensor_root] = new_memory_ptr; + return new_memory_ptr; +} + +void *MindIRMemoryManager::CreatePrimitiveFromBuilder(flatbuffers::FlatBufferBuilder &fbb_new, + schema::Primitive *primitive) { + std::lock_guard lck(mutex); + if (primitive != nullptr) { + // find primitive exist + if (primitive_map.find(primitive) != primitive_map.end()) { + // if find, then delete + void *flatbuffer_ptr = primitive_map[primitive]; + if (flatbuffer_ptr != nullptr) { + free(flatbuffer_ptr); + primitive_map[primitive] = nullptr; + primitive_map.erase(primitive_map.find(primitive)); + } + } + } + // then copy fbb + auto new_memory_ptr = CopyFbbToNewMemory(fbb_new); + auto primitive_root = flatbuffers::GetMutableRoot(new_memory_ptr); + primitive_map[primitive_root] = new_memory_ptr; + return new_memory_ptr; +} + +void MindIRMemoryManager::DeletePrimitive(schema::Primitive *primitive) { + std::lock_guard lck(mutex); + if (primitive == nullptr) { + MS_LOG(ERROR) << "primitive is nullptr, no need to delete."; + return; + } + if (primitive_map.find(primitive) != primitive_map.end()) { + // if find, then delete + void *flatbuffer_ptr = primitive_map[primitive]; + if (flatbuffer_ptr != nullptr) { + free(flatbuffer_ptr); + primitive_map[primitive] = nullptr; + primitive_map.erase(primitive_map.find(primitive)); + } + } +} + +void MindIRMemoryManager::DeleteTensor(schema::Tensor *tensor) { + std::lock_guard lck(mutex); + if (tensor == nullptr) { + MS_LOG(ERROR) << "tensor is nullptr, no need to delete."; + return; + } + if (tensor != nullptr) { + // find primitive exist + if (tensor_map.find(tensor) != 
tensor_map.end()) { + // if find, then delete + void *flatbuffer_ptr = tensor_map[tensor]; + if (flatbuffer_ptr != nullptr) { + free(flatbuffer_ptr); + tensor_map[tensor] = nullptr; + tensor_map.erase(tensor_map.find(tensor)); + } + } + } +} + +void MindIRMemoryManager::ClearAllMemory() { + std::lock_guard lck(mutex); + ClearMap(primitive_map); + ClearMap(tensor_map); +} +} // namespace lite +} // namespace mindspore \ No newline at end of file diff --git a/mindspore/lite/mindir/src/mindir_nnrt_lite_graph.cc b/mindspore/lite/mindir/src/mindir_nnrt_lite_graph.cc new file mode 100644 index 00000000..a914fa6b --- /dev/null +++ b/mindspore/lite/mindir/src/mindir_nnrt_lite_graph.cc @@ -0,0 +1,87 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "mindir_lite_graph.h" +#include "mindir_tensor.h" +#include "mindir_primitive.h" +#include "src/common/log.h" +#include "schema/model_generated.h" +#include "mindir_memory_manager.h" +#include +namespace mindspore { +namespace lite { +void MindIR_LiteGraph_Destroy(LiteGraph **lite_graph) { + if (lite_graph != nullptr && *lite_graph != nullptr) { + MS_LOG(INFO) << "start to destroy LiteGraph."; + auto graph = *lite_graph; + graph->name_.clear(); + graph->input_indices_.clear(); + graph->output_indices_.clear(); + MS_LOG(INFO) << "Destroying nodes."; + // node + for (size_t idx = 0; idx < graph->all_nodes_.size(); idx++) { + if (graph->all_nodes_[idx] != nullptr) { + MindIRMemoryManager::GetInstance()->DeletePrimitive( + static_cast(graph->all_nodes_[idx]->primitive_)); + delete graph->all_nodes_[idx]; + } + } + MS_LOG(INFO) << "Destroying subgraphs."; + // subgraph + for (size_t idx = 0; idx < graph->sub_graphs_.size(); idx++) { + if (graph->sub_graphs_[idx] != nullptr) { + delete graph->sub_graphs_[idx]; + } + } + MS_LOG(INFO) << "Destroying tensors."; + // tensor + for (size_t idx = 0; idx < graph->all_tensors_.size(); idx++) { + if (graph->all_tensors_[idx] != nullptr) { + MindIRMemoryManager::GetInstance()->DeleteTensor(static_cast(graph->all_tensors_[idx])); + } + } + // graph + delete graph; + *lite_graph = nullptr; + } else { + MS_LOG(ERROR) << "nnrt_lite_graph is nullptr, can not delete."; + } +} + +size_t MindIR_LiteGraph_GetConstTensorSize(const LiteGraph *lite_graph) { + if (lite_graph != nullptr) { + size_t size = 0; + for (auto tensor : lite_graph->all_tensors_) { + if (tensor != nullptr) { + auto value = static_cast(tensor); + if (value != nullptr) { + auto src = value->data(); + if (src == nullptr) { + continue; + } + size += src->size(); + } + } + } + MS_LOG(DEBUG) << "lite_graph has " << lite_graph->all_tensors_.size() << "tensors ,const tensor size = " << size; + return size; + } else { + MS_LOG(ERROR) << "lite_graph is nullptr"; + return 0; + } +} + +} // namespace lite +} // namespace mindspore \ No newline at end of file diff --git a/mindspore/lite/mindir/src/mindir_nnrt_lite_graph_to_model.cc b/mindspore/lite/mindir/src/mindir_nnrt_lite_graph_to_model.cc new 
file mode 100644 index 00000000..dd9202e2 --- /dev/null +++ b/mindspore/lite/mindir/src/mindir_nnrt_lite_graph_to_model.cc @@ -0,0 +1,1496 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "mindir.h" +#include +#include +#include +#include "src/common/log.h" +#include "lite_graph.h" +#include "schema/model_generated.h" +#include "mindir_types.h" +#include "message_parcel.h" +#include "nnrt/v1_0/nnrt_types.h" +#include "nnrt/v1_0/node_attr_types.h" +#include "nnrt/v1_0/model_types.h" + +using namespace OHOS::HDI::Nnrt::V1_0; +namespace mindspore { +namespace lite { + +constexpr size_t kNumTwo = 2; +constexpr size_t kNumFour = 4; +constexpr size_t kNumEight = 8; + +inline std::vector MindIR_Tensor_GetQuantParams_OHOS(TensorPtr tensor) { + if (tensor != nullptr) { + auto value = static_cast(tensor); + + if (value != nullptr) { + std::vector result; + auto src = value->quantParams(); + if (src == nullptr) { + return {}; + } + size_t size = src->size(); + result.reserve(src->size()); + for (size_t i = 0; i < size; i++) { + auto tmp = src->Get(i); + OHOS::HDI::Nnrt::V1_0::QuantParam quantParam{tmp->numBits(), tmp->zeroPoint(), tmp->scale()}; + result.emplace_back(quantParam); + } + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Model_Destroy(OHOS::HDI::Nnrt::V1_0::Model **model) { + if (model != nullptr) { + auto model_data = *model; + if (model_data != nullptr) { + delete (model_data); + *model = nullptr; + } else { + MS_LOG(ERROR) << "*model is nullptr, desrtoy model fail."; + } + } +} + +OHOS::HDI::Nnrt::V1_0::Model *MindIR_LiteGraph_To_Model(const LiteGraph *lite_graph, const SharedBuffer &buffer) { + if (lite_graph != nullptr) { + MS_LOG(INFO) << "MindIR_LiteGraph_To_Model begin"; + if (!lite_graph->name_.empty()) { + MS_LOG(INFO) << "Start converting lite graph,name =" << lite_graph->name_; + } else { + MS_LOG(INFO) << "Start converting lite graph, but lite graph has no name."; + } + std::vector inputIndex; + std::vector outputIndex; + std::vector nodes; + std::vector allTensors; + std::vector subGraph; + // nodes + MS_LOG(INFO) << "Start converting nodes, vector size = " << lite_graph->all_nodes_.size(); + nodes.reserve(lite_graph->all_nodes_.size()); + for (auto node : lite_graph->all_nodes_) { + if (node == nullptr) { + MS_LOG(ERROR) << "node is nullptr, convert fail."; + return nullptr; + } + OHOS::HDI::Nnrt::V1_0::Node tmp; + tmp.name = node->name_; + if (node->primitive_ == nullptr) { + MS_LOG(ERROR) << "node primitive is nullptr, convert fail."; + return nullptr; + } + auto prim = static_cast(node->primitive_); + auto value = prim->value_type(); + tmp.nodeType = static_cast(value); + tmp.nodeAttr = Convert(static_cast(value), node->primitive_); + tmp.inputIndex = node->input_indices_; + tmp.outputIndex = node->output_indices_; + tmp.quantType = static_cast(node->quant_type_); + nodes.emplace_back(tmp); + } + + MS_LOG(INFO) << "Start converting Tensor,Tensor size=" 
<< lite_graph->all_tensors_.size(); + // Tensor + allTensors.reserve(lite_graph->all_tensors_.size()); + unsigned int tensor_buffer_offset = 0; + uint8_t *mmap_ptr = nullptr; + if (buffer.fd != -1) { + mmap_ptr = + static_cast(mmap(nullptr, buffer.bufferSize, PROT_READ | PROT_WRITE, MAP_SHARED, buffer.fd, 0)); + if (mmap_ptr == MAP_FAILED) { + MS_LOG(ERROR) << "mmap failed"; + return nullptr; + } + } + MS_LOG(INFO) << "Start parsing tensor, mmap buffer size = " << buffer.bufferSize; + for (auto tensor : lite_graph->all_tensors_) { + OHOS::HDI::Nnrt::V1_0::Tensor tmp; + tmp.name = MindIR_Tensor_GetName(tensor); + tmp.dataType = static_cast(MindIR_Tensor_GetDataType(tensor)); + tmp.dims = MindIR_Tensor_GetDims(tensor); + tmp.format = static_cast(MindIR_Tensor_GetFormat(tensor)); + tmp.data = MindIR_Tensor_GetData(tensor, buffer, mmap_ptr, tensor_buffer_offset); + tmp.quantParams = MindIR_Tensor_GetQuantParams_OHOS(tensor); + allTensors.emplace_back(tmp); + tensor_buffer_offset = tmp.data.offset + tmp.data.dataSize; + } + MS_LOG(INFO) << ("Parsing tensor finish."); + if (buffer.fd != -1) { + auto munmap_res = munmap(mmap_ptr, buffer.bufferSize); + if (munmap_res != 0) { + MS_LOG(ERROR) << "unmap failed."; + return nullptr; + } + } + + MS_LOG(INFO) << "Start converting SubGraph,SubGraph size=" << lite_graph->sub_graphs_.size(); + // SubGraph + subGraph.reserve(lite_graph->sub_graphs_.size()); + for (auto graph : lite_graph->sub_graphs_) { + OHOS::HDI::Nnrt::V1_0::SubGraph tmp; + tmp.name = graph->name_; + tmp.inputIndices = std::vector(graph->input_indices_); + tmp.outputIndices = std::vector(graph->output_indices_); + tmp.nodeIndices = std::vector(graph->node_indices_); + subGraph.emplace_back(tmp); + } + + MS_LOG(INFO) << "Start copying model"; + auto *ret_model = new (std::nothrow) Model(); + if (ret_model == nullptr) { + MS_LOG(ERROR) << "new Model failed."; + return nullptr; + } + ret_model->name = lite_graph->name_; + ret_model->inputIndex = lite_graph->input_indices_; + ret_model->outputIndex = lite_graph->output_indices_; + ret_model->nodes = nodes; + ret_model->allTensors = allTensors; + ret_model->subGraph = subGraph; + MS_LOG(INFO) << "MindIR_LiteGraph_To_Model success"; + return ret_model; + } else { + MS_LOG(ERROR) << "lite graph is nullptr"; + return nullptr; + } +} + +std::vector ConvertActivation(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Activation(); + if (value != nullptr) { + Activation activation{}; + activation.activationType = + static_cast(value->activation_type()); + activation.alpha = value->alpha(); + activation.minVal = value->min_val(); + activation.maxVal = value->max_val(); + activation.approximate = value->approximate(); + OHOS::MessageParcel data; + (void)ActivationBlockMarshalling(data, activation); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertAddFusion(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_AddFusion(); + if (value != nullptr) { + AddFusion add_fusion{}; + add_fusion.activationType = static_cast(value->activation_type()); + OHOS::MessageParcel data; + (void)AddFusionBlockMarshalling(data, add_fusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + 
return {};
+    }
+  } else {
+    return {};
+  }
+}
+std::vector<int8_t> ConvertArgMaxFusion(PrimitivePtr primitive) {
+  if (primitive != nullptr) {
+    auto prim = static_cast<schema::Primitive *>(primitive);
+    auto value = prim->value_as_ArgMaxFusion();
+    if (value != nullptr) {
+      ArgMaxFusion arg_max_fusion{};
+      arg_max_fusion.axis = value->axis();
+      arg_max_fusion.topK = value->top_k();
+      arg_max_fusion.keepDims = value->keep_dims();
+      arg_max_fusion.outMaxValue = value->out_max_value();
+      OHOS::MessageParcel data;
+      (void)ArgMaxFusionBlockMarshalling(data, arg_max_fusion);
+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
+      return ret;
+    } else {
+      return {};
+    }
+  } else {
+    return {};
+  }
+}
+std::vector<int8_t> ConvertAvgPoolFusion(PrimitivePtr primitive) {
+  if (primitive != nullptr) {
+    auto prim = static_cast<schema::Primitive *>(primitive);
+    auto value = prim->value_as_AvgPoolFusion();
+    if (value != nullptr) {
+      AvgPoolFusion avg_pool_fusion{};
+      std::vector<int64_t> kernel_size;
+      kernel_size.reserve(kNumTwo);
+      if (value->kernel_size() == nullptr || value->kernel_size()->size() < kNumTwo) {
+        kernel_size = {};
+      } else {
+        kernel_size = std::vector<int64_t>(value->kernel_size()->begin(), value->kernel_size()->end());
+      }
+      std::vector<int64_t> strides;
+      strides.reserve(kNumTwo);
+      if (value->strides() == nullptr || value->strides()->size() < kNumTwo) {
+        strides = {};
+      } else {
+        strides = std::vector<int64_t>(value->strides()->begin(), value->strides()->end());
+      }
+      std::vector<int64_t> padList;
+      padList.reserve(kNumFour);
+      if (value->pad() == nullptr || value->pad()->size() < kNumFour) {
+        padList = {};
+      } else {
+        padList = std::vector<int64_t>(value->pad()->begin(), value->pad()->end());
+      }
+      avg_pool_fusion.kernelSize = kernel_size;
+      avg_pool_fusion.strides = strides;
+      avg_pool_fusion.pad = padList;
+      avg_pool_fusion.padMode = static_cast<OHOS::HDI::Nnrt::V1_0::PadMode>(value->pad_mode());
+      avg_pool_fusion.roundMode = static_cast<OHOS::HDI::Nnrt::V1_0::RoundMode>(value->round_mode());
+      avg_pool_fusion.format = static_cast<OHOS::HDI::Nnrt::V1_0::Format>(value->format());
+      avg_pool_fusion.global = value->global();
+      avg_pool_fusion.activationType = static_cast<OHOS::HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
+      OHOS::MessageParcel data;
+      (void)AvgPoolFusionBlockMarshalling(data, avg_pool_fusion);
+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
+      return ret;
+    } else {
+      return {};
+    }
+  } else {
+    return {};
+  }
+}
+std::vector<int8_t> ConvertBatchToSpaceND(PrimitivePtr primitive) {
+  if (primitive != nullptr) {
+    auto prim = static_cast<schema::Primitive *>(primitive);
+    auto value = prim->value_as_BatchToSpaceND();
+    if (value != nullptr) {
+      BatchToSpaceND batch_to_space_n_d{};
+      std::vector<int64_t> blockShape;
+      blockShape.reserve(kNumTwo);
+      if (value->block_shape() == nullptr || value->block_shape()->size() < kNumTwo) {
+        blockShape = {0, 0};
+      } else {
+        blockShape = std::vector<int64_t>(value->block_shape()->begin(), value->block_shape()->end());
+      }
+      batch_to_space_n_d.blockShape = blockShape;
+      auto crops = value->crops();
+      std::vector<std::vector<int64_t>> crops_vec2d;
+      if (crops == nullptr || crops->data() == nullptr) {
+        MS_LOG(ERROR) << "crops_data is nullptr";
+        crops_vec2d = {{}};
+      } else {
+        crops_vec2d.reserve(crops->data()->size());
+        for (size_t i = 0; i < crops->data()->size(); i++) {
+          auto vet = crops->data()->Get(i);
+          crops_vec2d.emplace_back(std::vector<int64_t>(vet->data()->begin(), vet->data()->end()));
+        }
+      }
+      batch_to_space_n_d.crops = crops_vec2d;
+      OHOS::MessageParcel data;
+      (void)BatchToSpaceNDBlockMarshalling(data, batch_to_space_n_d);
+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
+      return ret;
+    } else {
+      return {};
+    }
+  } else {
+    return {};
+  }
+}
+std::vector<int8_t> ConvertBiasAdd(PrimitivePtr primitive) {
+  if (primitive != nullptr) {
+    auto prim = static_cast<schema::Primitive *>(primitive);
+    auto value = prim->value_as_BiasAdd();
+    if (value != nullptr) {
+      BiasAdd bias_add{};
+      OHOS::MessageParcel data;
+      (void)BiasAddBlockMarshalling(data, bias_add);
+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
+      return ret;
+    } else {
+      return {};
+    }
+  } else {
+    return {};
+  }
+}
+std::vector<int8_t> ConvertCast(PrimitivePtr primitive) {
+  if (primitive != nullptr) {
+    auto prim = static_cast<schema::Primitive *>(primitive);
+    auto value = prim->value_as_Cast();
+    if (value != nullptr) {
+      Cast cast{};
+      OHOS::MessageParcel data;
+      (void)CastBlockMarshalling(data, cast);
+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
+      return ret;
+    } else {
+      return {};
+    }
+  } else {
+    return {};
+  }
+}
+std::vector<int8_t> ConvertConcat(PrimitivePtr primitive) {
+  if (primitive != nullptr) {
+    auto prim = static_cast<schema::Primitive *>(primitive);
+    auto value = prim->value_as_Concat();
+    if (value != nullptr) {
+      Concat concat{};
+      concat.axis = value->axis();
+      OHOS::MessageParcel data;
+      (void)ConcatBlockMarshalling(data, concat);
+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
+      return ret;
+    } else {
+      return {};
+    }
+  } else {
+    return {};
+  }
+}
+std::vector<int8_t> ConvertConv2DFusion(PrimitivePtr primitive) {
+  if (primitive != nullptr) {
+    auto prim = static_cast<schema::Primitive *>(primitive);
+    auto value = prim->value_as_Conv2DFusion();
+    if (value != nullptr) {
+      Conv2DFusion conv2_d_fusion{};
+      std::vector<int64_t> kernel_size;
+      kernel_size.reserve(kNumTwo);
+      if (value->kernel_size() == nullptr || value->kernel_size()->size() < kNumTwo) {
+        kernel_size = {};
+      } else {
+        kernel_size = std::vector<int64_t>(value->kernel_size()->begin(), value->kernel_size()->end());
+      }
+      std::vector<int64_t> strides;
+      strides.reserve(kNumTwo);
+      if (value->stride() == nullptr || value->stride()->size() < kNumTwo) {
+        strides = {};
+      } else {
+        strides = std::vector<int64_t>(value->stride()->begin(), value->stride()->end());
+      }
+      std::vector<int64_t> dilation;
+      dilation.reserve(kNumTwo);
+      if (value->dilation() == nullptr || value->dilation()->size() < kNumTwo) {
+        dilation = {};
+      } else {
+        dilation = std::vector<int64_t>(value->dilation()->begin(), value->dilation()->end());
+      }
+      std::vector<int64_t> padList;
+      padList.reserve(kNumFour);
+      if (value->pad_list() == nullptr || value->pad_list()->size() < kNumFour) {
+        padList = {};
+      } else {
+        padList = std::vector<int64_t>(value->pad_list()->begin(), value->pad_list()->end());
+      }
+      conv2_d_fusion.kernelSize = kernel_size;
+      conv2_d_fusion.stride = strides;
+      conv2_d_fusion.dilation = dilation;
+      conv2_d_fusion.padMode = static_cast<OHOS::HDI::Nnrt::V1_0::PadMode>(value->pad_mode());
+      conv2_d_fusion.padList = padList;
+      conv2_d_fusion.group = value->group();
+      conv2_d_fusion.inChannel = value->in_channel();
+      conv2_d_fusion.outChannel = value->out_channel();
+      conv2_d_fusion.activationType = static_cast<OHOS::HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
+      OHOS::MessageParcel data;
+      (void)Conv2DFusionBlockMarshalling(data, conv2_d_fusion);
+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
+      return ret;
+    } else {
+      return {};
+    }
+  } else {
+    return {};
+  }
+}
+std::vector<int8_t> ConvertConv2dTransposeFusion(PrimitivePtr primitive) {
+  if (primitive != nullptr) {
+    auto prim = static_cast<schema::Primitive *>(primitive);
+    auto value = prim->value_as_Conv2dTransposeFusion();
+    if (value != nullptr) {
+      Conv2dTransposeFusion conv2d_transpose_fusion{};
+      std::vector<int64_t> kernel_size;
+      kernel_size.reserve(kNumTwo);
+      if (value->kernel_size() == nullptr || value->kernel_size()->size() < kNumTwo) {
+        kernel_size = {};
+      } else {
+        kernel_size = std::vector<int64_t>(value->kernel_size()->begin(), value->kernel_size()->end());
+      }
+      std::vector<int64_t> strides;
+      strides.reserve(kNumTwo);
+      if (value->stride() == nullptr || value->stride()->size() < kNumTwo) {
+        strides = {};
+      } else {
+        strides = std::vector<int64_t>(value->stride()->begin(), value->stride()->end());
+      }
+      std::vector<int64_t> dilation;
+      dilation.reserve(kNumTwo);
+      if (value->dilation() == nullptr || value->dilation()->size() < kNumTwo) {
+        dilation = {};
+      } else {
+        dilation = std::vector<int64_t>(value->dilation()->begin(), value->dilation()->end());
+      }
+      std::vector<int64_t> padList;
+      padList.reserve(kNumFour);
+      if (value->pad_list() == nullptr || value->pad_list()->size() < kNumFour) {
+        padList = {};
+      } else {
+        padList = std::vector<int64_t>(value->pad_list()->begin(), value->pad_list()->end());
+      }
+      std::vector<int64_t> output_paddings;
+      output_paddings.reserve(kNumTwo);
+      if (value->output_paddings() == nullptr || value->output_paddings()->size() < kNumTwo) {
+        output_paddings = {};
+      } else {
+        output_paddings = std::vector<int64_t>(value->output_paddings()->begin(), value->output_paddings()->end());
+      }
+      conv2d_transpose_fusion.kernelSize = kernel_size;
+      conv2d_transpose_fusion.stride = strides;
+      conv2d_transpose_fusion.dilation = dilation;
+      conv2d_transpose_fusion.padMode = static_cast<OHOS::HDI::Nnrt::V1_0::PadMode>(value->pad_mode());
+      conv2d_transpose_fusion.padList = padList;
+      conv2d_transpose_fusion.group = value->group();
+      conv2d_transpose_fusion.inChannel = value->in_channel();
+      conv2d_transpose_fusion.outChannel = value->out_channel();
+      conv2d_transpose_fusion.activationType =
+        static_cast<OHOS::HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
+      conv2d_transpose_fusion.outputPaddings = output_paddings;
+      OHOS::MessageParcel data;
+      (void)Conv2dTransposeFusionBlockMarshalling(data, conv2d_transpose_fusion);
+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
+      return ret;
+    } else {
+      return {};
+    }
+  } else {
+    return {};
+  }
+}
+std::vector<int8_t> ConvertDivFusion(PrimitivePtr primitive) {
+  if (primitive != nullptr) {
+    auto prim = static_cast<schema::Primitive *>(primitive);
+    auto value = prim->value_as_DivFusion();
+    if (value != nullptr) {
+      DivFusion div_fusion{};
+      div_fusion.activationType = static_cast<OHOS::HDI::Nnrt::V1_0::ActivationType>(value->activation_type());
+      OHOS::MessageParcel data;
+      (void)DivFusionBlockMarshalling(data, div_fusion);
+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
+      return ret;
+    } else {
+      return {};
+    }
+  } else {
+    return {};
+  }
+}
+std::vector<int8_t> ConvertEltwise(PrimitivePtr primitive) {
+  if (primitive != nullptr) {
+    auto prim = static_cast<schema::Primitive *>(primitive);
+    auto value = prim->value_as_Eltwise();
+    if (value != nullptr) {
+      Eltwise eltwise{};
+      eltwise.mode = static_cast<OHOS::HDI::Nnrt::V1_0::EltwiseMode>(value->mode());
+      OHOS::MessageParcel data;
+      (void)EltwiseBlockMarshalling(data, eltwise);
+      std::vector<int8_t> ret(reinterpret_cast<const int8_t *>(data.GetData()),
+                              reinterpret_cast<const int8_t *>(data.GetData()) + data.GetDataSize());
+      return ret;
+    } else {
+      return {};
+    }
+  } else {
+    return {};
+  }
+}
+std::vector<int8_t> ConvertExpandDims(PrimitivePtr primitive) {
+  if (primitive != nullptr) {
+    auto prim = static_cast<schema::Primitive *>(primitive);
+    auto value = prim->value_as_ExpandDims();
+    if (value != nullptr) {
+      ExpandDims expand_dims{};
OHOS::MessageParcel data; + (void)ExpandDimsBlockMarshalling(data, expand_dims); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertFill(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Fill(); + if (value != nullptr) { + Fill fill{}; + OHOS::MessageParcel data; + (void)FillBlockMarshalling(data, fill); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertFullConnection(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_FullConnection(); + if (value != nullptr) { + FullConnection full_connection{}; + full_connection.hasBias = value->has_bias(); + full_connection.useAxis = value->use_axis(); + full_connection.axis = value->axis(); + full_connection.activationType = static_cast(value->activation_type()); + OHOS::MessageParcel data; + (void)FullConnectionBlockMarshalling(data, full_connection); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertFusedBatchNorm(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_FusedBatchNorm(); + if (value != nullptr) { + FusedBatchNorm fused_batch_norm{}; + fused_batch_norm.epsilon = value->epsilon(); + OHOS::MessageParcel data; + (void)FusedBatchNormBlockMarshalling(data, fused_batch_norm); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertGather(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Gather(); + if (value != nullptr) { + Gather gather{}; + OHOS::MessageParcel data; + (void)GatherBlockMarshalling(data, gather); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertLayerNormFusion(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_LayerNormFusion(); + if (value != nullptr) { + LayerNormFusion layer_norm_fusion{}; + layer_norm_fusion.beginNormAxis = value->begin_norm_axis(); + layer_norm_fusion.epsilon = value->epsilon(); + layer_norm_fusion.elementwiseAffine = value->elementwise_affine(); + layer_norm_fusion.beginParamsAxis = value->begin_params_axis(); + OHOS::MessageParcel data; + (void)LayerNormFusionBlockMarshalling(data, layer_norm_fusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertLessEqual(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_LessEqual(); + if (value != nullptr) { + LessEqual less_equal{}; + OHOS::MessageParcel data; + (void)LessEqualBlockMarshalling(data, less_equal); + std::vector 
ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertMatMulFusion(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_MatMulFusion(); + if (value != nullptr) { + MatMulFusion mat_mul_fusion{}; + mat_mul_fusion.transposeA = value->transpose_a(); + mat_mul_fusion.transposeB = value->transpose_b(); + mat_mul_fusion.activationType = static_cast(value->activation_type()); + OHOS::MessageParcel data; + (void)MatMulFusionBlockMarshalling(data, mat_mul_fusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertMaximum(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Maximum(); + if (value != nullptr) { + Maximum maximum{}; + OHOS::MessageParcel data; + (void)MaximumBlockMarshalling(data, maximum); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertMaxPoolFusion(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_MaxPoolFusion(); + if (value != nullptr) { + MaxPoolFusion max_pool_fusion{}; + std::vector kernel_size; + kernel_size.reserve(kNumTwo); + if (value->kernel_size() == nullptr || value->kernel_size()->size() < kNumTwo) { + kernel_size = {}; + } else { + kernel_size = std::vector(value->kernel_size()->begin(), value->kernel_size()->end()); + } + std::vector strides; + strides.reserve(kNumTwo); + if (value->strides() == nullptr || value->strides()->size() < kNumTwo) { + strides = {}; + } else { + strides = std::vector(value->strides()->begin(), value->strides()->end()); + } + std::vector padList; + padList.reserve(kNumFour); + if (value->pad() == nullptr || value->pad()->size() < kNumFour) { + padList = {}; + } else { + padList = std::vector(value->pad()->begin(), value->pad()->end()); + } + max_pool_fusion.kernelSize = kernel_size; + max_pool_fusion.strides = strides; + max_pool_fusion.pad = padList; + max_pool_fusion.padMode = static_cast(value->pad_mode()); + max_pool_fusion.format = static_cast(value->format()); + max_pool_fusion.global = value->global(); + max_pool_fusion.activationType = static_cast(value->activation_type()); + OHOS::MessageParcel data; + (void)MaxPoolFusionBlockMarshalling(data, max_pool_fusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertMulFusion(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_MulFusion(); + if (value != nullptr) { + MulFusion mul_fusion{}; + mul_fusion.activationType = static_cast(value->activation_type()); + OHOS::MessageParcel data; + (void)MulFusionBlockMarshalling(data, mul_fusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertOneHot(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = 
static_cast(primitive); + auto value = prim->value_as_OneHot(); + if (value != nullptr) { + OneHot one_hot{}; + one_hot.axis = value->axis(); + OHOS::MessageParcel data; + (void)OneHotBlockMarshalling(data, one_hot); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertPadFusion(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_PadFusion(); + if (value != nullptr) { + PadFusion pad_fusion{}; + auto paddings = value->paddings(); + std::vector> paddings_vec2d; + if (paddings == nullptr || paddings->data()->size() < kNumTwo) { + paddings_vec2d = {{0}, {0}, {0}, {0}}; + } else { + paddings_vec2d.reserve(paddings->data()->size()); + for (size_t i = 0; i < paddings->data()->size(); i++) { + auto vet = paddings->data()->Get(i); + paddings_vec2d.emplace_back(std::vector(vet->data()->begin(), vet->data()->end())); + } + } + pad_fusion.paddings = paddings_vec2d; + pad_fusion.paddingMode = static_cast(value->padding_mode()); + pad_fusion.constantValue = value->constant_value(); + OHOS::MessageParcel data; + (void)PadFusionBlockMarshalling(data, pad_fusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertPowFusion(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_PowFusion(); + if (value != nullptr) { + PowFusion pow_fusion{}; + pow_fusion.scale = value->scale(); + pow_fusion.shift = value->shift(); + OHOS::MessageParcel data; + (void)PowFusionBlockMarshalling(data, pow_fusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertPReLUFusion(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_PReLUFusion(); + if (value != nullptr) { + PReLUFusion p_re_l_u_fusion{}; + p_re_l_u_fusion.channelShared = value->channel_shared(); + OHOS::MessageParcel data; + (void)PReLUFusionBlockMarshalling(data, p_re_l_u_fusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertQuantDTypeCast(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_QuantDTypeCast(); + if (value != nullptr) { + QuantDTypeCast quant_d_type_cast{}; + quant_d_type_cast.srcT = value->src_t(); + quant_d_type_cast.dstT = value->dst_t(); + OHOS::MessageParcel data; + (void)QuantDTypeCastBlockMarshalling(data, quant_d_type_cast); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertReduceFusion(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_ReduceFusion(); + if (value != nullptr) { + ReduceFusion reduce_fusion{}; + reduce_fusion.keepDims = value->keep_dims(); + reduce_fusion.mode = static_cast(value->mode()); + reduce_fusion.reduceToEnd = 
value->reduce_to_end(); + reduce_fusion.coeff = value->coeff(); + OHOS::MessageParcel data; + (void)ReduceFusionBlockMarshalling(data, reduce_fusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertReshape(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Reshape(); + if (value != nullptr) { + Reshape reshape{}; + OHOS::MessageParcel data; + (void)ReshapeBlockMarshalling(data, reshape); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} + +std::vector ConvertResize(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Resize(); + if (value != nullptr) { + Resize resize{}; + resize.method = static_cast(value->method()); + resize.newHeight = value->new_height(); + resize.newWidth = value->new_width(); + resize.preserveAspectRatio = value->preserve_aspect_ratio(); + resize.coordinateTransformMode = + static_cast(value->coordinate_transform_mode()); + resize.cubicCoeff = value->cubic_coeff(); + resize.excludeOutside = value->exclude_outside(); + resize.extrapolationValue = value->extrapolation_value(); + resize.nearestMode = static_cast(value->nearest_mode()); + OHOS::MessageParcel data; + (void)ResizeBlockMarshalling(data, resize); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertRsqrt(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Rsqrt(); + if (value != nullptr) { + Rsqrt rsqrt{}; + OHOS::MessageParcel data; + (void)RsqrtBlockMarshalling(data, rsqrt); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertScaleFusion(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_ScaleFusion(); + if (value != nullptr) { + ScaleFusion scale_fusion{}; + scale_fusion.axis = value->axis(); + scale_fusion.activationType = static_cast(value->activation_type()); + OHOS::MessageParcel data; + (void)ScaleFusionBlockMarshalling(data, scale_fusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertShape(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Shape(); + if (value != nullptr) { + Shape shape{}; + OHOS::MessageParcel data; + (void)ShapeBlockMarshalling(data, shape); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertSliceFusion(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_SliceFusion(); + if (value != nullptr) { + SliceFusion slice_fusion{}; + std::vector axes; + if (value->axes() == 
nullptr) { + axes = {1, 2, 3, 4, 5, 6, 7}; + } else { + axes = std::vector(value->axes()->begin(), value->axes()->end()); + } + slice_fusion.axes = axes; + OHOS::MessageParcel data; + (void)SliceFusionBlockMarshalling(data, slice_fusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertSoftmax(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Softmax(); + if (value != nullptr) { + Softmax softmax{}; + std::vector axis; + if (value->axis() == nullptr) { + axis = {}; + } else { + axis = std::vector(value->axis()->begin(), value->axis()->end()); + } + softmax.axis = axis; + OHOS::MessageParcel data; + (void)SoftmaxBlockMarshalling(data, softmax); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertSpaceToBatchND(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_SpaceToBatchND(); + if (value != nullptr) { + SpaceToBatchND space_to_batch_n_d{}; + std::vector blockShape; + blockShape.reserve(kNumTwo); + if (value->block_shape() == nullptr || value->block_shape()->size() < kNumTwo) { + blockShape = {0, 0}; + } else { + blockShape = std::vector(value->block_shape()->begin(), value->block_shape()->end()); + } + space_to_batch_n_d.blockShape = blockShape; + auto paddings = value->paddings(); + std::vector> paddings_vec2d; + if (paddings == nullptr || paddings->data()->size() == 0 || *(paddings->data()->begin()) == nullptr || + (*(paddings->data()->begin()))->data() == nullptr) { + paddings_vec2d = {}; + } else { + paddings_vec2d.reserve(paddings->data()->size()); + for (size_t i = 0; i < paddings->data()->size(); i++) { + auto vet = paddings->data()->Get(i); + paddings_vec2d.emplace_back(std::vector(vet->data()->begin(), vet->data()->end())); + } + } + space_to_batch_n_d.paddings = paddings_vec2d; + OHOS::MessageParcel data; + (void)SpaceToBatchNDBlockMarshalling(data, space_to_batch_n_d); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertSplit(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Split(); + if (value != nullptr) { + Split split{}; + split.outputNum = value->output_num(); + std::vector sizeSplits; + sizeSplits.reserve(split.outputNum); + if (value->size_splits() == nullptr || value->size_splits()->size() <= static_cast(split.outputNum)) { + sizeSplits = {}; + } else { + sizeSplits = std::vector(value->size_splits()->begin(), value->size_splits()->end()); + } + split.sizeSplits = sizeSplits; + split.axis = value->axis(); + OHOS::MessageParcel data; + (void)SplitBlockMarshalling(data, split); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertSqrt(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Sqrt(); + if (value != nullptr) { + Sqrt sqrt{}; + OHOS::MessageParcel data; + 
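+      // Sqrt carries no attributes; like every ConvertXxx helper, it marshals the HDI struct
+      // into a MessageParcel and returns the parcel's raw bytes as the primitive payload.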
(void)SqrtBlockMarshalling(data, sqrt); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertSquaredDifference(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_SquaredDifference(); + if (value != nullptr) { + SquaredDifference squared_difference{}; + OHOS::MessageParcel data; + (void)SquaredDifferenceBlockMarshalling(data, squared_difference); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertSqueeze(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Squeeze(); + if (value != nullptr) { + Squeeze squeeze{}; + std::vector axis; + if (value->axis() == nullptr) { + axis = {}; + } else { + axis = std::vector(value->axis()->begin(), value->axis()->end()); + } + squeeze.axis = axis; + OHOS::MessageParcel data; + (void)SqueezeBlockMarshalling(data, squeeze); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertStack(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Stack(); + if (value != nullptr) { + Stack stack{}; + stack.axis = value->axis(); + OHOS::MessageParcel data; + (void)StackBlockMarshalling(data, stack); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertStridedSlice(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_StridedSlice(); + if (value != nullptr) { + StridedSlice strided_slice{}; + strided_slice.beginMask = value->begin_mask(); + strided_slice.endMask = value->end_mask(); + strided_slice.ellipsisMask = value->ellipsis_mask(); + strided_slice.newAxisMask = value->new_axis_mask(); + strided_slice.shrinkAxisMask = value->shrink_axis_mask(); + OHOS::MessageParcel data; + (void)StridedSliceBlockMarshalling(data, strided_slice); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertSubFusion(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_SubFusion(); + if (value != nullptr) { + SubFusion sub_fusion{}; + sub_fusion.activationType = static_cast(value->activation_type()); + OHOS::MessageParcel data; + (void)SubFusionBlockMarshalling(data, sub_fusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertTileFusion(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_TileFusion(); + if (value != nullptr) { + TileFusion tile_fusion{}; + std::vector dims; + dims.reserve(kNumEight); + if (value->dims() == nullptr) { + dims = {0, 0, 0, 0, 0, 0, 0, 0}; + } else { + 
dims = std::vector(value->dims()->begin(), value->dims()->end()); + } + tile_fusion.dims = dims; + OHOS::MessageParcel data; + (void)TileFusionBlockMarshalling(data, tile_fusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertTopKFusion(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_TopKFusion(); + if (value != nullptr) { + TopKFusion top_k_fusion{}; + top_k_fusion.sorted = value->sorted(); + top_k_fusion.axis = value->axis(); + OHOS::MessageParcel data; + (void)TopKFusionBlockMarshalling(data, top_k_fusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertTranspose(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Transpose(); + if (value != nullptr) { + Transpose transpose{}; + OHOS::MessageParcel data; + (void)TransposeBlockMarshalling(data, transpose); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} +std::vector ConvertUnsqueeze(PrimitivePtr primitive) { + if (primitive != nullptr) { + auto prim = static_cast(primitive); + auto value = prim->value_as_Unsqueeze(); + if (value != nullptr) { + Unsqueeze unsqueeze{}; + std::vector axis; + axis.reserve(kNumEight); + if (value->axis() == nullptr) { + axis = {0, 0, 0, 0}; + } else { + axis = std::vector(value->axis()->begin(), value->axis()->end()); + } + unsqueeze.axis = axis; + OHOS::MessageParcel data; + (void)UnsqueezeBlockMarshalling(data, unsqueeze); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; + } else { + return {}; + } + } else { + return {}; + } +} + +std::vector Convert(NodeType type, PrimitivePtr primitive) { + switch (type) { + case NODE_TYPE_ACTIVATION: + return ConvertActivation(primitive); + break; + case NODE_TYPE_ADD_FUSION: + return ConvertAddFusion(primitive); + break; + case NODE_TYPE_ARGMAX_FUSION: + return ConvertArgMaxFusion(primitive); + break; + case NODE_TYPE_AVG_POOL_FUSION: + return ConvertAvgPoolFusion(primitive); + break; + case NODE_TYPE_BATCH_TO_SPACE_ND: + return ConvertBatchToSpaceND(primitive); + break; + case NODE_TYPE_BIAS_ADD: + return ConvertBiasAdd(primitive); + break; + case NODE_TYPE_CAST: + return ConvertCast(primitive); + break; + case NODE_TYPE_CONCAT: + return ConvertConcat(primitive); + break; + case NODE_TYPE_CONV2D_FUSION: + return ConvertConv2DFusion(primitive); + break; + case NODE_TYPE_CONV2D_TRANSPOSE_FUSION: + return ConvertConv2dTransposeFusion(primitive); + break; + case NODE_TYPE_DIV_FUSION: + return ConvertDivFusion(primitive); + break; + case NODE_TYPE_ELTWISE: + return ConvertEltwise(primitive); + break; + case NODE_TYPE_EXPAND_DIMS: + return ConvertExpandDims(primitive); + break; + case NODE_TYPE_FILL: + return ConvertFill(primitive); + break; + case NODE_TYPE_FULL_CONNECTION: + return ConvertFullConnection(primitive); + break; + case NODE_TYPE_FUSED_BATCH_NORM: + return ConvertFusedBatchNorm(primitive); + break; + case NODE_TYPE_GATHER: + return ConvertGather(primitive); + break; + case NODE_TYPE_LAYER_NORM_FUSION: + 
return ConvertLayerNormFusion(primitive); + break; + case NODE_TYPE_LESS_EQUAL: + return ConvertLessEqual(primitive); + break; + case NODE_TYPE_MATMUL_FUSION: + return ConvertMatMulFusion(primitive); + break; + case NODE_TYPE_MAXIMUM: + return ConvertMaximum(primitive); + break; + case NODE_TYPE_MAX_POOL_FUSION: + return ConvertMaxPoolFusion(primitive); + break; + case NODE_TYPE_MUL_FUSION: + return ConvertMulFusion(primitive); + break; + case NODE_TYPE_ONE_HOT: + return ConvertOneHot(primitive); + break; + case NODE_TYPE_PAD_FUSION: + return ConvertPadFusion(primitive); + break; + case NODE_TYPE_POW_FUSION: + return ConvertPowFusion(primitive); + break; + case NODE_TYPE_PRELU_FUSION: + return ConvertPReLUFusion(primitive); + break; + case NODE_TYPE_QUANT_DTYPE_CAST: + return ConvertQuantDTypeCast(primitive); + break; + case NODE_TYPE_REDUCE_FUSION: + return ConvertReduceFusion(primitive); + break; + case NODE_TYPE_RESHAPE: + return ConvertReshape(primitive); + break; + case NODE_TYPE_RESIZE: + return ConvertResize(primitive); + break; + case NODE_TYPE_RSQRT: + return ConvertRsqrt(primitive); + break; + case NODE_TYPE_SCALE_FUSION: + return ConvertScaleFusion(primitive); + break; + case NODE_TYPE_SHAPE: + return ConvertShape(primitive); + break; + case NODE_TYPE_SLICE_FUSION: + return ConvertSliceFusion(primitive); + break; + case NODE_TYPE_SOFTMAX: + return ConvertSoftmax(primitive); + break; + case NODE_TYPE_SPACE_TO_BATCH_ND: + return ConvertSpaceToBatchND(primitive); + break; + case NODE_TYPE_SPLIT: + return ConvertSplit(primitive); + break; + case NODE_TYPE_SQRT: + return ConvertSqrt(primitive); + break; + case NODE_TYPE_SQUARED_DIFFERENCE: + return ConvertSquaredDifference(primitive); + break; + case NODE_TYPE_SQUEEZE: + return ConvertSqueeze(primitive); + break; + case NODE_TYPE_STACK: + return ConvertStack(primitive); + break; + case NODE_TYPE_STRIDED_SLICE: + return ConvertStridedSlice(primitive); + break; + case NODE_TYPE_SUB_FUSION: + return ConvertSubFusion(primitive); + break; + case NODE_TYPE_TILE_FUSION: + return ConvertTileFusion(primitive); + break; + case NODE_TYPE_TOPK_FUSION: + return ConvertTopKFusion(primitive); + break; + case NODE_TYPE_TRANSPOSE: + return ConvertTranspose(primitive); + break; + case NODE_TYPE_UNSQUEEZE: + return ConvertUnsqueeze(primitive); + break; + default: + return {}; + } +} + +} // namespace lite +} // namespace mindspore \ No newline at end of file diff --git a/mindspore/lite/mindir/src/mindir_tensor.cc b/mindspore/lite/mindir/src/mindir_tensor.cc new file mode 100644 index 00000000..a62ec257 --- /dev/null +++ b/mindspore/lite/mindir/src/mindir_tensor.cc @@ -0,0 +1,389 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "src/common/log.h" +#include "mindir.h" +#include "utils.h" +#include "mindir_memory_manager.h" +#include "nnrt/v1_0/nnrt_types.h" + +using namespace OHOS::HDI::Nnrt::V1_0; + +namespace mindspore { +namespace lite { +// ********** Tensor ********** +TensorPtr MindIR_Tensor_Create() { + flatbuffers::FlatBufferBuilder fbb; + std::vector dims(1, 0); + std::vector data(1, 0); + std::vector quant_params(1, {0, 0, 8}); + std::string name = " "; + auto ops_offset = schema::CreateTensor(fbb, 0, DataType::DATA_TYPE_INT32, 0, schema::Format::Format_NCHW, 0, 0, 0, 0, + 0, fbb.CreateString(name.c_str(), name.size())); + fbb.Finish(ops_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} + +TensorPtr MindIR_Tensor_Create(const std::string &name, DataType data_type, const std::vector &dims, + Format format, const std::vector &data, + const std::vector &quant_params) { + flatbuffers::FlatBufferBuilder fbb; + + auto ops_offset = + schema::CreateTensor(fbb, 0, data_type, fbb.CreateVector(dims.data(), dims.size()), + static_cast(format), 0, 0, fbb.CreateVector(data.data(), data.size()), + ConvertQuantParams(fbb, quant_params), 0, fbb.CreateString(name.c_str(), name.size())); + fbb.Finish(ops_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, nullptr); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + return ret_value; +} + +std::string MindIR_Tensor_GetName(ConstTensorPtr tensor) { + if (tensor != nullptr) { + auto value = static_cast(tensor); + if (value != nullptr) { + return value->name()->str(); + } else { + return ""; + } + } else { + return ""; + } +} + +void MindIR_Tensor_SetName(TensorPtr *tensor, const std::string &name) { + if (tensor != nullptr && *tensor != nullptr) { + auto value = static_cast(*tensor); + if (value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + flatbuffers::Offset> dims; + if (value->dims() == nullptr || value->dims()->size() <= 0) { + dims = 0; + } else { + dims = fbb.CreateVector(value->dims()->data(), value->dims()->size()); + } + flatbuffers::Offset> data; + if (value->data() == nullptr || value->data()->size() <= 0) { + data = 0; + } else { + data = fbb.CreateVector(value->data()->data(), value->data()->size()); + } + auto ops_offset = schema::CreateTensor( + fbb, 0, value->dataType(), dims, static_cast(value->format()), 0, 0, data, + ConvertQuantParams(fbb, value->quantParams()), 0, fbb.CreateString(name.c_str(), name.size())); + fbb.Finish(ops_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *tensor = ret_value; + } + } +} +DataType MindIR_Tensor_GetDataType(ConstTensorPtr tensor) { + if (tensor != nullptr) { + auto value = static_cast(tensor); + if (value != nullptr) { + return static_cast(value->dataType()); + } else { + DataType en = DATA_TYPE_INT32; + return en; + } + } else { + DataType en = DATA_TYPE_INT32; + return en; + } +} + +void MindIR_Tensor_SetDataType(TensorPtr *tensor, DataType data_type) { + if (tensor != nullptr && *tensor != nullptr) { + auto value = static_cast(*tensor); + if (value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + flatbuffers::Offset> dims; + if (value->dims() == nullptr || value->dims()->size() <= 0) { + dims = 0; + } else { + dims = fbb.CreateVector(value->dims()->data(), value->dims()->size()); + } + 
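+      // FlatBuffers are immutable, so the setter re-serializes the whole Tensor with the new
+      // dataType and registers the fresh buffer with MindIRMemoryManager, passing in the old one.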
flatbuffers::Offset> data; + if (value->data() == nullptr || value->data()->size() <= 0) { + data = 0; + } else { + data = fbb.CreateVector(value->data()->data(), value->data()->size()); + } + flatbuffers::Offset name; + if (value->name() == nullptr || value->name()->size() <= 0) { + name = 0; + } else { + name = fbb.CreateString(value->name()->c_str(), value->name()->size()); + } + auto ops_offset = + schema::CreateTensor(fbb, 0, value->dataType(), dims, static_cast(value->format()), 0, 0, data, + ConvertQuantParams(fbb, value->quantParams()), 0, name); + fbb.Finish(ops_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *tensor = ret_value; + } + } +} + +std::vector MindIR_Tensor_GetDims(ConstTensorPtr tensor) { + if (tensor != nullptr) { + auto value = static_cast(tensor); + if (value != nullptr) { + std::vector result; + auto src = value->dims(); + if (src == nullptr) { + return {}; + } + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](int32_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Tensor_SetDims(TensorPtr *tensor, const std::vector &dims) { + if (tensor != nullptr && *tensor != nullptr) { + auto value = static_cast(*tensor); + if (value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + flatbuffers::Offset> data; + if (value->data() == nullptr || value->data()->size() <= 0) { + data = 0; + } else { + data = fbb.CreateVector(value->data()->data(), value->data()->size()); + } + flatbuffers::Offset name; + if (value->name() == nullptr || value->name()->size() <= 0) { + name = 0; + } else { + name = fbb.CreateString(value->name()->c_str(), value->name()->size()); + } + auto ops_offset = schema::CreateTensor(fbb, 0, value->dataType(), fbb.CreateVector(dims.data(), dims.size()), + static_cast(value->format()), 0, 0, data, + ConvertQuantParams(fbb, value->quantParams()), 0, name); + fbb.Finish(ops_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *tensor = ret_value; + } + } +} +Format MindIR_Tensor_GetFormat(ConstTensorPtr tensor) { + if (tensor != nullptr) { + auto value = static_cast(tensor); + if (value != nullptr) { + return static_cast(value->format()); + } else { + Format en = FORMAT_NCHW; + return en; + } + } else { + Format en = FORMAT_NCHW; + return en; + } +} + +void MindIR_Tensor_SetFormat(TensorPtr *tensor, Format format) { + if (tensor != nullptr && *tensor != nullptr) { + auto value = static_cast(*tensor); + if (value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + flatbuffers::Offset> dims; + if (value->dims() == nullptr || value->dims()->size() <= 0) { + dims = 0; + } else { + dims = fbb.CreateVector(value->dims()->data(), value->dims()->size()); + } + flatbuffers::Offset> data; + if (value->data() == nullptr || value->data()->size() <= 0) { + data = 0; + } else { + data = fbb.CreateVector(value->data()->data(), value->data()->size()); + } + flatbuffers::Offset name; + if (value->name() == nullptr || value->name()->size() <= 0) { + name = 0; + } else { + name = fbb.CreateString(value->name()->c_str(), value->name()->size()); + } + auto ops_offset = schema::CreateTensor(fbb, 0, value->dataType(), dims, static_cast(format), 0, 0, + data, ConvertQuantParams(fbb, value->quantParams()), 0, name); + fbb.Finish(ops_offset); + auto new_addr = 
MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *tensor = ret_value; + } + } +} + +SharedBuffer MindIR_Tensor_GetData(ConstTensorPtr tensor, const SharedBuffer &buffer_templete, uint8_t *mmap_ptr, + unsigned int offset) { + if (tensor != nullptr) { + auto value = static_cast(tensor); + if (value != nullptr) { + SharedBuffer result{}; + + if (value->data() == nullptr || value->data()->size() == 0) { + result.fd = -1; + result.bufferSize = buffer_templete.bufferSize; + result.offset = offset; + result.dataSize = 0; + return result; + } + if (mmap_ptr == nullptr) { + MS_LOG(ERROR) << "Tensor GetData failed, mmap pointer should not be nullptr"; + return {-1, 0, offset, 0}; + } + result.fd = buffer_templete.fd; + result.bufferSize = buffer_templete.bufferSize; + // MS_LOG(ERROR) << "offset:" << offset << ",src->size():" << value->data()->size(); + memcpy(mmap_ptr + offset, value->data()->data(), value->data()->size()); + result.offset = offset; + result.dataSize = value->data()->size(); + return result; + } else { + MS_LOG(WARNING) << "Tensor GetData failed, mmap pointer should not be nullptr"; + return {-1, 0, offset, 0}; + } + } else { + return {-1, 0, offset, 0}; + } +} + +std::vector MindIR_Tensor_GetData(ConstTensorPtr tensor) { + if (tensor != nullptr) { + auto value = static_cast(tensor); + if (value != nullptr) { + std::vector result; + auto src = value->data(); + if (src == nullptr) { + return {}; + } + result.resize(src->size()); + std::transform(src->begin(), src->end(), result.begin(), [](uint8_t item) { return item; }); + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Tensor_SetData(TensorPtr *tensor, const std::vector &data) { + if (tensor != nullptr && *tensor != nullptr) { + auto value = static_cast(*tensor); + if (value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + flatbuffers::Offset> dims; + if (value->dims() == nullptr || value->dims()->size() <= 0) { + dims = 0; + } else { + dims = fbb.CreateVector(value->dims()->data(), value->dims()->size()); + } + flatbuffers::Offset name; + if (value->name() == nullptr || value->name()->size() <= 0) { + name = 0; + } else { + name = fbb.CreateString(value->name()->c_str(), value->name()->size()); + } + auto ops_offset = schema::CreateTensor( + fbb, 0, value->dataType(), dims, static_cast(value->format()), 0, 0, + fbb.CreateVector(data.data(), data.size()), ConvertQuantParams(fbb, value->quantParams()), 0, name); + fbb.Finish(ops_offset); + auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value); + auto ret_value = flatbuffers::GetMutableRoot(new_addr); + *tensor = ret_value; + } + } +} +std::vector MindIR_Tensor_GetQuantParams(ConstTensorPtr tensor) { + if (tensor != nullptr) { + auto value = static_cast(tensor); + if (value != nullptr) { + std::vector result; + auto src = value->quantParams(); + if (src == nullptr) { + return {}; + } + size_t size = src->size(); + result.reserve(src->size()); + for (size_t i = 0; i < size; i++) { + auto tmp = src->Get(i); + QuantParam q{tmp->zeroPoint(), tmp->scale(), tmp->numBits()}; + result.emplace_back(q); + } + return result; + } else { + return {}; + } + } else { + return {}; + } +} + +void MindIR_Tensor_SetQuantParams(TensorPtr *tensor, const std::vector &quant_params) { + if (tensor != nullptr && *tensor != nullptr) { + auto value = static_cast(*tensor); + if (value != nullptr) { + flatbuffers::FlatBufferBuilder fbb; + 
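+      // Only the quant params are replaced below; dims, data, and name are copied over from the
+      // existing tensor before the flatbuffer is rebuilt.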
+      flatbuffers::Offset> dims;
+      if (value->dims() == nullptr || value->dims()->size() <= 0) {
+        dims = 0;
+      } else {
+        dims = fbb.CreateVector(value->dims()->data(), value->dims()->size());
+      }
+      flatbuffers::Offset> data;
+      if (value->data() == nullptr || value->data()->size() <= 0) {
+        data = 0;
+      } else {
+        data = fbb.CreateVector(value->data()->data(), value->data()->size());
+      }
+      flatbuffers::Offset name;
+      if (value->name() == nullptr || value->name()->size() <= 0) {
+        name = 0;
+      } else {
+        name = fbb.CreateString(value->name()->c_str(), value->name()->size());
+      }
+      auto ops_offset =
+        schema::CreateTensor(fbb, 0, value->dataType(), dims, static_cast(value->format()), 0, 0, data,
+                             ConvertQuantParams(fbb, quant_params), 0, name);
+      fbb.Finish(ops_offset);
+      auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value);
+      auto ret_value = flatbuffers::GetMutableRoot(new_addr);
+      *tensor = ret_value;
+    }
+  }
+}
+
+void MindIR_Tensor_Destroy(TensorPtr *tensor) {
+  if (tensor != nullptr && *tensor != nullptr) {
+    auto schema = static_cast(*tensor);
+    MindIRMemoryManager::GetInstance()->DeleteTensor(schema);
+    *tensor = nullptr;
+  }
+}
+}  // namespace lite
+}  // namespace mindspore
diff --git a/mindspore/lite/mindir/src/utils.cc b/mindspore/lite/mindir/src/utils.cc
new file mode 100644
index 00000000..ca5f7f4b
--- /dev/null
+++ b/mindspore/lite/mindir/src/utils.cc
@@ -0,0 +1,96 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#include "utils.h" +#include "src/common/log.h" +#include "mindir_memory_manager.h" +namespace mindspore { +namespace lite { + +// ********** PrimitiveBase ********** +NodeType MindIR_Primitive_GetType(PrimitivePtr primitive) { + auto prim = flatbuffers::GetMutableRoot(primitive); + auto type = prim->value_type(); + return static_cast(type); +} + +void MindIR_Primitive_Destroy(PrimitivePtr *primitive) { + if (primitive != nullptr && *primitive != nullptr) { + auto schema = static_cast(*primitive); + MS_LOG(ERROR) << "schema=" << schema->value_type(); + MindIRMemoryManager::GetInstance()->DeletePrimitive(schema); + *primitive = nullptr; + } +} +PrimitivePtr MindIR_CreatePrimitiveFromBuilder(flatbuffers::FlatBufferBuilder &fbb) { + auto buff = reinterpret_cast(malloc(fbb.GetSize())); + if (buff == nullptr) { + MS_LOG(ERROR) << "malloc memory for primitive failed!"; + fbb.Clear(); + return nullptr; + } + memcpy(buff, fbb.GetBufferPointer(), fbb.GetSize()); + fbb.Clear(); + return buff; +} +flatbuffers::Offset CreateVec2D(flatbuffers::FlatBufferBuilder &fbb, + const std::vector> &data) { + std::vector> vet2d; + vet2d.reserve(data.size()); + for (const auto &data_one : data) { + vet2d.emplace_back(schema::CreateVec(fbb, fbb.CreateVector(data_one))); + } + flatbuffers::Offset v2d = schema::CreateVec2D(fbb, fbb.CreateVector(vet2d)); + return v2d; +} +flatbuffers::Offset CreateVec2D(flatbuffers::FlatBufferBuilder &fbb, + const mindspore::schema::Vec2D *data) { + auto data_inner = data->data(); + std::vector> vet2d; + vet2d.reserve(data_inner->size()); + for (const auto data_one : *data_inner) { + vet2d.emplace_back(schema::CreateVec(fbb, fbb.CreateVector(data_one->data()->data(), data_one->data()->size()))); + } + flatbuffers::Offset v2d = schema::CreateVec2D(fbb, fbb.CreateVector(vet2d)); + return v2d; +} + +flatbuffers::Offset>> ConvertQuantParams( + flatbuffers::FlatBufferBuilder &fbb, const std::vector &quant_params) { + std::vector> tmp_vec; + tmp_vec.reserve(quant_params.size()); + for (auto q_param : quant_params) { + tmp_vec.emplace_back(schema::CreateQuantParam(fbb, q_param.scale, q_param.zeroPoint, 0, 0, true, q_param.numBits)); + } + flatbuffers::Offset>> ret_quant_param = + fbb.CreateVector(tmp_vec.data(), tmp_vec.size()); + return ret_quant_param; +} + +flatbuffers::Offset>> ConvertQuantParams( + flatbuffers::FlatBufferBuilder &fbb, + const flatbuffers::Vector> *quant_params) { + std::vector> tmp_vec; + tmp_vec.reserve(quant_params->size()); + for (auto q_param : *quant_params) { + tmp_vec.emplace_back( + schema::CreateQuantParam(fbb, q_param->scale(), q_param->zeroPoint(), 0, 0, true, q_param->numBits())); + } + flatbuffers::Offset>> ret_quant_param = + fbb.CreateVector(tmp_vec.data(), tmp_vec.size()); + return ret_quant_param; +} +} // namespace lite +} // namespace mindspore \ No newline at end of file diff --git a/mindspore/lite/mindir/tests/BUILD.gn b/mindspore/lite/mindir/tests/BUILD.gn new file mode 100644 index 00000000..de1902fe --- /dev/null +++ b/mindspore/lite/mindir/tests/BUILD.gn @@ -0,0 +1,35 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +import("//build/ohos.gni") + +ohos_executable("mindir_test") { + sources = [ "mindir_test.cc" ] + + include_dirs = [ + "./", + "../include", + "../include/inner", + "../../", + "//third_party/flatbuffers/include", + ] + remove_configs = [ "//build/config/compiler:no_rtti" ] + + deps = [ "../:mindir" ] + + output_name = "mindir_test" + install_enable = true + part_name = "mindspore" +} diff --git a/mindspore/lite/mindir/tests/mindir_test.cc b/mindspore/lite/mindir/tests/mindir_test.cc new file mode 100644 index 00000000..09ef7090 --- /dev/null +++ b/mindspore/lite/mindir/tests/mindir_test.cc @@ -0,0 +1,51 @@ +#include "mindir.h" +#include "mindir_memory_manager.h" +#include +#include +using namespace mindspore::lite; +int main() { + int loop = 0; + int all = 100; + while (loop < all) { + auto tensor = MindIR_Tensor_Create(); + auto str = MindIR_Tensor_GetName(tensor); + DataType dataType = DATA_TYPE_UINT16; + MindIR_Tensor_SetDataType(&tensor, dataType); + std::cout << "set data type" << std::endl; + DataType d = MindIR_Tensor_GetDataType(tensor); + std::cout << "loop = " << loop << ",data type = " << (int)d << std::endl; + PrimitivePtr ret = nullptr; + ret = MindIR_SquaredDifference_CreatePrimitive(); + std::cout << "MindIR_SquaredDifference_CreatePrimitive 1,PrimitivePtr = " << ret << std::endl; + ret = MindIR_SquaredDifference_CreatePrimitive(); + std::cout << "MindIR_SquaredDifference_CreatePrimitive 1,PrimitivePtr = " << ret << std::endl; + ret = MindIR_SubFusion_CreatePrimitive(ACTIVATION_TYPE_RELU6); + std::cout << "MindIR_SubFusion_CreatePrimitive 2,PrimitivePtr = " << ret << std::endl; + ret = MindIR_Activation_CreatePrimitive(ACTIVATION_TYPE_RELU6, .5, 0, 1, true); + std::cout << "MindIR_Activation_CreatePrimitive 3,PrimitivePtr = " << ret << std::endl; + MindIR_Primitive_Destroy(&ret); + std::cout << "MindIR_Primitive_Destroy,PrimitivePtr = " << ret << std::endl; + TensorPtr t_ret = nullptr; + t_ret = MindIR_Tensor_Create(); + std::cout << "MindIR_Tensor_Create 3,TensorPtr = " << t_ret << std::endl; + MindIR_Tensor_Destroy(&t_ret); + std::cout << "MindIR_Tensor_Destroy,Tensor = " << t_ret << std::endl; + ret = MindIR_SpaceToBatchND_CreatePrimitive({2, 2}, {{0}, {0}, {0}, {0}}); + auto blockshape = MindIR_SpaceToBatchND_GetBlockShape(ret); + std::string bs_(""); + for (int i = 0; i < 2; i++) { + bs_.append(std::to_string(blockshape[i]).c_str()); + } + std::cout << "MindIR_SpaceToBatchND_GetBlockShape,blockshape = " << bs_ << std::endl; + auto paddings = MindIR_SpaceToBatchND_GetPaddings(ret); + std::string pad_(""); + for (auto item : paddings) { + pad_.append(std::to_string(item[0]).c_str()); + } + std::cout << "MindIR_SpaceToBatchND_GetPaddings,Paddings = " << pad_ << std::endl; + loop++; + } + MindIRMemoryManager::GetInstance()->ClearAllMemory(); + std::cout << "MindIRMemoryManager::GetInstance()->ClearAllMemory()" << std::endl; + loop++; +} \ No newline at end of file diff --git a/mindspore/lite/src/runtime/delegate/nnrt/CMakeLists.txt 
b/mindspore/lite/src/runtime/delegate/nnrt/CMakeLists.txt
new file mode 100644
index 00000000..70aa63f3
--- /dev/null
+++ b/mindspore/lite/src/runtime/delegate/nnrt/CMakeLists.txt
@@ -0,0 +1,30 @@
+include_directories(${DDK_PATH})
+include_directories(${CCSRC_DIR}/plugin/device/cpu/kernel)
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
+#include_directories(/home/tony/wty/workspace/ohos/third_party/mindspore/mindspore/lite/mindir/include/inner)
+#include_directories(/home/tony/wty/workspace/ohos/third_party/mindspore/mindspore/lite/mindir/include)
+file(GLOB_RECURSE NNRT_SRC
+        ${CMAKE_CURRENT_SOURCE_DIR}/*.cc
+)
+
+#add_library(hiai SHARED IMPORTED)
+#set_target_properties(hiai PROPERTIES IMPORTED_LOCATION
+#        ${DDK_LIB_PATH}/libhiai.so)
+#add_library(hiai_ir SHARED IMPORTED)
+#set_target_properties(hiai_ir PROPERTIES IMPORTED_LOCATION
+#        ${DDK_LIB_PATH}/libhiai_ir.so)
+#add_library(hiai_ir_build SHARED IMPORTED)
+#set_target_properties(hiai_ir_build PROPERTIES IMPORTED_LOCATION
+#        ${DDK_LIB_PATH}/libhiai_ir_build.so)
+#add_library(npu_kernel_mid OBJECT ${NPU_RUNTIME_SRC})
+#add_dependencies(npu_kernel_mid fbs_src)
+#target_link_libraries(
+#        npu_kernel_mid
+#        hiai
+#        hiai_ir
+#        hiai_ir_build
+#)
+
+file(GLOB convert_source checker/*.cc)
+add_library(nnr_mid OBJECT ${NNRT_SRC} ${convert_source} )
\ No newline at end of file
diff --git a/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.cc b/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.cc
new file mode 100644
index 00000000..a647796c
--- /dev/null
+++ b/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.cc
@@ -0,0 +1,187 @@
+#include
+#include
+#include "primitive_check.h"
+#include "dtype/type_id.h"
+#include "src/runtime/weight_decoder.h"
+#include "src/common/log.h"
+#include "src/common/utils.h"
+namespace mindspore {
+namespace lite {
+
+Status CheckPrimitiveSupported(const schema::Primitive *primitive) {
+  if (primitive != nullptr) {
+    auto prim = primitive;
+    auto type = prim->value_type();
+    switch (type) {
+      case schema::PrimitiveType_Activation:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_AddFusion:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_ArgMaxFusion:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_AvgPoolFusion:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_BatchToSpaceND:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_BiasAdd:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_Cast:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_Concat:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_Conv2DFusion:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_Conv2dTransposeFusion:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_DivFusion:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_Eltwise:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_ExpandDims:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_Fill:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_FullConnection:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_FusedBatchNorm:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_Gather:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_LayerNormFusion:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_LessEqual:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_MatMulFusion:
+        return mindspore::kSuccess;
+      case schema::PrimitiveType_Maximum:
+        return
mindspore::kSuccess; + case schema::PrimitiveType_MaxPoolFusion: + return mindspore::kSuccess; + case schema::PrimitiveType_MulFusion: + return mindspore::kSuccess; + case schema::PrimitiveType_OneHot: + return mindspore::kSuccess; + case schema::PrimitiveType_PadFusion: + return mindspore::kSuccess; + case schema::PrimitiveType_PowFusion: + return mindspore::kSuccess; + case schema::PrimitiveType_PReLUFusion: + return mindspore::kSuccess; + case schema::PrimitiveType_QuantDTypeCast: + return mindspore::kSuccess; + case schema::PrimitiveType_ReduceFusion: + return mindspore::kSuccess; + case schema::PrimitiveType_Reshape: + return mindspore::kSuccess; + case schema::PrimitiveType_Resize: + return mindspore::kSuccess; + case schema::PrimitiveType_Rsqrt: + return mindspore::kSuccess; + case schema::PrimitiveType_ScaleFusion: + return mindspore::kSuccess; + case schema::PrimitiveType_Shape: + return mindspore::kSuccess; + case schema::PrimitiveType_SliceFusion: + return mindspore::kSuccess; + case schema::PrimitiveType_Softmax: + return mindspore::kSuccess; + case schema::PrimitiveType_SpaceToBatchND: + return mindspore::kSuccess; + case schema::PrimitiveType_Split: + return mindspore::kSuccess; + case schema::PrimitiveType_Sqrt: + return mindspore::kSuccess; + case schema::PrimitiveType_SquaredDifference: + return mindspore::kSuccess; + case schema::PrimitiveType_Squeeze: + return mindspore::kSuccess; + case schema::PrimitiveType_Stack: + return mindspore::kSuccess; + case schema::PrimitiveType_StridedSlice: + return mindspore::kSuccess; + case schema::PrimitiveType_SubFusion: + return mindspore::kSuccess; + case schema::PrimitiveType_TileFusion: + return mindspore::kSuccess; + case schema::PrimitiveType_TopKFusion: + return mindspore::kSuccess; + case schema::PrimitiveType_Transpose: + return mindspore::kSuccess; + case schema::PrimitiveType_Unsqueeze: + return mindspore::kSuccess; + default: { + MS_LOG(WARNING) << "No primitive type :" << (int)(type); + return mindspore::kLiteSuccessExit; + } + } + return mindspore::kSuccess; + } else { + MS_LOG(ERROR) << "primitive is nullptr."; + return mindspore::kLiteError; + } +} +namespace { +bool NeedBitUppackCheck(const schema::Tensor &src_tensor) { + if (src_tensor.enableHuffmanCode()) { + return true; + } + bool need_bit_unpack = src_tensor.quantParams() != nullptr && src_tensor.quantParams()->size() > 0 && + src_tensor.quantParams()->Get(0) != nullptr; + if (need_bit_unpack) { + auto num_bits = src_tensor.quantParams()->Get(0)->numBits(); + need_bit_unpack = ((num_bits >= kBitNum1 && num_bits < kBitNum8) || (num_bits > kBitNum8 && num_bits < kBitNum16)); + } + + return need_bit_unpack; +} +int DecompressTensor(const schema::Tensor &src_tensor) { + if (src_tensor.weightQunatCompressType() == schema::WeightQunatCompressType_FSE || + src_tensor.weightQunatCompressType() == schema::WeightQunatCompressType_INDEXING || + src_tensor.weightQunatCompressType() == schema::WeightQunatCompressType_SPARSE) { + return RET_NOT_SUPPORT; + } + if (!NeedBitUppackCheck(src_tensor)) { + return RET_NO_CHANGE; + } + MS_LOG(ERROR) << "DecompressTensor Error."; + return RET_ERROR; +} +} // namespace + +Status CheckTensorSupported(const schema::Tensor *primitive) { + if (primitive == nullptr) { + MS_LOG(ERROR) << "primitive is nullptr, which type is Tensor."; + return mindspore::kLiteSuccessExit; + } + + int32_t data_type = primitive->dataType(); + if (data_type <= kTypeUnknown || data_type >= kMonadTypeEnd) { + MS_LOG(ERROR) << "invalid data type. 
" << data_type; + return mindspore::kLiteSuccessExit; + } + + if (primitive->dims() == nullptr) { + MS_LOG(DEBUG) << "Dims of tensor is nullptr"; + } + + if (data_type == kObjectTypeTensorType) { + MS_LOG(ERROR) << "Not support TensorList."; + return mindspore::kLiteNotSupport; + } + + if (primitive->data() == nullptr || primitive->data()->size() <= 0) { + MS_LOG(DEBUG) << "No valid data converted."; + return mindspore::kSuccess; + } else { + auto ret = DecompressTensor(*primitive); + if (ret == RET_NO_CHANGE) { + } else { + MS_LOG(ERROR) << "Not support Decompress Tensor."; + return mindspore::kLiteNotSupport; + } + } + return mindspore::kSuccess; + ; +} +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.h b/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.h new file mode 100644 index 00000000..dbdd812c --- /dev/null +++ b/mindspore/lite/src/runtime/delegate/nnrt/checker/primitive_check.h @@ -0,0 +1,12 @@ +#ifndef OHOS_HDI_NNRT_V1_0_CPP_H +#define OHOS_HDI_NNRT_V1_0_CPP_H +#include "schema/model_generated.h" +#include "include/api/status.h" +namespace mindspore { +namespace lite { +Status CheckPrimitiveSupported(const schema::Primitive *primitive); +Status CheckTensorSupported(const schema::Tensor *primitive); +} // namespace lite +} // namespace mindspore + +#endif // OHOS_HDI_NNRT_V1_0_CPP_H \ No newline at end of file diff --git a/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.cc b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.cc new file mode 100644 index 00000000..34897331 --- /dev/null +++ b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.cc @@ -0,0 +1,360 @@ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#include "nnrt_delegate.h"
+#include "checker/primitive_check.h"
+#include "src/common/log_adapter.h"
+#include "interfaces/kits/c/neural_network_runtime.h"
+#include "interfaces/innerkits/c/neural_network_runtime_inner.h"
+#include "nnrt_model_kernel.h"
+
+mindspore::Status mindspore::NNRTDelegate::Build(DelegateModel *model) {
+  if (this->nnrt_lite_graph == nullptr) {
+    MS_LOG(ERROR) << "nnrt_lite_graph is nullptr.";
+    return mindspore::kLiteError;
+  }
+  if (this->nnrt_lite_graph->sub_graphs_.empty()) {
+    // must have at least one subgraph
+    MS_LOG(ERROR) << "must have at least one subgraph";
+    return mindspore::kLiteError;
+  }
+  OH_NN_ReturnCode ret_code;
+  OH_NNModel *oh_nnmodel = OH_NNModel_Construct();
+  if (oh_nnmodel == nullptr) {
+    MS_LOG(ERROR) << "Construct NNModel failed, oh_nnmodel is nullptr.";
+    return mindspore::kLiteError;
+  }
+
+  ret_code = OH_NNModel_BuildFromLiteGraph(oh_nnmodel, this->nnrt_lite_graph);
+  if (ret_code != OH_NN_SUCCESS) {
+    MS_LOG(ERROR) << "Build NNModel failed, OH_NN_ReturnCode = " << ret_code;
+    OH_NNModel_Destroy(&oh_nnmodel);
+    return mindspore::kLiteError;
+  }
+  MS_LOG(INFO) << "NNRTDelegate creates NNModel success.";
+
+  OH_NNCompilation *oh_nn_compilation = nullptr;
+  oh_nn_compilation = OH_NNCompilation_Construct(oh_nnmodel);
+
+  if (oh_nn_compilation == nullptr) {
+    MS_LOG(ERROR) << "Construct NNCompilation failed";
+    OH_NNModel_Destroy(&oh_nnmodel);
+    return mindspore::kLiteError;
+  }
+  MS_LOG(INFO) << "NNRTDelegate creates NNCompilation success.";
+
+  const size_t *allDevicesID = nullptr;
+  uint32_t device_count = 0;
+  ret_code = OH_NNDevice_GetAllDevicesID(&allDevicesID, &device_count);
+  if (ret_code != OH_NN_SUCCESS) {
+    MS_LOG(ERROR) << "NNModel GetAllDevicesID failed, OH_NN_ReturnCode = " << ret_code;
+    OH_NNCompilation_Destroy(&oh_nn_compilation);
+    OH_NNModel_Destroy(&oh_nnmodel);
+    return mindspore::kLiteError;
+  }
+
+  if (device_count <= 0) {
+    MS_LOG(WARNING) << "No NNRt Device found, fall back to CPU. ";
+    // OH_NNCompilation_Destroy(&oh_nn_compilation);
+    // OH_NNModel_Destroy(&oh_nnmodel);
+    return mindspore::kSuccess;
+  }
+  MS_LOG(INFO) << "NNRTDelegate GetAllDevicesID success.";
+
+  // check if model ops are supported
+  const bool *issupported = nullptr;
+  uint32_t op_count = 0;
+  ret_code = OH_NNModel_GetAvailableOperations(oh_nnmodel, allDevicesID[0], &issupported, &op_count);
+  if (ret_code != OH_NN_SUCCESS) {
+    MS_LOG(ERROR) << "NNModel GetAvailableOperations failed, OH_NN_ReturnCode = " << ret_code
+                  << ", maybe due to dataParcel data length limitation. 
Fall back to CPU."; + OH_NNCompilation_Destroy(&oh_nn_compilation); + OH_NNModel_Destroy(&oh_nnmodel); + return mindspore::kSuccess; + } + uint32_t supported_op_count = 0; + for (uint32_t i = 0; i < op_count; i++) { + if (issupported[i]) { + supported_op_count++; + } + } + if (op_count != supported_op_count) { + MS_LOG(WARNING) << "this model has " << op_count << "ops, but NNRT only support " << supported_op_count + << " ops, fall back to CPU."; + // must support all op, else fall back to CPU + OH_NNCompilation_Destroy(&oh_nn_compilation); + OH_NNModel_Destroy(&oh_nnmodel); + return mindspore::kSuccess; + } + MS_LOG(INFO) << "NNRtDelegate supports all op in this model."; + + ret_code = OH_NNCompilation_SetDevice(oh_nn_compilation, allDevicesID[0]); + + if (ret_code != OH_NN_SUCCESS) { + MS_LOG(ERROR) << "NNCompilation SetDevice failed, OH_NN_ReturnCode = " << ret_code; + OH_NNCompilation_Destroy(&oh_nn_compilation); + OH_NNModel_Destroy(&oh_nnmodel); + return mindspore::kLiteError; + } + + ret_code = OH_NNCompilation_Build(oh_nn_compilation); + + if (ret_code != OH_NN_SUCCESS) { + MS_LOG(ERROR) << "Build NNCompilation failed, OH_NN_ReturnCode = " << ret_code; + OH_NNCompilation_Destroy(&oh_nn_compilation); + OH_NNModel_Destroy(&oh_nnmodel); + return mindspore::kLiteError; + } + + MS_LOG(DEBUG) << "NNRTDelegate SetDevice success."; + + OH_NNExecutor *oh_nn_executor = nullptr; + oh_nn_executor = OH_NNExecutor_Construct(oh_nn_compilation); + if (oh_nn_executor == nullptr) { + MS_LOG(ERROR) << "Construct NNCompilation SetDevice failed, OH_NN_ReturnCode = " << ret_code; + OH_NNCompilation_Destroy(&oh_nn_compilation); + OH_NNModel_Destroy(&oh_nnmodel); + return mindspore::kLiteError; + } + MS_LOG(DEBUG) << "NNRTDelegate creates NNExecutor success."; + mindspore::Status prepare_data_ret; + auto nnr_model_kernel = new (std::nothrow) NNRTModelKernel(oh_nn_executor, model->inputs(), model->outputs()); + if (nnr_model_kernel == nullptr) { + MS_LOG(ERROR) << "new NNRTModelKernel failed"; + return mindspore::kLiteError; + } + OH_NNCompilation_Destroy(&oh_nn_compilation); + OH_NNModel_Destroy(&oh_nnmodel); + KernelIter from = model->BeginKernelIterator(); + KernelIter end = model->EndKernelIterator(); + model->Replace(from, end, nnr_model_kernel); + + MS_LOG(INFO) << "NNRTDelegate build success."; + return mindspore::kSuccess; +} + +mindspore::Status mindspore::NNRTDelegate::Init() { + MS_LOG(DEBUG) << "NNRTDelegate init success."; + return mindspore::kSuccess; +} +mindspore::Status mindspore::NNRTDelegate::PrepareInputs(DelegateModel *model, + OH_NNExecutor *oh_nn_executor) { + auto input_tensors = model->inputs(); + for (size_t i = 0; i < input_tensors.size(); i++) { + auto tensor = input_tensors[i]; + auto tensor_shape = tensor.Shape(); + auto tmp_quant_param = tensor.QuantParams(); + OH_NN_QuantParam *quant_param = nullptr; + std::vector bit_num; + std::vector scale; + std::vector zero_point; + if (!tmp_quant_param.empty()) { + quant_param = new (std::nothrow) OH_NN_QuantParam; + if (quant_param == nullptr) { + MS_LOG(ERROR) << "new OH_NN_QuantParam failed."; + return mindspore::kLiteError; + } + for (auto qparam : tmp_quant_param) { + bit_num.emplace_back(qparam.bit_num); + scale.emplace_back(qparam.scale); + zero_point.emplace_back(qparam.zero_point); + } + quant_param->quantCount = tmp_quant_param.size(); + quant_param->numBits = bit_num.data(); + quant_param->scale = scale.data(); + quant_param->zeroPoint = zero_point.data(); + } + auto oprend = new (std::nothrow) OH_NN_Tensor; + if (oprend == 
nullptr) { + MS_LOG(ERROR) << "new OH_NN_Tensor Failed"; + return mindspore::kLiteError; + } + oprend->dataType = ConvertDataType(tensor.DataType()); + oprend->dimensionCount = tensor_shape.size(); + + std::vector dimensions_list; + for (auto shape : tensor_shape) { + if (shape < INT32_MAX) { + dimensions_list.emplace_back(static_cast(shape)); + } else { + MS_LOG(ERROR) << "NNExecutor SetInput failed,tensor dimension is is too large, max dim = " << INT32_MAX + << ", but get dimension = " << shape; + return mindspore::kLiteError; + } + } + oprend->dimensions = dimensions_list.data(); + oprend->quantParam = quant_param; + oprend->type = OH_NN_TENSOR; + OH_NN_ReturnCode ret_code = + OH_NNExecutor_SetInput(oh_nn_executor, i, oprend, tensor.MutableData(), tensor.DataSize()); + delete (oprend); + + if (!tmp_quant_param.empty()) { + delete (quant_param); + quant_param = nullptr; + } + + if (ret_code != OH_NN_SUCCESS) { + MS_LOG(ERROR) << "NNExecutor SetInput failed, current input tensor is" << tensor.Name() + << "OH_NN_ReturnCode = " << ret_code; + return mindspore::kLiteError; + } + } + + return mindspore::kSuccess; +} +OH_NN_DataType mindspore::NNRTDelegate::ConvertDataType(mindspore::DataType data_type) { + OH_NN_DataType oh_data_type; + switch (data_type) { + case mindspore::DataType::kTypeUnknown: + case mindspore::DataType::kObjectTypeString: + case mindspore::DataType::kObjectTypeList: + case mindspore::DataType::kObjectTypeTuple: + case mindspore::DataType::kObjectTypeTensorType: + case mindspore::DataType::kNumberTypeBegin: + case mindspore::DataType::kNumberTypeEnd: + case mindspore::DataType::kInvalidType: + oh_data_type = OH_NN_UNKNOWN; + break; + case mindspore::DataType::kNumberTypeBool: + oh_data_type = OH_NN_BOOL; + break; + case mindspore::DataType::kNumberTypeInt8: + oh_data_type = OH_NN_INT8; + break; + case mindspore::DataType::kNumberTypeInt16: + oh_data_type = OH_NN_INT16; + break; + case mindspore::DataType::kNumberTypeInt32: + oh_data_type = OH_NN_INT32; + break; + case mindspore::DataType::kNumberTypeInt64: + oh_data_type = OH_NN_INT64; + break; + case mindspore::DataType::kNumberTypeUInt8: + oh_data_type = OH_NN_UINT8; + break; + case mindspore::DataType::kNumberTypeUInt16: + oh_data_type = OH_NN_UINT16; + break; + case mindspore::DataType::kNumberTypeUInt32: + oh_data_type = OH_NN_UINT32; + break; + case mindspore::DataType::kNumberTypeUInt64: + oh_data_type = OH_NN_UINT64; + break; + case mindspore::DataType::kNumberTypeFloat16: + oh_data_type = OH_NN_FLOAT16; + break; + case mindspore::DataType::kNumberTypeFloat32: + oh_data_type = OH_NN_FLOAT32; + break; + case mindspore::DataType::kNumberTypeFloat64: + oh_data_type = OH_NN_FLOAT64; + break; + default: { + oh_data_type = OH_NN_UNKNOWN; + } + } + return oh_data_type; +} + +mindspore::Status mindspore::NNRTDelegate::PrepareOutputs(DelegateModel *model, + OH_NNExecutor *oh_nn_executor) { + auto output_tensors = model->outputs(); + for (size_t i = 0; i < output_tensors.size(); i++) { + auto tensor = output_tensors[i]; + OH_NN_ReturnCode ret_code = OH_NNExecutor_SetOutput(oh_nn_executor, i, tensor.MutableData(), tensor.DataSize()); + if (ret_code != OH_NN_SUCCESS) { + MS_LOG(ERROR) << "NNExecutor SetOutput failed, current out tensor is" << tensor.Name() + << ", OH_NN_ReturnCode = " << ret_code; + return mindspore::kLiteError; + } + } + return mindspore::kSuccess; +} + +void mindspore::NNRTDelegate::ShallowCopyLiteGraph(const mindspore::lite::LiteGraph &lite_graph) { + Status ret; + for (auto node : lite_graph.all_nodes_) 
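+  // Reject the whole graph if any node's primitive fails the NNRT support check.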
+    ret = lite::CheckPrimitiveSupported(static_cast<const schema::Primitive *>(node->primitive_));
+    if (ret == mindspore::kLiteError) {
+      MS_LOG(ERROR) << "primitive supported check failed.";
+      return;
+    }
+  }
+  std::vector<LiteGraph::Node *> node_list;
+  node_list.reserve(lite_graph.all_nodes_.size());
+  // copy nodes
+  for (auto node : lite_graph.all_nodes_) {
+    auto new_node = new (std::nothrow) LiteGraph::Node;
+    if (new_node == nullptr) {
+      MS_LOG(ERROR) << "new LiteGraph::Node failed.";
+      return;
+    }
+    new_node->name_ = node->name_;
+    new_node->op_type_ = node->op_type_;
+    new_node->node_type_ = node->node_type_;
+    new_node->primitive_ = node->primitive_;
+    new_node->base_operator_ = node->base_operator_;
+    new_node->input_indices_ = node->input_indices_;
+    new_node->output_indices_ = node->output_indices_;
+    new_node->quant_type_ = node->quant_type_;
+    new_node->device_type_ = node->device_type_;
+    node_list.emplace_back(new_node);
+  }
+  // copy subgraphs
+  std::vector<LiteGraph::SubGraph *> subgraph_list;
+  for (auto subgraph : lite_graph.sub_graphs_) {
+    auto new_subgraph = new (std::nothrow) LiteGraph::SubGraph;
+    if (new_subgraph == nullptr) {
+      MS_LOG(ERROR) << "new LiteGraph::SubGraph failed.";
+      return;
+    }
+    new_subgraph->name_ = subgraph->name_;
+    new_subgraph->input_indices_ = subgraph->input_indices_;
+    new_subgraph->output_indices_ = subgraph->output_indices_;
+    new_subgraph->node_indices_ = subgraph->node_indices_;
+    subgraph_list.emplace_back(new_subgraph);
+  }
+  for (auto tensor : lite_graph.all_tensors_) {
+    ret = lite::CheckTensorSupported(static_cast<const schema::Tensor *>(tensor));
+    if (ret == mindspore::kLiteError) {
+      MS_LOG(ERROR) << "tensor supported check failed.";
+      return;
+    }
+  }
+
+  nnrt_lite_graph = new (std::nothrow) lite::LiteGraph();
+  if (nnrt_lite_graph == nullptr) {
+    MS_LOG(ERROR) << "new LiteGraph failed.";
+    return;
+  }
+
+  // Shallow copy: tensors are shared with the source graph; only nodes and subgraphs are duplicated.
+  nnrt_lite_graph->name_ = lite_graph.name_;
+  nnrt_lite_graph->version_ = lite_graph.version_;
+  nnrt_lite_graph->input_indices_ = lite_graph.input_indices_;
+  nnrt_lite_graph->output_indices_ = lite_graph.output_indices_;
+  nnrt_lite_graph->all_tensors_ = lite_graph.all_tensors_;
+  nnrt_lite_graph->all_nodes_ = node_list;
+  nnrt_lite_graph->sub_graphs_ = subgraph_list;
+  MS_LOG(INFO) << "ShallowCopyLiteGraph success.";
+}
+
+mindspore::NNRTDelegate::~NNRTDelegate() {
+  if (this->nnrt_lite_graph != nullptr) {
+    MS_LOG(INFO) << "Delete NNRTDelegate.";
+  }
+}
diff --git a/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.h b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.h
new file mode 100644
index 00000000..1be08119
--- /dev/null
+++ b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_delegate.h
@@ -0,0 +1,52 @@
+/**
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_NNR_DELEGATE_H
+#define MINDSPORE_NNR_DELEGATE_H
+#include
+#include
+#include "include/api/delegate.h"
+#include "include/context.h"
+#include "include/model.h"
+#include "interfaces/kits/c/neural_network_runtime_type.h"
+namespace mindspore {
+
+using namespace lite;
+
+// Delegate that hands whole supported subgraphs over to the OpenHarmony NNRT runtime.
+class NNRTDelegate : public Delegate {
+ public:
+  NNRTDelegate() : Delegate() {}
+
+  ~NNRTDelegate() override;
+
+  Status Init() override;
+
+  Status Build(DelegateModel<schema::Primitive> *model) override;
+
+  void ShallowCopyLiteGraph(const lite::LiteGraph &lite_graph);
+
+ protected:
+  LiteGraph *nnrt_lite_graph = nullptr;
+
+ private:
+  // static LiteGraph *CreateLiteGraph(const LiteGraph &lite_graph);
+  Status PrepareInputs(DelegateModel<schema::Primitive> *model, OH_NNExecutor *oh_nn_executor);
+  Status PrepareOutputs(DelegateModel<schema::Primitive> *model, OH_NNExecutor *oh_nn_executor);
+  OH_NN_DataType ConvertDataType(mindspore::DataType data_type);
+};
+
+}  // namespace mindspore
+
+#endif  // MINDSPORE_NNR_DELEGATE_H
diff --git a/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.cc b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.cc
new file mode 100644
index 00000000..5acf2e9a
--- /dev/null
+++ b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.cc
@@ -0,0 +1,175 @@
+/**
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vector>
+#include "nnrt_model_kernel.h"
+int mindspore::NNRTModelKernel::Prepare() { return 0; }
+int mindspore::NNRTModelKernel::Execute() {
+  lite::STATUS ret_val = PrepareInputs();
+  if (ret_val != lite::RET_OK) {
+    MS_LOG(ERROR) << "NNRTModelKernel PrepareInputs failed, STATUS is " << ret_val;
+    return ret_val;
+  }
+  ret_val = TransferOutputs();
+  if (ret_val != lite::RET_OK) {
+    MS_LOG(ERROR) << "NNRTModelKernel TransferOutputs failed, STATUS is " << ret_val;
+    return ret_val;
+  }
+  MS_LOG(INFO) << "Running NNRTModelKernel...";
+  OH_NN_ReturnCode ret_code = OH_NNExecutor_Run(this->oh_nn_executor);
+
+  if (ret_code != OH_NN_SUCCESS) {
+    MS_LOG(ERROR) << "NNExecutor Run failed, OH_NN_ReturnCode = " << ret_code;
+    return lite::RET_ERROR;
+  }
+  MS_LOG(INFO) << "Run NNRTModelKernel success.";
+
+  return lite::RET_OK;
+}
+
+OH_NN_DataType mindspore::NNRTModelKernel::ConvertDataType(mindspore::DataType data_type) {
+  OH_NN_DataType oh_data_type;
+  switch (data_type) {
+    case DataType::kTypeUnknown:
+    case DataType::kObjectTypeString:
+    case DataType::kObjectTypeList:
+    case DataType::kObjectTypeTuple:
+    case DataType::kObjectTypeTensorType:
+    case DataType::kNumberTypeBegin:
+    case DataType::kNumberTypeEnd:
+    case DataType::kInvalidType:
+      oh_data_type = OH_NN_UNKNOWN;
+      break;
+    case DataType::kNumberTypeBool:
+      oh_data_type = OH_NN_BOOL;
+      break;
+    case DataType::kNumberTypeInt8:
+      oh_data_type = OH_NN_INT8;
+      break;
+    case DataType::kNumberTypeInt16:
+      oh_data_type = OH_NN_INT16;
+      break;
+    case DataType::kNumberTypeInt32:
+      oh_data_type = OH_NN_INT32;
+      break;
+    case DataType::kNumberTypeInt64:
+      oh_data_type = OH_NN_INT64;
+      break;
+    case DataType::kNumberTypeUInt8:
+      oh_data_type = OH_NN_UINT8;
+      break;
+    case DataType::kNumberTypeUInt16:
+      oh_data_type = OH_NN_UINT16;
+      break;
+    case DataType::kNumberTypeUInt32:
+      oh_data_type = OH_NN_UINT32;
+      break;
+    case DataType::kNumberTypeUInt64:
+      oh_data_type = OH_NN_UINT64;
+      break;
+    case DataType::kNumberTypeFloat16:
+      oh_data_type = OH_NN_FLOAT16;
+      break;
+    case DataType::kNumberTypeFloat32:
+      oh_data_type = OH_NN_FLOAT32;
+      break;
+    case DataType::kNumberTypeFloat64:
+      oh_data_type = OH_NN_FLOAT64;
+      break;
+    default: {
+      oh_data_type = OH_NN_UNKNOWN;
+    }
+  }
+  return oh_data_type;
+}
+int mindspore::NNRTModelKernel::PrepareInputs() {
+  auto input_tensors = this->inputs();
+  for (size_t i = 0; i < input_tensors.size(); i++) {
+    auto tensor = input_tensors[i];
+    auto tensor_shape = tensor.Shape();
+    auto tmp_quant_param = tensor.QuantParams();
+    OH_NN_QuantParam *quant_param = nullptr;
+    // Backing storage for the quantization arrays; must stay alive until OH_NNExecutor_SetInput returns.
+    std::vector<uint32_t> bit_num;
+    std::vector<double> scale;
+    std::vector<int32_t> zero_point;
+    if (!tmp_quant_param.empty()) {
+      quant_param = new (std::nothrow) OH_NN_QuantParam;
+      if (quant_param == nullptr) {
+        MS_LOG(ERROR) << "new OH_NN_QuantParam failed.";
+        return lite::RET_NULL_PTR;
+      }
+      for (auto qparam : tmp_quant_param) {
+        bit_num.emplace_back(qparam.bit_num);
+        scale.emplace_back(qparam.scale);
+        zero_point.emplace_back(qparam.zero_point);
+      }
+      quant_param->quantCount = tmp_quant_param.size();
+      quant_param->numBits = bit_num.data();
+      quant_param->scale = scale.data();
+      quant_param->zeroPoint = zero_point.data();
+    }
+    auto oprend = new (std::nothrow) OH_NN_Tensor;
+    if (oprend == nullptr) {
+      MS_LOG(ERROR) << "new OH_NN_Tensor failed.";
+      return lite::RET_ERROR;
+    }
+    oprend->dataType = ConvertDataType(tensor.DataType());
+    oprend->dimensionCount = tensor_shape.size();
+
+    // OH_NN_Tensor expects int32_t dimensions, so each shape value is range-checked before narrowing.
+    std::vector<int32_t> dimensions_list;
+    for (auto shape : tensor_shape) {
+      if (shape < INT32_MAX) {
+        dimensions_list.emplace_back(static_cast<int32_t>(shape));
+      } else {
+        MS_LOG(ERROR) << "NNExecutor SetInput failed, tensor dimension is too large, max dim = " << INT32_MAX
+                      << ", but got dimension = " << shape;
+        return lite::RET_ERROR;
+      }
+    }
+    oprend->dimensions = dimensions_list.data();
+    oprend->quantParam = quant_param;
+    oprend->type = OH_NN_TENSOR;
+    OH_NN_ReturnCode ret_code =
+      OH_NNExecutor_SetInput(oh_nn_executor, i, oprend, tensor.MutableData(), tensor.DataSize());
+    delete oprend;
+
+    if (!tmp_quant_param.empty()) {
+      // quant_param was allocated with new, so release it with delete rather than free.
+      delete quant_param;
+      quant_param = nullptr;
+    }
+
+    if (ret_code != OH_NN_SUCCESS) {
+      MS_LOG(ERROR) << "NNExecutor SetInput failed, current input tensor is " << tensor.Name()
+                    << ", OH_NN_ReturnCode = " << ret_code;
+      return lite::RET_ERROR;
+    }
+  }
+
+  return lite::RET_OK;
+}
+int mindspore::NNRTModelKernel::TransferOutputs() {
+  auto output_tensors = this->outputs();
+  for (size_t i = 0; i < output_tensors.size(); i++) {
+    auto tensor = output_tensors[i];
+    OH_NN_ReturnCode ret_code = OH_NNExecutor_SetOutput(oh_nn_executor, i, tensor.MutableData(), tensor.DataSize());
+    if (ret_code != OH_NN_SUCCESS) {
+      MS_LOG(ERROR) << "NNExecutor SetOutput failed, current out tensor is " << tensor.Name()
+                    << ", OH_NN_ReturnCode = " << ret_code;
+      return lite::RET_ERROR;
+    }
+  }
+  return lite::RET_OK;
+}
diff --git a/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.h b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.h
new file mode 100644
index 00000000..cf9481df
--- /dev/null
+++ b/mindspore/lite/src/runtime/delegate/nnrt/nnrt_model_kernel.h
@@ -0,0 +1,57 @@
+/**
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef LITE_NNRT_MODEL_KERNEL_H
+#define LITE_NNRT_MODEL_KERNEL_H
+#include <vector>
+#include
+#include
+#include
+#include "include/api/kernel.h"
+#include "interfaces/kits/c/neural_network_runtime.h"
+#include "src/common/log_adapter.h"
+#include "include/errorcode.h"
+
+namespace mindspore {
+
+class NNRTModelKernel : public kernel::Kernel {
+  /**
+   * NNRT cannot run a single op, only a whole model, so the whole model is wrapped into one kernel.
+   * */
+ public:
+  NNRTModelKernel(OH_NNExecutor *oh_nn_executor, const std::vector<mindspore::MSTensor> &inputs,
+                  const std::vector<mindspore::MSTensor> &outputs)
+      : kernel::Kernel(inputs, outputs, nullptr, nullptr), oh_nn_executor(oh_nn_executor) {}
+  int Prepare() override;
+  int Execute() override;
+  int ReSize() override {
+    MS_LOG(ERROR) << "NNRT does not support the resize function yet.";
+    return lite::RET_ERROR;
+  }
+  OH_NN_DataType ConvertDataType(mindspore::DataType data_type);
+  int PrepareInputs();
+  int TransferOutputs();
+  ~NNRTModelKernel() override {
+    MS_LOG(INFO) << "Start NNExecutor destroy.";
+    OH_NNExecutor_Destroy(&oh_nn_executor);
+    MS_LOG(INFO) << "NNExecutor destroy finished.";
+  }
+
+ protected:
+  OH_NNExecutor *oh_nn_executor = nullptr;
+};
+}  // namespace mindspore
+
+#endif  // LITE_NNRT_MODEL_KERNEL_H
-- 
2.34.1
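
For context, a minimal usage sketch showing how a delegate such as the NNRTDelegate added by this patch is typically attached to a MindSpore Lite session. This is illustrative only and not part of the patch: it assumes Context::SetDelegate as the attachment point (the usual MindSpore Lite delegate mechanism) and uses a placeholder model path "model.ms"; the actual integration code in this repository may differ.

#include <memory>
#include <vector>
#include "include/api/context.h"
#include "include/api/model.h"
#include "src/runtime/delegate/nnrt/nnrt_delegate.h"

int RunWithNNRTDelegate() {
  // Create a context and attach the NNRT delegate so supported subgraphs
  // are dispatched to NNRT instead of the default CPU kernels.
  auto context = std::make_shared<mindspore::Context>();
  auto delegate = std::make_shared<mindspore::NNRTDelegate>();
  context->SetDelegate(delegate);  // assumed attachment point

  mindspore::Model model;
  // "model.ms" is a placeholder path for a converted MindSpore Lite model.
  if (model.Build("model.ms", mindspore::kMindIR, context) != mindspore::kSuccess) {
    return -1;
  }

  auto inputs = model.GetInputs();  // fill input tensor data before running
  std::vector<mindspore::MSTensor> outputs;
  return model.Predict(inputs, &outputs) == mindspore::kSuccess ? 0 : -1;
}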