1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #ifndef NNRT_UTILS_H
16 #define NNRT_UTILS_H
17 
18 #include <dirent.h>
19 #include <sys/stat.h>
20 #include <gtest/gtest.h>
21 
22 #include "interfaces/kits/c/neural_network_runtime/neural_network_runtime.h"
23 #include "common/log.h"
24 #include "mock_idevice.h"
25 #include "nncore_const.h"
26 
27 namespace OHOS {
28 namespace NeuralNetworkRuntime {
29 namespace Test {
30 namespace V2_0 = OHOS::HDI::Nnrt::V2_0;
31 struct OHNNOperandTest {
32     OH_NN_DataType dataType;
33     OH_NN_TensorType type;
34     std::vector<int32_t> shape;
35     void *data{nullptr};
36     int32_t length{0};
37     OH_NN_Format format = OH_NN_FORMAT_NONE;
38     const OH_NN_QuantParam *quantParam = nullptr;
39 };
40 
41 struct OHNNGraphArgs {
42     OH_NN_OperationType operationType;
43     std::vector<OHNNOperandTest> operands;
44     std::vector<uint32_t> paramIndices;
45     std::vector<uint32_t> inputIndices;
46     std::vector<uint32_t> outputIndices;
47     bool build = true;
48     bool specifyIO = true;
49     bool addOperation = true;
50 };
51 
52 struct OHNNGraphArgsMulti {
53     std::vector<OH_NN_OperationType> operationTypes;
54     std::vector<std::vector<OHNNOperandTest>> operands;
55     std::vector<std::vector<uint32_t>> paramIndices;
56     std::vector<std::vector<uint32_t>> inputIndices;
57     std::vector<std::vector<uint32_t>> outputIndices;
58     std::vector<uint32_t> graphInput;
59     std::vector<uint32_t> graphOutput;
60 };
61 
62 struct OHNNCompileParam {
63     int32_t deviceId = 0;
64     std::string cacheDir;
65     uint32_t cacheVersion = 0;
66     OH_NN_PerformanceMode performanceMode = OH_NN_PERFORMANCE_NONE;
67     OH_NN_Priority priority = OH_NN_PRIORITY_NONE;
68     bool enableFp16 = false;
69 };
70 
struct AddModel {
    // Single-op ADD graph: output = input0 + input1, no fused activation.
    float inputValue0[4] = {0, 1, 2, 3};
    float inputValue1[4] = {0, 1, 2, 3};
    int8_t activationValue = OH_NN_FUSED_NONE;  // fusion parameter: no activation
    float outputValue[4] = {0};                 // filled in by inference
    float expectValue[4] = {0, 2, 4, 6};        // element-wise sum of the two inputs

    // TENSOR_SHAPE and ADD_DATA_LENGTH come from nncore_const.h.
    OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, inputValue0, ADD_DATA_LENGTH};
    OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, inputValue1, ADD_DATA_LENGTH};
    OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)};
    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, outputValue, ADD_DATA_LENGTH};
    // Operand 2 (activation) is an operator parameter; 0/1 are inputs, 3 is the output.
    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ADD,
                               .operands = {input0, input1, activation, output},
                               .paramIndices = {2},
                               .inputIndices = {0, 1},
                               .outputIndices = {3}};
};
89 
struct AvgPoolDynamicModel {
    // ADD graph with fully dynamic shapes ({-1, -1, -1, -1}) on inputs and output.
    // NOTE(review): despite the struct name, the operation built is OH_NN_OPS_ADD,
    // not average pooling — confirm whether the name or the op type is intended.
    float inputValue0[4] = {0, 1, 2, 3};
    float inputValue1[4] = {0, 1, 2, 3};
    int8_t activationValue = OH_NN_FUSED_NONE;  // fusion parameter: no activation
    float outputValue[4] = {0};                 // filled in by inference
    float expectValue[4] = {0, 2, 4, 6};        // element-wise sum of the two inputs

    // Shapes are all -1 so the executor must be given concrete shapes at run time.
    OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, {-1, -1, -1, -1}, inputValue0, ADD_DATA_LENGTH};
    OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, {-1, -1, -1, -1}, inputValue1, ADD_DATA_LENGTH};
    OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)};
    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, {-1, -1, -1, -1}, outputValue, ADD_DATA_LENGTH};
    // Operand 2 (activation) is an operator parameter; 0/1 are inputs, 3 is the output.
    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ADD,
                               .operands = {input0, input1, activation, output},
                               .paramIndices = {2},
                               .inputIndices = {0, 1},
                               .outputIndices = {3}};
};
108 
109 struct TopKModel {
110     // TopK Model
111     float valueX[6] = {0, 1, 2, 3, 4, 5};
112     int8_t valueK = 2;
113     bool valueSorted = true;
114     float valueOutput1[2];
115     int32_t valueOutput2[2];
116 
117     OHNNOperandTest x = {OH_NN_FLOAT32, OH_NN_TENSOR, {1, 6}, valueX, 6 * sizeof(float)};
118     OHNNOperandTest k = {OH_NN_INT8, OH_NN_TENSOR, {}, &valueK, sizeof(int8_t)};
119     OHNNOperandTest sorted = {OH_NN_BOOL, OH_NN_TOP_K_SORTED, {}, &valueSorted, sizeof(bool)};
120     OHNNOperandTest output1 = {OH_NN_FLOAT32, OH_NN_TENSOR, {1, 2}, valueOutput1, 2 * sizeof(float)};
121     OHNNOperandTest output2 = {OH_NN_INT32, OH_NN_TENSOR, {1, 2}, valueOutput2, 2 * sizeof(int32_t)};
122 
123     OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_TOP_K,
124                                .operands = {x, k, sorted, output1, output2},
125                                .paramIndices = {2},
126                                .inputIndices = {0, 1},
127                                .outputIndices = {3, 4}};
128 };
129 
class AddTopKModel {
    // Two-op graph: an Add operation whose output feeds a TopK operation.
private:
    AddModel addModel;
    TopKModel topKModel;

public:
    // Flattened operand numbering across both operations:
    //   0..3 : add input0, add input1, add activation, add output
    //   4..7 : topk k, topk sorted, topk output1 (values), topk output2 (indices)
    // TopK's data input is the Add output (index 3), so TopKModel::x is omitted here;
    // its second input is k (4), and sorted (5) is its operator parameter.
    // Graph-level inputs are {0, 1, 4}; graph-level outputs are TopK's {6, 7}.
    OHNNGraphArgsMulti graphArgs = {
        .operationTypes = {OH_NN_OPS_ADD, OH_NN_OPS_TOP_K},
        .operands = {{addModel.input0, addModel.input1, addModel.activation, addModel.output},
            {topKModel.k, topKModel.sorted, topKModel.output1, topKModel.output2}},
        .paramIndices = {{2}, {5}},
        .inputIndices = {{0, 1}, {3, 4}},
        .outputIndices = {{3}, {6, 7}},
        .graphInput = {0, 1, 4},
        .graphOutput = {6, 7}};
};
147 
// Graph building / compilation / execution helpers (implemented in the matching .cpp).
NN_TensorDesc* createTensorDesc(const int32_t* shape, size_t shapeNum, OH_NN_DataType dataType, OH_NN_Format format);
int MultiModelBuildEndStep(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs);
int SingleModelBuildEndStep(OH_NNModel *model, const OHNNGraphArgs &graphArgs);
int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs);
int BuildSingleOpGraphWithQuantParams(OH_NNModel *model, const OHNNGraphArgs &graphArgs);
// All parameters default to nullptr so callers may free any subset of handles.
void Free(OH_NNModel *model = nullptr, OH_NNCompilation *compilation = nullptr, OH_NNExecutor *executor = nullptr);
int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam);
OH_NN_ReturnCode SetDevice(OH_NNCompilation *compilation);
int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs);
OH_NN_UInt32Array GetUInt32Array(std::vector<uint32_t> indices);
// NOTE(review): element count compared is fixed in the .cpp — not visible from this header.
bool CheckOutput(const float* output, const float* expect);
OH_NN_ReturnCode GetDeviceID(size_t *deviceId);
160 
// File-system helpers
enum class PathType { FILE, DIR, UNKNOWN, NOT_FOUND };
PathType CheckPath(const std::string &path);
bool DeleteFile(const std::string &path);
void CopyFile(const std::string &srcPath, const std::string &dstPath);
std::string ConcatPath(const std::string &str1, const std::string &str2);
void DeleteFolder(const std::string &path);
bool CreateFolder(const std::string &path);
169 
// NNCore object-construction helpers
void ConstructAddModel(OH_NNModel **model);
void ConstructCompilation(OH_NNCompilation **compilation);
void CreateExecutor(OH_NNExecutor **executor);
void CreateDynamicExecutor(OH_NNExecutor **executor);
// Fills the tensor-descriptor vectors and counts for an executor's inputs and outputs.
void GetExecutorInputOutputTensorDesc(OH_NNExecutor* executor,
                                      std::vector<NN_TensorDesc*>& inputTensorDescs, size_t& inputCount,
                                      std::vector<NN_TensorDesc*>& outputTensorDescs, size_t& outputCount);
void GetExecutorInputOutputTensorByDesc(OH_NNExecutor* executor,
    std::vector<NN_Tensor*>& inputTensors, const std::vector<NN_TensorDesc*>& inputTensorDescs,
    std::vector<NN_Tensor*>& outputTensors, const std::vector<NN_TensorDesc*>& outputTensorDescs);
void GetExecutorInputOutputTensor(OH_NNExecutor* executor, std::vector<NN_Tensor*>& inputTensors, size_t& inputCount,
                                  std::vector<NN_Tensor*>& outputTensors, size_t& outputCount);
// Destroy* helpers release every descriptor/tensor in the given vectors.
OH_NN_ReturnCode DestroyTensorDesc(std::vector<NN_TensorDesc*>& inputTensorDescs,
                                   std::vector<NN_TensorDesc*>& outputTensorDescs);
OH_NN_ReturnCode DestroyTensor(std::vector<NN_Tensor*>& inputTensors, std::vector<NN_Tensor*>& outputTensors);
186 } // namespace Test
187 } // namespace NeuralNetworkRuntime
188 } // namespace OHOS
189 
190 #endif // NNRT_UTILS_H