• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #ifndef HDI_NNRT_TEST_UTILS_H
17 #define HDI_NNRT_TEST_UTILS_H
18 
19 #include <string>
20 #include <vector>
21 #include <v1_0/innrt_device.h>
22 #include <v1_0/nnrt_types.h>
23 
24 #include "gtest/gtest.h"
25 #include "mindir_lite_graph.h"
26 
27 #include "interfaces/kits/c/neural_network_runtime_type.h"
28 
29 namespace V1_0 = OHOS::HDI::Nnrt::V1_0;
30 
31 namespace OHOS::NeuralNetworkRuntime::Test {
// Sentinel for an invalid file descriptor (returned when shared-memory
// allocation fails).
constexpr int NNRT_INVALID_FD = -1;
// Number of float elements in each addend tensor (matches TENSOR_DIMS: 3*2*2).
constexpr uint32_t ADDEND_DATA_SIZE = 12;
// Byte length of one addend tensor buffer.
constexpr uint32_t ADDEND_BUFFER_LENGTH = ADDEND_DATA_SIZE * sizeof(float);
// Dimensions of the test tensors; product equals ADDEND_DATA_SIZE.
// (std::vector cannot be constexpr in C++17, so this stays const.)
const std::vector<int32_t> TENSOR_DIMS = {3, 2, 2};
// Element-wise Add test inputs and the expected per-element result.
constexpr float ADD_VALUE_1 = 1;
constexpr float ADD_VALUE_2 = 2;
constexpr float ADD_VALUE_RESULT = 3;
// Presumably the cap on elements printed by PrintTensor — confirm in the .cpp.
constexpr size_t PRINT_NUM = 10;
41 
// Static helpers shared by the HDI NNRt device tests: building small Add-op
// test models, converting them to the HDI V1_0 model form, and managing the
// shared-memory buffers behind IO tensors. (Implementations live in the
// companion .cpp; comments below describe the declared contracts.)
class HDICommon {
public:
    // Builds a fixed-shape Add-graph test model; *model receives the handle.
    static void BuildAddGraph(OH_NNModel **model);
    // Builds the Add-graph variant with dynamic input dimensions.
    static void BuildAddGraphDynamic(OH_NNModel **model);
    // Converts an OH_NNModel to the HDI model representation; tensorBuffer
    // receives the device-side shared buffer, *iModel the converted model.
    // NOTE(review): device_ is passed by value here while other methods take a
    // reference — presumably intentional; confirm against the .cpp.
    static OH_NN_ReturnCode ConvertModel(OHOS::sptr<V1_0::INnrtDevice> device_, OH_NNModel *model,
                                         V1_0::SharedBuffer &tensorBuffer, V1_0::Model **iModel);
    // Creates an IO tensor backed by device-allocated shared memory.
    static V1_0::IOTensor CreateIOTensor(OHOS::sptr<V1_0::INnrtDevice> &device);
    // Creates an input tensor of `length` bytes, presumably initialized from
    // `data` — TODO confirm copy semantics in the .cpp.
    static V1_0::IOTensor CreateInputIOTensor(OHOS::sptr<V1_0::INnrtDevice> &device, size_t length, float* data);
    // Creates an uninitialized output tensor of `length` bytes.
    static V1_0::IOTensor CreateOutputIOTensor(OHOS::sptr<V1_0::INnrtDevice> &device, size_t length);
    // Maps `length` bytes of the shared-memory fd; returns the mapped address.
    static void* MapMemory(int fd, size_t length);
    // Unmaps a single buffer previously returned by MapMemory.
    static void UnmapMemory(float* buffer);
    // Unmaps every buffer in the list.
    static void UnmapAllMemory(std::vector<void* > &buffers);
    // Fills `buffer` (of `length` bytes) with the values in `data`.
    static void SetData(float* buffer, size_t length, float* data);
    // Releases the device-side buffers owned by each tensor in `tensors`.
    static void ReleaseBufferOfTensors(OHOS::sptr<V1_0::INnrtDevice> &device, std::vector<V1_0::IOTensor> &tensors);
};
57 
// Compares actual model output against the expected values; returns true on a
// match (assumption: element-wise float comparison — confirm in the .cpp).
bool CheckExpectOutput(const std::vector<float> &output, const std::vector<float> &expect);
// Prints leading elements of `buffer` for debugging — presumably capped at
// PRINT_NUM entries; verify against the implementation.
void PrintTensor(const float *buffer, size_t length);
60 } // namespace OHOS::NeuralNetworkRuntime::Test
61 
62 #endif