//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "DriverTestHelpers.hpp"

#include "../1.0/HalPolicy.hpp"

#include <boost/test/unit_test.hpp>

#include <log/log.h>

BOOST_AUTO_TEST_SUITE(GenericLayerTests)

using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;

using HalPolicy = hal_1_0::HalPolicy;

BOOST_AUTO_TEST_CASE(GetSupportedOperations)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));

    V1_0::ErrorStatus errorStatus;
    std::vector<bool> supported;

    auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
    {
        errorStatus = _errorStatus;
        supported = _supported;
    };
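    // The callback records the status and per-operation support flags reported by the driver,
    // so the assertions below can inspect them after each getSupportedOperations() call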

    HalPolicy::Model model0 = {};

    // Add operands
    int32_t actValue = 0;
    float weightValue[] = {2, 4, 1};
    float biasValue[] = {4};

    AddInputOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1, 3});
    AddTensorOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1, 3}, weightValue);
    AddTensorOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1}, biasValue);
    AddIntOperand<HalPolicy>(model0, actValue);
    AddOutputOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1, 1});
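    // Operands were added in index order: 0 = input, 1 = weights, 2 = bias, 3 = activation, 4 = output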

    model0.operations.resize(1);

    // Make a correct fully connected operation
    model0.operations[0].type = HalPolicy::OperationType::FULLY_CONNECTED;
    model0.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
    model0.operations[0].outputs = hidl_vec<uint32_t>{4};

    driver->getSupportedOperations(model0, cb);
    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
    BOOST_TEST(supported.size() == (size_t)1);
    BOOST_TEST(supported[0] == true);

    V1_0::Model model1 = {};

    AddInputOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 3});
    AddTensorOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 3}, weightValue);
    AddTensorOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1}, biasValue);
    AddIntOperand<HalPolicy>(model1, actValue);
    AddOutputOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 1});

    model1.operations.resize(2);

    // Make a correct fully connected operation
    model1.operations[0].type = HalPolicy::OperationType::FULLY_CONNECTED;
    model1.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
    model1.operations[0].outputs = hidl_vec<uint32_t>{4};

    // Add an incorrect fully connected operation
    AddIntOperand<HalPolicy>(model1, actValue);
    AddOutputOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 1});

    model1.operations[1].type = HalPolicy::OperationType::FULLY_CONNECTED;
    model1.operations[1].inputs = hidl_vec<uint32_t>{4}; // Only 1 input operand, expected 4
    model1.operations[1].outputs = hidl_vec<uint32_t>{5};

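    // The malformed second operation invalidates the whole query: the driver should report
    // INVALID_ARGUMENT and return no per-operation results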
    driver->getSupportedOperations(model1, cb);

    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
    BOOST_TEST(supported.empty());

    // Test Broadcast on add/mul operators
    HalPolicy::Model model2 = {};

    AddInputOperand<HalPolicy>(model2,
                               hidl_vec<uint32_t>{1, 1, 3, 4},
                               HalPolicy::OperandType::TENSOR_FLOAT32,
                               0.0f,
                               0,
                               2);
    AddInputOperand<HalPolicy>(model2,
                               hidl_vec<uint32_t>{4},
                               HalPolicy::OperandType::TENSOR_FLOAT32,
                               0.0f,
                               0,
                               2);
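    // The {4} input is broadcast against the {1, 1, 3, 4} input by both ADD and MUL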
    AddIntOperand<HalPolicy>(model2, actValue, 2);
    AddOutputOperand<HalPolicy>(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
    AddOutputOperand<HalPolicy>(model2, hidl_vec<uint32_t>{1, 1, 3, 4});

    model2.operations.resize(2);

    model2.operations[0].type = HalPolicy::OperationType::ADD;
    model2.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2};
    model2.operations[0].outputs = hidl_vec<uint32_t>{3};

    model2.operations[1].type = HalPolicy::OperationType::MUL;
    model2.operations[1].inputs = hidl_vec<uint32_t>{0, 1, 2};
    model2.operations[1].outputs = hidl_vec<uint32_t>{4};

    driver->getSupportedOperations(model2, cb);
    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
    BOOST_TEST(supported.size() == (size_t)2);
    BOOST_TEST(supported[0] == true);
    BOOST_TEST(supported[1] == true);

    V1_0::Model model3 = {};

    AddInputOperand<HalPolicy>(model3,
                               hidl_vec<uint32_t>{1, 1, 3, 4},
                               HalPolicy::OperandType::TENSOR_INT32);
    AddInputOperand<HalPolicy>(model3,
                               hidl_vec<uint32_t>{4},
                               HalPolicy::OperandType::TENSOR_INT32);
    AddInputOperand<HalPolicy>(model3, hidl_vec<uint32_t>{1, 1, 3, 4});

    AddOutputOperand<HalPolicy>(model3, hidl_vec<uint32_t>{1, 1, 3, 4});
    AddOutputOperand<HalPolicy>(model3,
                                hidl_vec<uint32_t>{1, 1, 3, 4},
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                1.f / 225.f);

    model3.operations.resize(1);

    // Add an unsupported operation: the query should succeed with no error, but the operation should be reported as unsupported
    model3.operations[0].type = HalPolicy::OperationType::HASHTABLE_LOOKUP;
    model3.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2};
    model3.operations[0].outputs = hidl_vec<uint32_t>{3, 4};

    driver->getSupportedOperations(model3, cb);
    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
    BOOST_TEST(supported.size() == (size_t)1);
    BOOST_TEST(supported[0] == false);

    HalPolicy::Model model4 = {};

    AddIntOperand<HalPolicy>(model4, 0);

    model4.operations.resize(1);

    // Add an invalid operation: the type value 100 lies outside the OperationType enum range
    model4.operations[0].type = static_cast<HalPolicy::OperationType>(100);
    model4.operations[0].outputs = hidl_vec<uint32_t>{0};

    driver->getSupportedOperations(model4, cb);
    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
    BOOST_TEST(supported.empty());
}

// The purpose of this test is to ensure that when encountering an unsupported operation
// it is skipped and getSupportedOperations() continues (rather than failing and stopping).
// As per IVGCVSW-710.
BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));

    V1_0::ErrorStatus errorStatus;
    std::vector<bool> supported;

    auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
    {
        errorStatus = _errorStatus;
        supported = _supported;
    };

    HalPolicy::Model model = {};

    // Operands
    int32_t actValue = 0;
    float weightValue[] = {2, 4, 1};
    float biasValue[] = {4};

    // HASHTABLE_LOOKUP is unsupported at the time of writing this test, but any unsupported layer will do
    AddInputOperand<HalPolicy>(model,
                               hidl_vec<uint32_t>{1, 1, 3, 4},
                               HalPolicy::OperandType::TENSOR_INT32);
    AddInputOperand<HalPolicy>(model,
                               hidl_vec<uint32_t>{4},
                               HalPolicy::OperandType::TENSOR_INT32,
                               0.0f,
                               0,
                               2);
    AddInputOperand<HalPolicy>(model,
                               hidl_vec<uint32_t>{1, 1, 3, 4},
                               HalPolicy::OperandType::TENSOR_FLOAT32,
                               0.0f,
                               0,
                               2);

    AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, 3, 4});
    AddOutputOperand<HalPolicy>(model,
                                hidl_vec<uint32_t>{1, 1, 3, 4},
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                1.f / 225.f);

    // Fully connected is supported
    AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3});

    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3}, weightValue);
    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasValue);

    AddIntOperand<HalPolicy>(model, actValue);

    AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1});

    // Output operand for EMBEDDING_LOOKUP, which is also unsupported
    AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, 3, 4});

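    // Operand indices at this point: 0-2 (inputs) and 3-4 (outputs) belong to HASHTABLE_LOOKUP,
    // 5-8 (inputs) and 9 (output) to FULLY_CONNECTED, and 10 is the output of EMBEDDING_LOOKUP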
    model.operations.resize(3);

    // Unsupported
    model.operations[0].type = HalPolicy::OperationType::HASHTABLE_LOOKUP;
    model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2};
    model.operations[0].outputs = hidl_vec<uint32_t>{3, 4};

    // Supported
    model.operations[1].type = HalPolicy::OperationType::FULLY_CONNECTED;
    model.operations[1].inputs = hidl_vec<uint32_t>{5, 6, 7, 8};
    model.operations[1].outputs = hidl_vec<uint32_t>{9};

    // Unsupported
    model.operations[2].type = HalPolicy::OperationType::EMBEDDING_LOOKUP;
    model.operations[2].inputs = hidl_vec<uint32_t>{1, 2};
    model.operations[2].outputs = hidl_vec<uint32_t>{10};

    // The unsupported layers should be reported as false and getSupportedOperations() should continue rather than failing and stopping
    driver->getSupportedOperations(model, cb);
    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
    BOOST_TEST(supported.size() == (size_t)3);
    BOOST_TEST(supported[0] == false);
    BOOST_TEST(supported[1] == true);
    BOOST_TEST(supported[2] == false);
}

// The purpose of this test is to ensure that when a failure is encountered during
// memory pool mapping, we properly report an error to the framework via a callback
BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));

    V1_0::ErrorStatus errorStatus;
    std::vector<bool> supported;

    auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
    {
        errorStatus = _errorStatus;
        supported = _supported;
    };

    HalPolicy::Model model = {};

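    // Register a memory pool whose name does not match any hidl memory type the driver can map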
    model.pools = hidl_vec<hidl_memory>{hidl_memory("Unsupported hidl memory type", nullptr, 0)};

    // Memory pool mapping should fail, so we should report an error
    driver->getSupportedOperations(model, cb);
    BOOST_TEST((int)errorStatus != (int)V1_0::ErrorStatus::NONE);
    BOOST_TEST(supported.empty());
}

BOOST_AUTO_TEST_SUITE_END()
