/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/NEON/functions/NEComputeAllAnchors.h"
#include "arm_compute/runtime/NEON/functions/NEGenerateProposalsLayer.h"
#include "arm_compute/runtime/NEON/functions/NEPermute.h"
#include "arm_compute/runtime/NEON/functions/NESlice.h"
#include "tests/Globals.h"
#include "tests/NEON/Accessor.h"
#include "tests/NEON/ArrayAccessor.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ComputeAllAnchorsFixture.h"
#include "utils/TypePrinter.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
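// Fill a tensor by copying the given values verbatim into its buffer (used for reference tensors with a linear layout).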
template <typename U, typename T>
inline void fill_tensor(U &&tensor, const std::vector<T> &v)
{
    std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
}

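// Fill an Accessor-backed tensor: NCHW data can be copied directly, while for NHWC the values
// (provided in NCHW order) are rewritten element by element into channel-first coordinates.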
template <typename T>
inline void fill_tensor(Accessor &&tensor, const std::vector<T> &v)
{
    if(tensor.data_layout() == DataLayout::NCHW)
    {
        std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
    }
    else
    {
        const int channels = tensor.shape()[0];
        const int width    = tensor.shape()[1];
        const int height   = tensor.shape()[2];
        for(int x = 0; x < width; ++x)
        {
            for(int y = 0; y < height; ++y)
            {
                for(int c = 0; c < channels; ++c)
                {
                    *(reinterpret_cast<T *>(tensor(Coordinates(c, x, y)))) = *(reinterpret_cast<const T *>(v.data() + x + y * width + c * height * width));
                }
            }
        }
    }
}

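// Feature map sizes and spatial scales exercised by the ComputeAllAnchors fixture tests.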
const auto ComputeAllInfoDataset = framework::dataset::make("ComputeAllInfo",
{
    ComputeAnchorsInfo(10U, 10U, 1. / 16.f),
    ComputeAnchorsInfo(100U, 1U, 1. / 2.f),
    ComputeAnchorsInfo(100U, 1U, 1. / 4.f),
    ComputeAnchorsInfo(100U, 100U, 1. / 4.f),
});

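// Absolute tolerance used when validating QSYMM16 anchor outputs.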
constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(1);
} // namespace

TEST_SUITE(NEON)
TEST_SUITE(GenerateProposals)

// *INDENT-OFF*
// clang-format off
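// Validate() configurations: the first set of tensor infos is valid, the remaining ones must be rejected.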
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
    framework::dataset::make("scores", { TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F32),
                                         TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16),   // Mismatching types
                                         TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16),   // Wrong deltas (number of transformations not a multiple of 4)
                                         TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16),   // Wrong anchors (number of values per roi != 4)
                                         TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16),   // Output tensor num_valid_proposals not scalar
                                         TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16)}), // num_valid_proposals not U32
    framework::dataset::make("deltas", { TensorInfo(TensorShape(100U, 100U, 36U), 1, DataType::F32),
                                         TensorInfo(TensorShape(100U, 100U, 36U), 1, DataType::F32),
                                         TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32),
                                         TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32),
                                         TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32),
                                         TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32)})),
    framework::dataset::make("anchors", { TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
                                          TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
                                          TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
                                          TensorInfo(TensorShape(5U, 9U), 1, DataType::F32),
                                          TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
                                          TensorInfo(TensorShape(4U, 9U), 1, DataType::F32)})),
    framework::dataset::make("proposals", { TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
                                            TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
                                            TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
                                            TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
                                            TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
                                            TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32)})),
    framework::dataset::make("scores_out", { TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32)})),
    framework::dataset::make("num_valid_proposals", { TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
                                                      TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
                                                      TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
                                                      TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
                                                      TensorInfo(TensorShape(1U, 10U), 1, DataType::U32),
                                                      TensorInfo(TensorShape(1U, 1U), 1, DataType::F16)})),
    framework::dataset::make("generate_proposals_info", { GenerateProposalsInfo(10.f, 10.f, 1.f),
                                                          GenerateProposalsInfo(10.f, 10.f, 1.f),
                                                          GenerateProposalsInfo(10.f, 10.f, 1.f),
                                                          GenerateProposalsInfo(10.f, 10.f, 1.f),
                                                          GenerateProposalsInfo(10.f, 10.f, 1.f),
                                                          GenerateProposalsInfo(10.f, 10.f, 1.f)})),
    framework::dataset::make("Expected", { true, false, false, false, false, false })),
    scores, deltas, anchors, proposals, scores_out, num_valid_proposals, generate_proposals_info, expected)
{
    ARM_COMPUTE_EXPECT(bool(NEGenerateProposalsLayer::validate(&scores.clone()->set_is_resizable(true),
                                                               &deltas.clone()->set_is_resizable(true),
                                                               &anchors.clone()->set_is_resizable(true),
                                                               &proposals.clone()->set_is_resizable(true),
                                                               &scores_out.clone()->set_is_resizable(true),
                                                               &num_valid_proposals.clone()->set_is_resizable(true),
                                                               generate_proposals_info)) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

template <typename T>
using NEComputeAllAnchorsFixture = ComputeAllAnchorsFixture<Tensor, Accessor, NEComputeAllAnchors, T>;

TEST_SUITE(Float)
TEST_SUITE(FP32)
DATA_TEST_CASE(IntegrationTestCaseAllAnchors, framework::DatasetMode::ALL, framework::dataset::make("DataType", { DataType::F32 }),
               data_type)
{
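    // Expand 3 base anchors over a 3x4 feature map with a 1/16 spatial scale (stride 16)
    // and compare the result against precomputed reference anchors.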
    const int values_per_roi = 4;
    const int num_anchors    = 3;
    const int feature_height = 4;
    const int feature_width  = 3;

    SimpleTensor<float> anchors_expected(TensorShape(values_per_roi, feature_width * feature_height * num_anchors), DataType::F32);
    fill_tensor(anchors_expected, std::vector<float> { -26, -19, 87, 86,
                                                       -81, -27, 58, 63,
                                                       -44, -15, 55, 36,
                                                       -10, -19, 103, 86,
                                                       -65, -27, 74, 63,
                                                       -28, -15, 71, 36,
                                                       6, -19, 119, 86,
                                                       -49, -27, 90, 63,
                                                       -12, -15, 87, 36,
                                                       -26, -3, 87, 102,
                                                       -81, -11, 58, 79,
                                                       -44, 1, 55, 52,
                                                       -10, -3, 103, 102,
                                                       -65, -11, 74, 79,
                                                       -28, 1, 71, 52,
                                                       6, -3, 119, 102,
                                                       -49, -11, 90, 79,
                                                       -12, 1, 87, 52,
                                                       -26, 13, 87, 118,
                                                       -81, 5, 58, 95,
                                                       -44, 17, 55, 68,
                                                       -10, 13, 103, 118,
                                                       -65, 5, 74, 95,
                                                       -28, 17, 71, 68,
                                                       6, 13, 119, 118,
                                                       -49, 5, 90, 95,
                                                       -12, 17, 87, 68,
                                                       -26, 29, 87, 134,
                                                       -81, 21, 58, 111,
                                                       -44, 33, 55, 84,
                                                       -10, 29, 103, 134,
                                                       -65, 21, 74, 111,
                                                       -28, 33, 71, 84,
                                                       6, 29, 119, 134,
                                                       -49, 21, 90, 111,
                                                       -12, 33, 87, 84
                                                     });

    Tensor all_anchors;
    Tensor anchors = create_tensor<Tensor>(TensorShape(4, num_anchors), data_type);

    // Create and configure function
    NEComputeAllAnchors compute_anchors;
    compute_anchors.configure(&anchors, &all_anchors, ComputeAnchorsInfo(feature_width, feature_height, 1. / 16.0));
    anchors.allocator()->allocate();
    all_anchors.allocator()->allocate();

    fill_tensor(Accessor(anchors), std::vector<float> { -26, -19, 87, 86,
                                                        -81, -27, 58, 63,
                                                        -44, -15, 55, 36
                                                      });
    // Compute function
    compute_anchors.run();
    validate(Accessor(all_anchors), anchors_expected);
}

DATA_TEST_CASE(IntegrationTestCaseGenerateProposals, framework::DatasetMode::ALL, combine(framework::dataset::make("DataType", { DataType::F32 }),
                                                                                          framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
               data_type, data_layout)
{
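    // End-to-end run of NEGenerateProposalsLayer on a 5x4 feature map with 2 anchors per location,
    // validated against precomputed proposals and scores for both NCHW and NHWC layouts.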
    const int values_per_roi = 4;
    const int num_anchors    = 2;
    const int feature_height = 4;
    const int feature_width  = 5;

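    // Objectness scores, one per anchor and spatial position (5 * 4 * 2 = 40 values), stored in NCHW order.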
    std::vector<float> scores_vector
    {
        5.055894435664012e-04f, 1.270304909820112e-03f, 2.492271113912067e-03f, 5.951663827809190e-03f,
        7.846917156877404e-03f, 6.776275276294789e-03f, 6.761571012891965e-03f, 4.898292096237725e-03f,
        6.044472332578605e-04f, 3.203334118759474e-03f, 2.947527908919908e-03f, 6.313238560015770e-03f,
        7.931767757095738e-03f, 8.764345805102866e-03f, 7.325012199914913e-03f, 4.317069470446271e-03f,
        2.372537409795522e-03f, 1.589227460352735e-03f, 7.419477503600818e-03f, 3.157690354133824e-05f,
        1.125915135986472e-03f, 9.865363483872330e-03f, 2.429454743386769e-03f, 2.724460564167563e-03f,
        7.670409838207963e-03f, 5.558891552328172e-03f, 7.876904873099614e-03f, 6.824746047239291e-03f,
        7.023817548067892e-03f, 3.651314909238673e-04f, 6.720443709032501e-03f, 5.935615511606155e-03f,
        2.837349642759774e-03f, 1.787235113610299e-03f, 4.538568889918262e-03f, 3.391510678188818e-03f,
        7.328474239481874e-03f, 6.306967923936016e-03f, 8.102218904895860e-04f, 3.366646521610209e-03f
    };

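    // Box regression deltas: values_per_roi values per anchor per spatial position (5 * 4 * 2 * 4 = 160 values), NCHW order.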
    std::vector<float> bbx_vector
    {
        5.066650471856862e-03, -7.638671742936328e-03, 2.549596503988635e-03, -8.316416756423296e-03,
        -2.397471917924575e-04, 7.370595187754891e-03, -2.771880178185262e-03, 3.958364873973579e-03,
        4.493661094712284e-03, 2.016487051533088e-03, -5.893883038142033e-03, 7.570636080807809e-03,
        -1.395511229386785e-03, 3.686686052704696e-03, -7.738166245767079e-03, -1.947306329828059e-03,
        -9.299719716045681e-03, -3.476410493413708e-03, -2.390761190919604e-03, 4.359281254364210e-03,
        -2.135251160164030e-04, 9.203299843371962e-03, 4.042322775006053e-03, -9.464271243910754e-03,
        2.566239543229305e-03, -9.691093900220627e-03, -4.019283034310979e-03, 8.145470429508792e-03,
        7.345087308315662e-04, 7.049642787384043e-03, -2.768492313674294e-03, 6.997160053405803e-03,
        6.675346697112969e-03, 2.353293365652274e-03, -3.612002585241749e-04, 1.592076522068768e-03,
        -8.354188900818149e-04, -5.232515333564140e-04, 6.946683728847089e-03, -8.469757407935994e-03,
        -8.985324496496555e-03, 4.885832859017961e-03, -7.662967577576512e-03, 7.284124004335807e-03,
        -5.812167510299458e-03, -5.760336800482398e-03, 6.040416930336549e-03, 5.861508595443691e-03,
        -5.509243096133549e-04, -2.006142470055888e-03, -7.205925340416066e-03, -1.117459082969758e-03,
        4.233247017623154e-03, 8.079257498201178e-03, 2.962639022639513e-03, 7.069474943472751e-03,
        -8.562946284971293e-03, -8.228634642768271e-03, -6.116245322799971e-04, -7.213122000180859e-03,
        1.693094399433209e-03, -4.287504459132290e-03, 8.740365683925144e-03, 3.751788160720638e-03,
        7.006764222862830e-03, 9.676754678358187e-03, -6.458757235812945e-03, -4.486506575589758e-03,
        -4.371087196816259e-03, 3.542166755953152e-03, -2.504808998699504e-03, 5.666601724512010e-03,
        -3.691862724546129e-03, 3.689809719085287e-03, 9.079930264704458e-03, 6.365127787359476e-03,
        2.881681788246101e-06, 9.991866069315165e-03, -1.104757466496565e-03, -2.668455405633477e-03,
        -1.225748887087659e-03, 6.530536159094015e-03, 3.629468917975644e-03, 1.374426066950348e-03,
        -2.404098881570632e-03, -4.791365049441602e-03, -2.970654027009094e-03, 7.807553690294366e-03,
        -1.198321129505323e-03, -3.574885336949881e-03, -5.380848303732298e-03, 9.705151282165116e-03,
        -1.005217683242201e-03, 9.178094036278405e-03, -5.615977269541644e-03, 5.333533158509859e-03,
        -2.817116206168516e-03, 6.672609782000503e-03, 6.575769501651313e-03, 8.987596634989362e-03,
        -1.283530791296188e-03, 1.687717120057778e-03, 3.242391851439037e-03, -7.312060454341677e-03,
        4.735335326324270e-03, -6.832367028817463e-03, -5.414854835884652e-03, -9.352380213755996e-03,
        -3.682662043703889e-03, -6.127508590419776e-04, -7.682256596819467e-03, 9.569532628790246e-03,
        -1.572157284518933e-03, -6.023034366859191e-03, -5.110873282582924e-03, -8.697072236660256e-03,
        -3.235150419663566e-03, -8.286320236471386e-03, -5.229472409112913e-03, 9.920785896115053e-03,
        -2.478413362126123e-03, -9.261324796935007e-03, 1.718512310840434e-04, 3.015875488208480e-03,
        -6.172932549255669e-03, -4.031715551985103e-03, -9.263878005853677e-03, -2.815310738453385e-03,
        7.075307462133643e-03, 1.404611747938669e-03, -1.518548732533266e-03, -9.293430941655778e-03,
        6.382186966633246e-03, 8.256835789169248e-03, 3.196907843506736e-03, 8.821615689753433e-03,
        -7.661543424832439e-03, 1.636273081822326e-03, -8.792373335756125e-03, 2.958775812049877e-03,
        -6.269300278071262e-03, 6.248285790856450e-03, -3.675414624536002e-03, -1.692616700318762e-03,
        4.126007647815893e-03, -9.155291689759584e-03, -8.432616039924004e-03, 4.899980636213323e-03,
        3.511535019681671e-03, -1.582745757177339e-03, -2.703657774917963e-03, 6.738168990840388e-03,
        4.300455303937919e-03, 9.618312854781494e-03, 2.762142918402472e-03, -6.590025003382154e-03,
        -2.071168373801788e-03, 8.613893943683627e-03, 9.411190295341036e-03, -6.129018930548372e-03
    };

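    // Two base anchors, each described by (x1, y1, x2, y2).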
    const std::vector<float> anchors_vector{ -26, -19, 87, 86, -81, -27, 58, 63 };

    SimpleTensor<float> proposals_expected(TensorShape(5, 9), DataType::F32);
    fill_tensor(proposals_expected, std::vector<float>
    {
        0, 0, 0, 75.269, 64.4388,
        0, 21.9579, 13.0535, 119, 99,
        0, 38.303, 0, 119, 87.6447,
        0, 0, 0, 119, 64.619,
        0, 0, 20.7997, 74.0714, 99,
        0, 0, 0, 91.8963, 79.3724,
        0, 0, 4.42377, 58.1405, 95.1781,
        0, 0, 13.4405, 104.799, 99,
        0, 38.9066, 28.2434, 119, 99
    });

    SimpleTensor<float> scores_expected(TensorShape(9), DataType::F32);
    fill_tensor(scores_expected, std::vector<float>
    {
        0.00986536,
        0.00876435,
        0.00784692,
        0.00767041,
        0.00732847,
        0.00682475,
        0.00672044,
        0.00631324,
        3.15769e-05
    });

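    // Shapes are expressed in NCHW terms and permuted for NHWC runs; fill_tensor re-lays the data to match.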
    TensorShape scores_shape = TensorShape(feature_width, feature_height, num_anchors);
    TensorShape deltas_shape = TensorShape(feature_width, feature_height, values_per_roi * num_anchors);
    if(data_layout == DataLayout::NHWC)
    {
        permute(scores_shape, PermutationVector(2U, 0U, 1U));
        permute(deltas_shape, PermutationVector(2U, 0U, 1U));
    }
    // Inputs
    Tensor scores      = create_tensor<Tensor>(scores_shape, data_type, 1, QuantizationInfo(), data_layout);
    Tensor bbox_deltas = create_tensor<Tensor>(deltas_shape, data_type, 1, QuantizationInfo(), data_layout);
    Tensor anchors     = create_tensor<Tensor>(TensorShape(values_per_roi, num_anchors), data_type);

    // Outputs
    Tensor proposals;
    Tensor num_valid_proposals;
    Tensor scores_out;
    num_valid_proposals.allocator()->init(TensorInfo(TensorShape(1), 1, DataType::U32));

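    // Proposals are generated for a 120x100 image (image scale ~1/6) with a feature stride of 16,
    // keeping up to 6000 candidates before NMS and 300 after (IoU threshold 0.7, minimum box size 16).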
    NEGenerateProposalsLayer generate_proposals;
    generate_proposals.configure(&scores, &bbox_deltas, &anchors, &proposals, &scores_out, &num_valid_proposals,
                                 GenerateProposalsInfo(120, 100, 0.166667f, 1 / 16.0, 6000, 300, 0.7f, 16.0f));

    // Allocate memory for input/output tensors
    scores.allocator()->allocate();
    bbox_deltas.allocator()->allocate();
    anchors.allocator()->allocate();
    proposals.allocator()->allocate();
    num_valid_proposals.allocator()->allocate();
    scores_out.allocator()->allocate();
    // Fill inputs
    fill_tensor(Accessor(scores), scores_vector);
    fill_tensor(Accessor(bbox_deltas), bbx_vector);
    fill_tensor(Accessor(anchors), anchors_vector);

    // Run operator
    generate_proposals.run();
    // Gather num_valid_proposals
    const uint32_t N = *reinterpret_cast<uint32_t *>(num_valid_proposals.ptr_to_element(Coordinates(0, 0)));

    // Select the first N entries of the proposals
    Tensor  proposals_final;
    NESlice select_proposals;
    select_proposals.configure(&proposals, &proposals_final, Coordinates(0, 0), Coordinates(values_per_roi + 1, N));

    proposals_final.allocator()->allocate();
    select_proposals.run();

    // Select the first N entries of the scores
    Tensor  scores_final;
    NESlice select_scores;
    select_scores.configure(&scores_out, &scores_final, Coordinates(0), Coordinates(N));
    scores_final.allocator()->allocate();
    select_scores.run();

    const RelativeTolerance<float> tolerance_f32(1e-5f);
    // Validate the output
    validate(Accessor(proposals_final), proposals_expected, tolerance_f32);
    validate(Accessor(scores_final), scores_expected, tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(ComputeAllAnchors, NEComputeAllAnchorsFixture<float>, framework::DatasetMode::ALL,
                       combine(combine(framework::dataset::make("NumAnchors", { 2, 4, 8 }), ComputeAllInfoDataset), framework::dataset::make("DataType", { DataType::F32 })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // FP32
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(ComputeAllAnchors, NEComputeAllAnchorsFixture<half>, framework::DatasetMode::ALL,
                       combine(combine(framework::dataset::make("NumAnchors", { 2, 4, 8 }), ComputeAllInfoDataset), framework::dataset::make("DataType", { DataType::F16 })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // FP16
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

TEST_SUITE_END() // Float

template <typename T>
using NEComputeAllAnchorsQuantizedFixture = ComputeAllAnchorsQuantizedFixture<Tensor, Accessor, NEComputeAllAnchors, T>;

TEST_SUITE(Quantized)
TEST_SUITE(QSYMM16)
FIXTURE_DATA_TEST_CASE(ComputeAllAnchors, NEComputeAllAnchorsQuantizedFixture<int16_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(framework::dataset::make("NumAnchors", { 2, 4, 8 }), ComputeAllInfoDataset),
                                       framework::dataset::make("DataType", { DataType::QSYMM16 })),
                               framework::dataset::make("QuantInfo", { QuantizationInfo(0.125f, 0) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qsymm16);
}
TEST_SUITE_END() // QSYMM16
TEST_SUITE_END() // Quantized

TEST_SUITE_END() // GenerateProposals
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute