1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include "ActivationTestImpl.hpp"
7
8 #include <QuantizeHelper.hpp>
9 #include <ResolveType.hpp>
10
11 #include <backendsCommon/test/ActivationFixture.hpp>
12 #include <backendsCommon/test/TensorCopyUtils.hpp>
13 #include <backendsCommon/test/WorkloadTestUtils.hpp>
14 #include <reference/test/RefWorkloadFactoryHelper.hpp>
15
16 #include <armnn/utility/NumericCast.hpp>
17
18 #include <test/TensorHelpers.hpp>
19
20 #include <boost/multi_array.hpp>
21
22 #include <algorithm>
23
// Runs a single BoundedReLu activation workload over an NCHW tensor of the
// given dimensions and returns both the actual and the caller-supplied
// expected outputs.
//
// upperBound/lowerBound map to ActivationDescriptor m_A/m_B respectively.
// The quantization scale/offset pairs are only applied when T is a
// quantized type; they are ignored for Float32.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float upperBound,
    float lowerBound,
    float inputScale,
    int32_t inputOffset,
    float outputScale,
    int32_t outputOffset,
    const std::vector<T>& inputData,
    const std::vector<T>& outputExpectedData,
    unsigned int inputWidth,
    unsigned int inputHeight,
    unsigned int inputChannels,
    unsigned int inputBatchSize)
{
    IgnoreUnused(memoryManager);

    // The activation is applied element-wise, so the output dimensions
    // mirror the input dimensions exactly.
    unsigned int outputWidth = inputWidth;
    unsigned int outputHeight = inputHeight;
    unsigned int outputChannels = inputChannels;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Only quantized data types carry scale/offset information.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(inputScale);
        inputTensorInfo.SetQuantizationOffset(inputOffset);

        outputTensorInfo.SetQuantizationScale(outputScale);
        outputTensorInfo.SetQuantizationOffset(outputOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Setup bounded ReLu: m_A is the upper clamp, m_B the lower clamp.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
    descriptor.m_Parameters.m_A = upperBound;
    descriptor.m_Parameters.m_B = lowerBound;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    // Handles must be allocated before any data is copied in or out.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    // Expected values are provided by the caller (calculated manually).
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);

    return result;
}
93
BoundedReLuUpperAndLowerBoundTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)94 LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(
95 armnn::IWorkloadFactory& workloadFactory,
96 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
97 const armnn::ITensorHandleFactory& tensorHandleFactory)
98 {
99 unsigned int inputWidth = 4u;
100 unsigned int inputHeight = 5u;
101 unsigned int inputChannels = 1u;
102 unsigned int inputBatchSize = 1;
103
104 std::vector<float> input = std::vector<float>{
105 -2.0f, 0.1f, 0.5f, 1.25f,
106 0.786f, 0.9875f, -1.5f, 0.384f,
107 1.0001f, 3.5f, 7.5f, 0.896f,
108 2.126f, 2.0f, 0.3f, 0.15f,
109 0.999f, 1.2f, 0.89f, 6.1f,
110 };
111
112 // Calculated manually.
113 std::vector<float> output = std::vector<float>{
114 -1.0f, 0.1f, 0.5f, 1.0f,
115 0.786f, 0.9875f, -1.0f, 0.384f,
116 1.0f, 1.0f, 1.0f, 0.896f,
117 1.0f, 1.0f, 0.3f, 0.15f,
118 0.999f, 1.0f, 0.89f, 1.0f,
119 };
120
121 return BoundedReLuTestCommon<armnn::DataType::Float32>(
122 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
123 inputWidth, inputHeight, inputChannels, inputBatchSize);
124 }
125
BoundedReLuUpperBoundOnlyTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)126 LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(
127 armnn::IWorkloadFactory& workloadFactory,
128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
129 const armnn::ITensorHandleFactory& tensorHandleFactory)
130 {
131 unsigned int inputWidth = 4u;
132 unsigned int inputHeight = 5u;
133 unsigned int inputChannels = 1u;
134 unsigned int inputBatchSize = 1;
135
136 std::vector<float> input = std::vector<float>{
137 -1.0f, 0.1f, 0.5f, 6.25f,
138 0.786f, 5.9875f, -0.5f, 0.384f,
139 6.0001f, 3.5f, 7.5f, 0.896f,
140 2.126f, 12.0f, 0.3f, 0.15f,
141 0.999f, 1.2f, 0.89f, 6.1f,
142 };
143
144 // Calculated manually.
145 std::vector<float> output = std::vector<float>{
146 0.0f, 0.1f, 0.5f, 6.0f,
147 0.786f, 5.9875f, 0.0f, 0.384f,
148 6.0f, 3.5f, 6.0f, 0.896f,
149 2.126f, 6.0f, 0.3f, 0.15f,
150 0.999f, 1.2f, 0.89f, 6.0f,
151 };
152
153 return BoundedReLuTestCommon<armnn::DataType::Float32>(
154 workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
155 inputWidth, inputHeight, inputChannels, inputBatchSize);
156 }
157
BoundedReLuUint8UpperBoundOnlyTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)158 LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
159 armnn::IWorkloadFactory& workloadFactory,
160 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
161 const armnn::ITensorHandleFactory& tensorHandleFactory)
162 {
163 unsigned int inputWidth = 3u;
164 unsigned int inputHeight = 2u;
165 unsigned int inputChannels = 1u;
166 unsigned int inputBatchSize = 1;
167
168 std::vector<uint8_t> input = std::vector<uint8_t>{
169 51, 124, 28,
170 251, 8, 92
171 };
172
173 // Calculated manually.
174 std::vector<uint8_t> output = std::vector<uint8_t>{
175 0, 122, 0,
176 255, 0, 58
177 };
178
179 float inputScale = 12.0f / 255.0f;
180 int32_t inputOffset = 63;
181 float outputScale = 6.0f / 255.0f;
182 int32_t outputOffset = 0;
183
184 return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
185 workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f,
186 inputScale, inputOffset, outputScale, outputOffset,
187 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
188 }
189
BoundedReLuUint8UpperAndLowerBoundTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)190 LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
191 armnn::IWorkloadFactory& workloadFactory,
192 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
193 const armnn::ITensorHandleFactory& tensorHandleFactory)
194 {
195 unsigned int inputWidth = 3u;
196 unsigned int inputHeight = 2u;
197 unsigned int inputChannels = 1u;
198 unsigned int inputBatchSize = 1;
199
200 std::vector<uint8_t> input = std::vector<uint8_t>{
201 51, 230, 28,
202 251, 8, 92
203 };
204
205 // Calculated manually.
206 std::vector<uint8_t> output = std::vector<uint8_t>{
207 51, 192, 32,
208 192, 32, 92
209 };
210
211 int32_t inputOffset = 112;
212 float inputScale = 0.0125f;
213
214 return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
215 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f,
216 inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
217 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
218 }
219
220 namespace
221 {
222
223 struct BoundedReLuRandomInputTestTraits
224 {
225 constexpr static unsigned int inputHeight = 31u;
226 constexpr static unsigned int inputWidth = 19u;
227 constexpr static unsigned int inputChannels = 4u;
228 constexpr static unsigned int inputBatchSize = 2;
229
230 constexpr static unsigned int outputHeight = inputHeight;
231 constexpr static unsigned int outputWidth = inputWidth;
232 constexpr static unsigned int outputChannels = inputChannels;
233 constexpr static unsigned int outputBatchSize = inputBatchSize;
234
GetInputTensorInfo__anon97e7ab7f0111::BoundedReLuRandomInputTestTraits235 static armnn::TensorInfo GetInputTensorInfo()
236 {
237 return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
238 armnn::DataType::Float32);
239 }
240
GetOutputTensorInfo__anon97e7ab7f0111::BoundedReLuRandomInputTestTraits241 static armnn::TensorInfo GetOutputTensorInfo()
242 {
243 return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
244 armnn::DataType::Float32);
245 }
246 };
247
// Runs one activation workload (function and bounds taken from the supplied
// activationDescriptor) over a deterministic pseudo-random Float32 tensor
// whose shape is fixed by BoundedReLuRandomInputTestTraits, and returns the
// raw output tensor. lowerBound/upperBound only shape the random input range.
boost::multi_array<float, 4> BoundedReLuRandomInputTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float lowerBound,
    float upperBound,
    const armnn::ActivationDescriptor& activationDescriptor)
{
    IgnoreUnused(memoryManager);

    const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
    const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();

    boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));

    // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
    // range [lowerBound, upperBound] so that the clamping paths are actually
    // exercised. The fixed seed keeps the data deterministic across runs.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Set up bounded ReLu; all parameters come from the caller's descriptor.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
    descriptor.m_Parameters = activationDescriptor;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    // Handles must be allocated before any data is copied in or out.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get());

    return output;
}
289
290 } // namespace
291
// Compares the BoundedReLu output of the backend under test against a
// reference workload factory, using identical deterministic random input.
LayerTestResult<float, 4> CompareBoundedReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    float upperBound,
    float lowerBound)
{
    LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());

    armnn::ActivationDescriptor activationDescriptor;
    activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
    activationDescriptor.m_A = upperBound; // Upper clamp.
    activationDescriptor.m_B = lowerBound; // Lower clamp.

    // NOTE(review): the random-input range is generated with a fixed lower
    // bound of 0.0f rather than the 'lowerBound' parameter; 'lowerBound' only
    // feeds the descriptor above. Confirm this asymmetry is intentional.
    result.output = BoundedReLuRandomInputTest(
        workloadFactory, memoryManager, tensorHandleFactory, 0.0f, upperBound, activationDescriptor);
    // The reference run passes a null memory manager; BoundedReLuRandomInputTest
    // ignores its memoryManager argument anyway.
    result.outputExpected = BoundedReLuRandomInputTest(
        refWorkloadFactory, nullptr, refTensorHandleFactory, 0.0f, upperBound, activationDescriptor);

    return result;
}
315
// Runs a Linear activation with a = 1, b = 0 (the identity function) over a
// random 5x3x20x17 tensor and asserts that the output equals the input.
// qScale/qOffset are applied to both tensors only for quantized types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> ConstantLinearActivationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);

    unsigned int inputHeight    = 20;
    unsigned int inputWidth     = 17;
    unsigned int inputChannels  = 3;
    unsigned int batchSize      = 5;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    // Identity activation: input and output share the same NCHW shape.
    unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Do linear activation that should leave the tensor unchanged (y = 1*x + 0).
    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = 1.0f;
    data.m_Parameters.m_B = 0.0f;
    data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    // Fixed seed keeps the random input deterministic across runs.
    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // Ensure output equals input.
    ret.outputExpected = input;

    return ret;
}
377
ConstantLinearActivationTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)378 LayerTestResult<float, 4> ConstantLinearActivationTest(
379 armnn::IWorkloadFactory& workloadFactory,
380 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
381 const armnn::ITensorHandleFactory& tensorHandleFactory)
382 {
383 return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory,
384 memoryManager,
385 tensorHandleFactory);
386 }
387
ConstantLinearActivationUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)388 LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
389 armnn::IWorkloadFactory& workloadFactory,
390 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
391 const armnn::ITensorHandleFactory& tensorHandleFactory)
392 {
393 return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
394 workloadFactory, memoryManager, tensorHandleFactory, 4.0f, 3);
395 }
396
ConstantLinearActivationInt16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)397 LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test(
398 armnn::IWorkloadFactory& workloadFactory,
399 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
400 const armnn::ITensorHandleFactory& tensorHandleFactory)
401 {
402 return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
403 workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
404 }
405
// Generic driver for the element-wise activation tests: runs the given
// activation function (with parameters A/B) over a fixed 1x1x1x16 tensor.
// inputData and outputExpectedData are supplied as floats and quantized via
// QuantizedVector using (scale, offset) for the input and (outScale,
// outOffset) for the expected output; for Float32 the data passes through.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::ActivationFunction activationFunction,
    float activationParameterA,
    float activationParameterB,
    float scale,
    int32_t offset,
    const std::vector<float>& inputData,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& outputExpectedData)
{
    IgnoreUnused(memoryManager);

    constexpr static unsigned int inputWidth = 16u;
    constexpr static unsigned int inputHeight = 1u;
    constexpr static unsigned int inputChannels = 1u;
    constexpr static unsigned int inputBatchSize = 1u;

    // Element-wise activation: output geometry mirrors the input.
    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(scale);
        inputTensorInfo.SetQuantizationOffset(offset);
        outputTensorInfo.SetQuantizationScale(outScale);
        outputTensorInfo.SetQuantizationOffset(outOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, scale, offset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Set up the activation workload (function and parameters from the caller).
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = activationFunction;
    descriptor.m_Parameters.m_A = activationParameterA;
    descriptor.m_Parameters.m_B = activationParameterB;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    // Handles must be allocated before any data is copied in or out.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    // Expected values are calculated manually by the caller and quantized
    // with the output scale/offset.
    result.outputExpected =
        MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset));

    return result;
}
478
479 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
SimpleSigmoidTestCommon(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float qScale,int32_t qOffset)480 LayerTestResult<T, 4> SimpleSigmoidTestCommon(
481 armnn::IWorkloadFactory& workloadFactory,
482 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
483 const armnn::ITensorHandleFactory& tensorHandleFactory,
484 float qScale,
485 int32_t qOffset)
486 {
487 std::vector<float> inputData =
488 {
489 -0.1f, -0.2f, -0.3f, -0.4f,
490 0.1f, 0.2f, 0.3f, 0.4f,
491 -1.0f, -2.0f, -3.0f, -4.0f,
492 1.0f, 2.0f, 3.0f, 4.0f
493 };
494
495 // Calculate output values for input.
496 auto f = [](float value)
497 {
498 return 1.0f / (1.0f + std::exp(-value));
499 };
500 std::vector<float> outputExpectedData(inputData.size());
501 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
502
503 return SimpleActivationTest<ArmnnType>(workloadFactory,
504 memoryManager,
505 tensorHandleFactory,
506 armnn::ActivationFunction::Sigmoid,
507 0.f,
508 0.f,
509 qScale,
510 qOffset,
511 inputData,
512 1.f / 256.f,
513 0,
514 outputExpectedData);
515 }
516
SimpleSigmoidTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)517 LayerTestResult<float, 4> SimpleSigmoidTest(
518 armnn::IWorkloadFactory& workloadFactory,
519 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
520 const armnn::ITensorHandleFactory& tensorHandleFactory)
521 {
522 return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager,
523 tensorHandleFactory, 0.0f, 0);
524 }
525
SimpleSigmoidUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)526 LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
527 armnn::IWorkloadFactory& workloadFactory,
528 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
529 const armnn::ITensorHandleFactory& tensorHandleFactory)
530 {
531 return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
532 tensorHandleFactory, 0.1f, 50);
533 }
534
SimpleSigmoidInt16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)535 LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test(
536 armnn::IWorkloadFactory& workloadFactory,
537 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
538 const armnn::ITensorHandleFactory& tensorHandleFactory)
539 {
540 return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager,
541 tensorHandleFactory, 0.1f, 0);
542 }
543
544 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
ReLuTestCommon(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float qScale,int32_t qOffset)545 LayerTestResult<T, 4> ReLuTestCommon(
546 armnn::IWorkloadFactory& workloadFactory,
547 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
548 const armnn::ITensorHandleFactory& tensorHandleFactory,
549 float qScale,
550 int32_t qOffset)
551 {
552 std::vector<float> inputData = {
553 -0.1f, -0.2f, -0.3f, -0.4f,
554 0.1f, 0.2f, 0.3f, 0.4f,
555 -1.0f, -2.0f, -3.0f, -4.0f,
556 1.0f, 2.0f, 3.0f, 4.0f
557 };
558
559 // Calculate output values for input.
560 auto f = [](float value)
561 {
562 return std::fmax(0.0f, value);
563 };
564 std::vector<float> outputExpectedData(inputData.size());
565 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
566
567 return SimpleActivationTest<ArmnnType>(workloadFactory,
568 memoryManager,
569 tensorHandleFactory,
570 armnn::ActivationFunction::ReLu,
571 0.f,
572 0.f,
573 qScale,
574 qOffset,
575 inputData,
576 qScale,
577 qOffset,
578 outputExpectedData);
579 }
580
ReLuInt16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)581 LayerTestResult<int16_t, 4> ReLuInt16Test(
582 armnn::IWorkloadFactory& workloadFactory,
583 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
584 const armnn::ITensorHandleFactory& tensorHandleFactory)
585 {
586 return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
587 }
588
589
ReLuUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)590 LayerTestResult<uint8_t, 4> ReLuUint8Test(
591 armnn::IWorkloadFactory& workloadFactory,
592 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
593 const armnn::ITensorHandleFactory& tensorHandleFactory)
594 {
595 return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
596 }
597
ReLuTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)598 LayerTestResult<float, 4> ReLuTest(
599 armnn::IWorkloadFactory& workloadFactory,
600 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
601 const armnn::ITensorHandleFactory& tensorHandleFactory)
602 {
603 return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
604 }
605
606
607 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
BoundedReLuTestCommon(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float qScale,int32_t qOffset)608 LayerTestResult<T, 4> BoundedReLuTestCommon(
609 armnn::IWorkloadFactory& workloadFactory,
610 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
611 const armnn::ITensorHandleFactory& tensorHandleFactory,
612 float qScale,
613 int32_t qOffset)
614 {
615 std::vector<float> inputData = {
616 -0.1f, -0.2f, -0.3f, -0.4f,
617 0.1f, 0.2f, 0.3f, 0.4f,
618 -1.0f, -2.0f, -3.0f, -4.0f,
619 1.0f, 2.0f, 3.0f, 4.0f
620 };
621 const float a = 1.0f;
622 const float b = -1.0f;
623 // Calculate output values for input.
624 auto f = [a, b](float value)
625 {
626 return std::min(a, std::max(b, value));
627 };
628 std::vector<float> outputExpectedData(inputData.size());
629 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
630
631 return SimpleActivationTest<ArmnnType>(workloadFactory,
632 memoryManager,
633 tensorHandleFactory,
634 armnn::ActivationFunction::BoundedReLu,
635 a,
636 b,
637 qScale,
638 qOffset,
639 inputData,
640 qScale,
641 qOffset,
642 outputExpectedData);
643 }
644
BoundedReLuInt16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)645 LayerTestResult<int16_t, 4> BoundedReLuInt16Test(
646 armnn::IWorkloadFactory& workloadFactory,
647 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
648 const armnn::ITensorHandleFactory& tensorHandleFactory)
649 {
650 return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
651 }
652
653
654
655 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
SoftReLuTestCommon(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float qScale,int32_t qOffset)656 LayerTestResult<T, 4> SoftReLuTestCommon(
657 armnn::IWorkloadFactory& workloadFactory,
658 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
659 const armnn::ITensorHandleFactory& tensorHandleFactory,
660 float qScale,
661 int32_t qOffset)
662 {
663 std::vector<float> inputData = {
664 -0.1f, -0.2f, -0.3f, -0.4f,
665 0.1f, 0.2f, 0.3f, 0.4f,
666 -1.0f, -2.0f, -3.0f, -4.0f,
667 1.0f, 2.0f, 3.0f, 4.0f
668 };
669
670 // Calculate output values for input.
671 auto f = [](float value)
672 {
673 return std::log(1.0f + std::exp(value));
674 };
675 std::vector<float> outputExpectedData(inputData.size());
676 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
677
678 return SimpleActivationTest<ArmnnType>(workloadFactory,
679 memoryManager,
680 tensorHandleFactory,
681 armnn::ActivationFunction::SoftReLu,
682 0.f,
683 0.f,
684 qScale,
685 qOffset,
686 inputData,
687 qScale,
688 qOffset,
689 outputExpectedData);
690 }
691
SoftReLuTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)692 LayerTestResult<float, 4> SoftReLuTest(
693 armnn::IWorkloadFactory& workloadFactory,
694 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
695 const armnn::ITensorHandleFactory& tensorHandleFactory)
696 {
697 return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
698 }
699
SoftReLuUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)700 LayerTestResult<uint8_t, 4> SoftReLuUint8Test(
701 armnn::IWorkloadFactory& workloadFactory,
702 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
703 const armnn::ITensorHandleFactory& tensorHandleFactory)
704 {
705 return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
706 tensorHandleFactory, 0.0625f, 64);
707 }
708
SoftReLuInt16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)709 LayerTestResult<int16_t, 4> SoftReLuInt16Test(
710 armnn::IWorkloadFactory& workloadFactory,
711 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
712 const armnn::ITensorHandleFactory& tensorHandleFactory)
713 {
714 return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
715 }
716
717 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LeakyReLuTestCommon(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float qScale,int32_t qOffset)718 LayerTestResult<T, 4> LeakyReLuTestCommon(
719 armnn::IWorkloadFactory& workloadFactory,
720 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
721 const armnn::ITensorHandleFactory& tensorHandleFactory,
722 float qScale,
723 int32_t qOffset)
724 {
725 std::vector<float> inputData = {
726 -0.1f, -0.2f, -0.3f, -0.4f,
727 0.1f, 0.2f, 0.3f, 0.4f,
728 -1.0f, -2.0f, -3.0f, -4.0f,
729 1.0f, 2.0f, 3.0f, 4.0f
730 };
731
732 const float a = 0.01f;
733 // Calculate output values for input.
734 auto f = [a](float value)
735 {
736 return value > 0.0f ? value : (value * a);
737 };
738 std::vector<float> outputExpectedData(inputData.size());
739 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
740
741 return SimpleActivationTest<ArmnnType>(workloadFactory,
742 memoryManager,
743 tensorHandleFactory,
744 armnn::ActivationFunction::LeakyReLu,
745 a,
746 0.f,
747 qScale,
748 qOffset,
749 inputData,
750 qScale,
751 qOffset,
752 outputExpectedData);
753 }
754
LeakyReLuTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)755 LayerTestResult<float, 4> LeakyReLuTest(
756 armnn::IWorkloadFactory& workloadFactory,
757 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
758 const armnn::ITensorHandleFactory& tensorHandleFactory)
759 {
760 return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
761 }
762
LeakyReLuUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)763 LayerTestResult<uint8_t, 4> LeakyReLuUint8Test(
764 armnn::IWorkloadFactory& workloadFactory,
765 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
766 const armnn::ITensorHandleFactory& tensorHandleFactory)
767 {
768 return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
769 tensorHandleFactory, 0.0625f, 64);
770 }
771
LeakyReLuInt16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)772 LayerTestResult<int16_t, 4> LeakyReLuInt16Test(
773 armnn::IWorkloadFactory& workloadFactory,
774 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
775 const armnn::ITensorHandleFactory& tensorHandleFactory)
776 {
777 return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
778 }
779
780 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
AbsTestCommon(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float qScale,int32_t qOffset)781 LayerTestResult<T, 4> AbsTestCommon(
782 armnn::IWorkloadFactory& workloadFactory,
783 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
784 const armnn::ITensorHandleFactory& tensorHandleFactory,
785 float qScale,
786 int32_t qOffset)
787 {
788 std::vector<float> inputData = {
789 -0.1f, -0.2f, -0.3f, -0.4f,
790 0.1f, 0.2f, 0.3f, 0.4f,
791 -1.0f, -2.0f, -3.0f, -4.0f,
792 1.0f, 2.0f, 3.0f, 4.0f
793 };
794
795 // Calculate output values for input.
796 auto f = [](float value)
797 {
798 return std::abs(value);
799 };
800 std::vector<float> outputExpectedData(inputData.size());
801 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
802
803 return SimpleActivationTest<ArmnnType>(workloadFactory,
804 memoryManager,
805 tensorHandleFactory,
806 armnn::ActivationFunction::Abs,
807 0.f,
808 0.f,
809 qScale,
810 qOffset,
811 inputData,
812 qScale,
813 qOffset,
814 outputExpectedData);
815 }
816
AbsTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)817 LayerTestResult<float, 4> AbsTest(
818 armnn::IWorkloadFactory& workloadFactory,
819 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
820 const armnn::ITensorHandleFactory& tensorHandleFactory)
821 {
822 return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
823 }
824
AbsUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)825 LayerTestResult<uint8_t, 4> AbsUint8Test(
826 armnn::IWorkloadFactory& workloadFactory,
827 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
828 const armnn::ITensorHandleFactory& tensorHandleFactory)
829 {
830 return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
831 }
832
AbsInt16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)833 LayerTestResult<int16_t, 4> AbsInt16Test(
834 armnn::IWorkloadFactory& workloadFactory,
835 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
836 const armnn::ITensorHandleFactory& tensorHandleFactory)
837 {
838 return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
839 }
840
SqrtNNTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)841 LayerTestResult<float, 5> SqrtNNTest(
842 armnn::IWorkloadFactory& workloadFactory,
843 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
844 const armnn::ITensorHandleFactory& tensorHandleFactory)
845 {
846 IgnoreUnused(memoryManager);
847 const int inputDataSize = 120;
848 std::vector<float> inputData(inputDataSize);
849
850 for (unsigned int i = 0u; i < inputDataSize; ++i)
851 {
852 inputData[i] = static_cast<float>(i) / 10;
853 }
854
855 auto f = [](float value)
856 {
857 return std::sqrt(value);
858 };
859 std::vector<float> outputExpectedData(inputDataSize);
860 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
861
862 armnn::TensorInfo inputTensorInfo(
863 { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
864 armnn::TensorInfo outputTensorInfo(
865 { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
866
867 LayerTestResult<float, 5> result(inputTensorInfo);
868
869 auto input = MakeTensor<float, 5>(inputTensorInfo, inputData);
870
871 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
872 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
873
874 armnn::ActivationQueueDescriptor descriptor;
875 armnn::WorkloadInfo workloadInfo;
876 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
877 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
878
879 descriptor.m_Parameters.m_Function = armnn::ActivationFunction::Sqrt;
880
881 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
882
883 inputHandle->Allocate();
884 outputHandle->Allocate();
885
886 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0][0]);
887
888 workload->Execute();
889
890 CopyDataFromITensorHandle(&result.output[0][0][0][0][0], outputHandle.get());
891
892 // Calculated manually.
893 result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, outputExpectedData);
894
895 return result;
896 };
897
898 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
SqrtTestCommon(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float qScale,int32_t qOffset)899 LayerTestResult<T, 4> SqrtTestCommon(
900 armnn::IWorkloadFactory& workloadFactory,
901 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
902 const armnn::ITensorHandleFactory& tensorHandleFactory,
903 float qScale,
904 int32_t qOffset)
905 {
906 std::vector<float> inputData = {
907 0.1f, 0.2f, 0.3f, 0.4f,
908 0.1f, 0.2f, 0.3f, 0.4f,
909 1.0f, 2.0f, 3.0f, 4.0f,
910 1.0f, 2.0f, 3.0f, 4.0f
911 };
912
913 // Calculate output values for input.
914 auto f = [](float value)
915 {
916 return std::sqrt(value);
917 };
918 std::vector<float> outputExpectedData(inputData.size());
919 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
920
921 return SimpleActivationTest<ArmnnType>(workloadFactory,
922 memoryManager,
923 tensorHandleFactory,
924 armnn::ActivationFunction::Sqrt,
925 0.f,
926 0.f,
927 qScale,
928 qOffset,
929 inputData,
930 qScale,
931 qOffset,
932 outputExpectedData);
933 }
934
SqrtTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)935 LayerTestResult<float, 4> SqrtTest(
936 armnn::IWorkloadFactory& workloadFactory,
937 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
938 const armnn::ITensorHandleFactory& tensorHandleFactory)
939 {
940 return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
941 }
942
SqrtUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)943 LayerTestResult<uint8_t, 4> SqrtUint8Test(
944 armnn::IWorkloadFactory& workloadFactory,
945 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
946 const armnn::ITensorHandleFactory& tensorHandleFactory)
947 {
948 return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
949 }
950
SqrtInt16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)951 LayerTestResult<int16_t, 4> SqrtInt16Test(
952 armnn::IWorkloadFactory& workloadFactory,
953 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
954 const armnn::ITensorHandleFactory& tensorHandleFactory)
955 {
956 return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
957 }
958
959 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
SquareTestCommon(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float qScale,int32_t qOffset)960 LayerTestResult<T, 4> SquareTestCommon(
961 armnn::IWorkloadFactory& workloadFactory,
962 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
963 const armnn::ITensorHandleFactory& tensorHandleFactory,
964 float qScale,
965 int32_t qOffset)
966 {
967 std::vector<float> inputData = {
968 -0.1f, -0.2f, -0.3f, -0.4f,
969 0.1f, 0.2f, 0.3f, 0.4f,
970 -1.0f, -2.0f, -3.0f, -4.0f,
971 1.0f, 2.0f, 3.0f, 4.0f
972 };
973
974 // Calculate output values for input.
975 auto f = [](float value)
976 {
977 return std::pow(value,2);
978 };
979 std::vector<float> outputExpectedData(inputData.size());
980 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
981
982 return SimpleActivationTest<ArmnnType>(workloadFactory,
983 memoryManager,
984 tensorHandleFactory,
985 armnn::ActivationFunction::Square,
986 0.f,
987 0.f,
988 qScale,
989 qOffset,
990 inputData,
991 qScale,
992 qOffset,
993 outputExpectedData);
994 }
995
SquareTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)996 LayerTestResult<float, 4> SquareTest(
997 armnn::IWorkloadFactory& workloadFactory,
998 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
999 const armnn::ITensorHandleFactory& tensorHandleFactory)
1000 {
1001 return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1002 }
1003
SquareUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)1004 LayerTestResult<uint8_t, 4> SquareUint8Test(
1005 armnn::IWorkloadFactory& workloadFactory,
1006 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1007 const armnn::ITensorHandleFactory& tensorHandleFactory)
1008 {
1009 return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
1010 tensorHandleFactory, 0.0625f, 64);
1011 }
1012
SquareInt16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)1013 LayerTestResult<int16_t, 4> SquareInt16Test(
1014 armnn::IWorkloadFactory& workloadFactory,
1015 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1016 const armnn::ITensorHandleFactory& tensorHandleFactory)
1017 {
1018 return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1019 }
1020
1021 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
TanhTestCommon(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float qScale,int32_t qOffset)1022 LayerTestResult<T, 4> TanhTestCommon(
1023 armnn::IWorkloadFactory& workloadFactory,
1024 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1025 const armnn::ITensorHandleFactory& tensorHandleFactory,
1026 float qScale,
1027 int32_t qOffset)
1028 {
1029 std::vector<float> inputData = {
1030 -0.1f, -0.2f, -0.3f, -0.4f,
1031 0.1f, 0.2f, 0.3f, 0.4f,
1032 -1.0f, -2.0f, -3.0f, -4.0f,
1033 1.0f, 2.0f, 3.0f, 4.0f
1034 };
1035
1036 const float a = 2.0f;
1037 const float b = 3.0f;
1038 // Calculate output values for input.
1039 auto f = [a, b](float value)
1040 {
1041 return a * tanhf(b * value);
1042 };
1043 std::vector<float> outputExpectedData(inputData.size());
1044 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1045
1046 return SimpleActivationTest<ArmnnType>(workloadFactory,
1047 memoryManager,
1048 tensorHandleFactory,
1049 armnn::ActivationFunction::TanH,
1050 a,
1051 b,
1052 qScale,
1053 qOffset,
1054 inputData,
1055 qScale,
1056 qOffset,
1057 outputExpectedData);
1058 }
1059
TanhTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)1060 LayerTestResult<float, 4> TanhTest(
1061 armnn::IWorkloadFactory& workloadFactory,
1062 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1063 const armnn::ITensorHandleFactory& tensorHandleFactory)
1064 {
1065 return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1066 }
1067
TanhUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)1068 LayerTestResult<uint8_t, 4> TanhUint8Test(
1069 armnn::IWorkloadFactory& workloadFactory,
1070 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1071 const armnn::ITensorHandleFactory& tensorHandleFactory)
1072 {
1073 return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
1074 }
1075
TanhInt16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)1076 LayerTestResult<int16_t, 4> TanhInt16Test(
1077 armnn::IWorkloadFactory& workloadFactory,
1078 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1079 const armnn::ITensorHandleFactory& tensorHandleFactory)
1080 {
1081 return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1082 }
1083
1084
1085 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
EluTestCommon(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float qScale,int32_t qOffset)1086 LayerTestResult<T, 4> EluTestCommon(
1087 armnn::IWorkloadFactory& workloadFactory,
1088 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1089 const armnn::ITensorHandleFactory& tensorHandleFactory,
1090 float qScale,
1091 int32_t qOffset)
1092 {
1093 std::vector<float> inputData = {
1094 -0.1f, -0.2f, -0.3f, -0.4f,
1095 0.1f, 0.2f, 0.3f, 0.4f,
1096 -1.0f, -2.0f, -3.0f, -4.0f,
1097 1.0f, 2.0f, 3.0f, 4.0f
1098 };
1099
1100
1101 const float a = 0.01f;
1102 // Calculate output values for input.
1103 auto f = [a](float value)
1104 {
1105 return (value >= 0) ? value : a * (expf(value) - 1);
1106 };
1107 std::vector<float> outputExpectedData(inputData.size());
1108 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1109
1110 return SimpleActivationTest<ArmnnType>(workloadFactory,
1111 memoryManager,
1112 tensorHandleFactory,
1113 armnn::ActivationFunction::Elu,
1114 a,
1115 0.0f,
1116 qScale,
1117 qOffset,
1118 inputData,
1119 qScale,
1120 qOffset,
1121 outputExpectedData);
1122 }
1123
EluTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)1124 LayerTestResult<float, 4> EluTest(
1125 armnn::IWorkloadFactory& workloadFactory,
1126 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1127 const armnn::ITensorHandleFactory& tensorHandleFactory)
1128 {
1129 return EluTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1130 }
1131
EluUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)1132 LayerTestResult<uint8_t, 4> EluUint8Test(
1133 armnn::IWorkloadFactory& workloadFactory,
1134 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1135 const armnn::ITensorHandleFactory& tensorHandleFactory)
1136 {
1137 return EluTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
1138 }
1139
EluInt16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)1140 LayerTestResult<int16_t, 4> EluInt16Test(
1141 armnn::IWorkloadFactory& workloadFactory,
1142 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1143 const armnn::ITensorHandleFactory& tensorHandleFactory)
1144 {
1145 return EluTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1146 }
1147
1148
1149 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
HardSwishTestCommon(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float qScale,int32_t qOffset)1150 LayerTestResult<T, 4> HardSwishTestCommon(
1151 armnn::IWorkloadFactory& workloadFactory,
1152 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1153 const armnn::ITensorHandleFactory& tensorHandleFactory,
1154 float qScale,
1155 int32_t qOffset)
1156 {
1157 std::vector<float> inputData = {
1158 -0.1f, -0.2f, -0.3f, -0.4f,
1159 0.1f, 0.2f, 0.3f, 0.4f,
1160 -1.0f, -2.0f, -3.0f, -4.0f,
1161 1.0f, 2.0f, 3.0f, 4.0f
1162 };
1163 // Calculate output values for input.
1164 auto f = [](float x)
1165 {
1166 // Break down the calculation to help with verification.
1167 // hard_swish(x) = x * relu6(x+3) / 6
1168 // relu6(x) = min(max(x,0),6)
1169 float reLu6_step1 = std::max((x + 3),0.0f);
1170 float reLu6Complete = std::min(reLu6_step1, 6.0f);
1171 float hardSwish_step1 = x * reLu6Complete;
1172 float result = hardSwish_step1 / 6;
1173 return result;
1174 };
1175 std::vector<float> outputExpectedData(inputData.size());
1176 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1177
1178 return SimpleActivationTest<ArmnnType>(workloadFactory,
1179 memoryManager,
1180 tensorHandleFactory,
1181 armnn::ActivationFunction::HardSwish,
1182 0.f,
1183 0.f,
1184 qScale,
1185 qOffset,
1186 inputData,
1187 qScale,
1188 qOffset,
1189 outputExpectedData);
1190 }
1191
HardSwishTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)1192 LayerTestResult<float, 4> HardSwishTest(
1193 armnn::IWorkloadFactory& workloadFactory,
1194 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1195 const armnn::ITensorHandleFactory& tensorHandleFactory)
1196 {
1197 return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1198 }
1199
HardSwishUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)1200 LayerTestResult<uint8_t, 4> HardSwishUint8Test(
1201 armnn::IWorkloadFactory& workloadFactory,
1202 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1203 const armnn::ITensorHandleFactory& tensorHandleFactory)
1204 {
1205 return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
1206 tensorHandleFactory, 0.1f, 64);
1207 }
1208
HardSwishInt16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)1209 LayerTestResult<int16_t, 4> HardSwishInt16Test(
1210 armnn::IWorkloadFactory& workloadFactory,
1211 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1212 const armnn::ITensorHandleFactory& tensorHandleFactory)
1213 {
1214 return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1215 }
1216
1217
// Runs the same activation (function 'f', parameters a/b) on two workload
// factories over an identical random input and returns a result whose
// 'output' comes from the factory under test and whose 'outputExpected'
// comes from the reference factory; the caller compares the two tensors.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> CompareActivationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::ActivationFunction f,
    unsigned int batchSize = 5,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    // Fixed NCHW spatial dimensions; only the batch size is configurable.
    unsigned int width = 17;
    unsigned int height = 29;
    unsigned int channels = 2;

    // Activation parameters (descriptor m_A / m_B) fed to both workloads.
    float a = 0.234f;
    float b = -12.345f;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // Sqrt is only defined for non-negative inputs, so clamp the random range.
    float minVal = -10.f;
    if (f == armnn::ActivationFunction::Sqrt)
    {
        minVal = 0.f;
    }

    // Fixed seed (21453) makes the comparison deterministic across runs.
    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);


    LayerTestResult<T,4> ret(outputTensorInfo);
    // Resize both result arrays to the full NCHW extents before copying back.
    auto boostArrayExtents = boost::extents
        [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
        [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
        [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
        [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
    ret.output.resize(boostArrayExtents);
    ret.outputExpected.resize(boostArrayExtents);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = a;
    data.m_Parameters.m_B = b;
    data.m_Parameters.m_Function = f;

    // The reference workload reuses the same descriptor/info but is re-pointed
    // at the reference factory's tensor handles.
    armnn::ActivationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
    ARMNN_ASSERT(workload != nullptr);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
    ARMNN_ASSERT(workloadRef != nullptr);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both workloads receive an identical copy of the random input.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
1313
CompareActivationTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,armnn::IWorkloadFactory & refWorkloadFactory,const armnn::ITensorHandleFactory & tensorHandleFactory,const armnn::ITensorHandleFactory & refTensorHandleFactory,armnn::ActivationFunction f,unsigned int batchSize)1314 LayerTestResult<float,4> CompareActivationTest(
1315 armnn::IWorkloadFactory& workloadFactory,
1316 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1317 armnn::IWorkloadFactory& refWorkloadFactory,
1318 const armnn::ITensorHandleFactory& tensorHandleFactory,
1319 const armnn::ITensorHandleFactory& refTensorHandleFactory,
1320 armnn::ActivationFunction f,
1321 unsigned int batchSize)
1322 {
1323 return CompareActivationTestImpl<armnn::DataType::Float32>(
1324 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
1325 refTensorHandleFactory, f, batchSize);
1326 }
1327
CompareActivationUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,armnn::IWorkloadFactory & refWorkloadFactory,const armnn::ITensorHandleFactory & tensorHandleFactory,const armnn::ITensorHandleFactory & refTensorHandleFactory,armnn::ActivationFunction f)1328 LayerTestResult<uint8_t,4> CompareActivationUint8Test(
1329 armnn::IWorkloadFactory& workloadFactory,
1330 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1331 armnn::IWorkloadFactory& refWorkloadFactory,
1332 const armnn::ITensorHandleFactory& tensorHandleFactory,
1333 const armnn::ITensorHandleFactory& refTensorHandleFactory,
1334 armnn::ActivationFunction f)
1335 {
1336 return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
1337 workloadFactory, memoryManager, refWorkloadFactory,
1338 tensorHandleFactory, refTensorHandleFactory, f, 5, 0.1f, 50);
1339 }
1340
CompareActivationInt16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,armnn::IWorkloadFactory & refWorkloadFactory,const armnn::ITensorHandleFactory & tensorHandleFactory,const armnn::ITensorHandleFactory & refTensorHandleFactory,armnn::ActivationFunction f)1341 LayerTestResult<int16_t,4> CompareActivationInt16Test(
1342 armnn::IWorkloadFactory& workloadFactory,
1343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1344 armnn::IWorkloadFactory& refWorkloadFactory,
1345 const armnn::ITensorHandleFactory& tensorHandleFactory,
1346 const armnn::ITensorHandleFactory& refTensorHandleFactory,
1347 armnn::ActivationFunction f)
1348 {
1349 return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
1350 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
1351 refTensorHandleFactory, f, 5, 0.1f, 0);
1352 }
1353