1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include "SoftmaxTestImpl.hpp"
7
8 #include <QuantizeHelper.hpp>
9 #include <ResolveType.hpp>
10
11
12 #include <backendsCommon/CpuTensorHandle.hpp>
13
14 #include <backendsCommon/test/TensorCopyUtils.hpp>
15 #include <backendsCommon/test/WorkloadTestUtils.hpp>
16
17 #include <test/TensorHelpers.hpp>
18
19 #include <algorithm>
20
21 namespace
22 {
23
// Shared fixture for the simple rank-3 softmax tests: a {1, 8, 1} input and
// the corresponding expected softmax output (values precomputed offline).
struct Simple3dSoftmaxOutputData
{
    // Expected softmax result for inputData below.
    const std::vector<float> outputData =
    {
        0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
        0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
    };

    const armnn::TensorShape inputShape{ 1, 8, 1 };

    // Eight raw input values laid out along the middle dimension.
    const std::vector<float> inputData =
    {
        0.0f, 1.0f, 0.0f, 0.0f,
        0.5f, 0.0f, 0.0f, 0.0f,
    };
};
40
// Shared fixture for the simple rank-4 softmax tests: same eight values as the
// 3D fixture, reshaped to {1, 8, 1, 1} (expected output precomputed offline).
struct Simple4dSoftmaxData
{
    const armnn::TensorShape inputShape{ 1, 8, 1, 1 };

    // Expected softmax result for inputData below.
    const std::vector<float> outputData =
    {
        0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
        0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
    };

    const std::vector<float> inputData =
    {
        0.0f, 1.0f, 0.0f, 0.0f,
        0.5f, 0.0f, 0.0f, 0.0f
    };
};
57
58 template<armnn::DataType ArmnnType, std::size_t n, typename T = armnn::ResolveType<ArmnnType>>
SimpleSoftmaxBaseTestImpl(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta,const armnn::TensorShape & inputShape,const std::vector<float> & outputData,const std::vector<float> & inputData,int axis=-1)59 LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
60 armnn::IWorkloadFactory& workloadFactory,
61 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
62 const armnn::ITensorHandleFactory& tensorHandleFactory,
63 float beta,
64 const armnn::TensorShape& inputShape,
65 const std::vector<float>& outputData,
66 const std::vector<float>& inputData,
67 int axis = -1)
68 {
69 using std::exp;
70
71 const float qScale = 1.f / 256.f;
72 const int qOffset = 0;
73
74 armnn::TensorInfo inputTensorInfo;
75 armnn::TensorInfo outputTensorInfo;
76
77 inputTensorInfo = armnn::TensorInfo(inputShape, ArmnnType);
78 inputTensorInfo.SetQuantizationScale(qScale);
79 inputTensorInfo.SetQuantizationOffset(qOffset);
80
81 outputTensorInfo = armnn::TensorInfo(inputShape, ArmnnType);
82 outputTensorInfo.SetQuantizationScale(qScale);
83 outputTensorInfo.SetQuantizationOffset(qOffset);
84
85 LayerTestResult<T, n> ret(outputTensorInfo);
86
87 // Each row is independently softmax'd.
88 auto input = MakeTensor<T, n>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
89
90 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
91 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
92
93 armnn::SoftmaxQueueDescriptor data;
94 data.m_Parameters.m_Beta = beta;
95 data.m_Parameters.m_Axis = axis;
96
97 armnn::WorkloadInfo info;
98 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
99 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
100
101 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
102
103 inputHandle->Allocate();
104 outputHandle->Allocate();
105 CopyDataToITensorHandle(inputHandle.get(), input.origin());
106
107 ARMNN_ASSERT(workload);
108
109 ExecuteWorkload(*workload, memoryManager);
110
111 CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
112
113 std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputData, qScale, qOffset);
114 ret.outputExpected = MakeTensor<T, n>(outputTensorInfo, expectedOutput);
115
116 return ret;
117 }
118
119 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
SimpleSoftmaxTestImpl(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta)120 LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
121 armnn::IWorkloadFactory& workloadFactory,
122 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
123 const armnn::ITensorHandleFactory& tensorHandleFactory,
124 float beta)
125 {
126 using std::exp;
127 const armnn::TensorShape inputShape{ 2, 4 };
128
129 float x0[4] = { exp((0.f - 1.0f) * beta), exp((1.0f - 1.0f) * beta),
130 exp((0.0f - 1.0f) * beta), exp((0.0f - 1.0f) * beta) };
131 float sum0 = x0[0] + x0[1] + x0[2] + x0[3];
132 float x1[4] = { exp((0.5f - 0.5f) * beta), exp((0.0f - 0.5f) * beta),
133 exp((0.0f - 0.5f) * beta), exp((0.0f - 0.5f) * beta) };
134 float sum1 = x1[0] + x1[1] + x1[2] + x1[3];
135
136 const std::vector<float> outputData = { x0[0] / sum0, x0[1] / sum0, x0[2] / sum0, x0[3] / sum0,
137 x1[0] / sum1, x1[1] / sum1, x1[2] / sum1, x1[3] / sum1 };
138
139 const std::vector<float> inputData =
140 {
141 0.f, 1.f, 0.f, 0.f,
142 .5f, 0.f, 0.f, 0.f,
143 };
144
145 return SimpleSoftmaxBaseTestImpl<ArmnnType, 2>(workloadFactory, memoryManager, tensorHandleFactory, beta,
146 inputShape, outputData, inputData);
147 }
148
// Builds a 2D softmax test for the requested axis. Negative axis values count
// from the back (Python-style), so -2/0 and -1/1 select the same dimension.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float beta,
    int axis)
{
    armnn::TensorShape inputShape;
    std::vector<float> inputData;
    std::vector<float> outputData;
    switch (axis)
    {
        case -2:
        case 0:
        {
            // Softmax over the outer dimension: shape {5, 2}, so each of the
            // two columns is normalised independently over five entries.
            inputShape = {5, 2};

            inputData =
            {
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
            };

            // Expected values precomputed offline.
            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f
            };
            break;
        }
        case -1:
        case 1:
        {
            // Softmax over the inner dimension: shape {2, 5}, each row of five
            // values is normalised independently.
            inputShape = {2, 5};

            inputData =
            {
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f
            };
            break;
        }
    }
    // NOTE(review): no default case — an axis outside [-2, 1] falls through
    // with an empty shape and empty data; presumably callers only pass valid
    // axes. Verify against call sites.
    return SimpleSoftmaxBaseTestImpl<ArmnnType, 2>(workloadFactory, memoryManager, tensorHandleFactory, beta,
                                                   inputShape, outputData, inputData, axis);
}
204
205 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Simple3dSoftmaxTestImpl(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta,const armnn::TensorShape & inputShape,const std::vector<float> & outputData,const std::vector<float> & inputData,int axis=1)206 LayerTestResult<T, 3> Simple3dSoftmaxTestImpl(
207 armnn::IWorkloadFactory& workloadFactory,
208 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
209 const armnn::ITensorHandleFactory& tensorHandleFactory,
210 float beta,
211 const armnn::TensorShape& inputShape,
212 const std::vector<float>& outputData,
213 const std::vector<float>& inputData,
214 int axis = 1)
215 {
216 return SimpleSoftmaxBaseTestImpl<ArmnnType, 3>(workloadFactory, memoryManager, tensorHandleFactory, beta,
217 inputShape, outputData, inputData, axis);
218 }
219
220 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Simple4dSoftmaxTestImpl(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta,const armnn::TensorShape & inputShape,const std::vector<float> & outputData,const std::vector<float> & inputData,int axis=1)221 LayerTestResult<T, 4> Simple4dSoftmaxTestImpl(
222 armnn::IWorkloadFactory& workloadFactory,
223 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
224 const armnn::ITensorHandleFactory& tensorHandleFactory,
225 float beta,
226 const armnn::TensorShape& inputShape,
227 const std::vector<float>& outputData,
228 const std::vector<float>& inputData,
229 int axis = 1)
230 {
231
232 return SimpleSoftmaxBaseTestImpl<ArmnnType, 4>(workloadFactory, memoryManager, tensorHandleFactory, beta,
233 inputShape, outputData, inputData, axis);
234 }
235
// Runs the same randomly-generated softmax workload on two factories — the
// backend under test and a reference backend — and returns both outputs in a
// single LayerTestResult so the caller can compare actual vs. reference.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> CompareSoftmaxTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    float beta)
{
    const int batchSize = 20;
    const int channels = 30;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { batchSize, channels };

    inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
    // Same quantization parameters on both tensors (see the simple tests).
    float qScale = 1.f / 256.f;
    int qOffset = 0;
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);


    LayerTestResult<T, 2> ret(outputTensorInfo);
    // Fixed seed so both backends see the identical pseudo-random input.
    auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // NOTE(review): m_Axis is left at its default here, unlike the simple
    // tests which set it explicitly.
    armnn::SoftmaxQueueDescriptor data;
    data.m_Parameters.m_Beta = beta;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef =
        refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef =
        refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    // The reference descriptor starts as a copy and then has its handles
    // rebound to the reference factory's tensors.
    armnn::SoftmaxQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo);

    // Allocate all handles before copying the shared input into both.
    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]);

    ExecuteWorkload(*workload, memoryManager);

    workloadRef->Execute();

    // Actual result goes to 'output', reference result to 'outputExpected'.
    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get());

    return ret;
}
307
308 } // anonymous namespace
309
SimpleSoftmaxTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta)310 LayerTestResult<float,2> SimpleSoftmaxTest(
311 armnn::IWorkloadFactory& workloadFactory,
312 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
313 const armnn::ITensorHandleFactory& tensorHandleFactory,
314 float beta)
315 {
316 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, beta);
317 }
318
SimpleAxisSoftmaxTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta,int axis)319 LayerTestResult<float,2> SimpleAxisSoftmaxTest(
320 armnn::IWorkloadFactory& workloadFactory,
321 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
322 const armnn::ITensorHandleFactory& tensorHandleFactory,
323 float beta,
324 int axis)
325 {
326 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager,
327 tensorHandleFactory, beta, axis);
328 }
329
Simple3dSoftmaxTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta)330 LayerTestResult<float,3> Simple3dSoftmaxTest(
331 armnn::IWorkloadFactory& workloadFactory,
332 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
333 const armnn::ITensorHandleFactory& tensorHandleFactory,
334 float beta)
335 {
336 Simple3dSoftmaxOutputData data;
337 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, beta,
338 data.inputShape, data.outputData, data.inputData);
339 }
340
// 3D softmax with an explicit axis, Float32. For each valid axis (negative
// values count from the back) the shape is chosen so that the dimension being
// softmax'd has extent 5; the expected values were precomputed offline.
LayerTestResult<float,3> Simple3dAxisSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float beta,
    int axis)
{
    armnn::TensorShape inputShape;
    std::vector<float> inputData;
    std::vector<float> outputData;
    switch (axis)
    {
        case -3:
        case 0:
        {
            // Softmax over dimension 0 of shape {5, 2, 2}.
            inputShape = {5, 2, 2};

            inputData =
            {
                17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,

                15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.236882800924671f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
                0.087144312427294f,

                0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
                0.032058600957022f,
                0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f
            };
            break;
        }
        case -2:
        case 1:
        {
            // Softmax over dimension 1 of shape {2, 5, 2}.
            inputShape = {2, 5, 2};

            inputData =
            {
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,

                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f,

                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f
            };
            break;
        }
        case -1:
        case 2:
        {
            // Softmax over the innermost dimension of shape {2, 2, 5}.
            inputShape = {2, 2, 5};

            inputData =
            {
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,

                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f
            };
            break;
        }
    }
    // NOTE(review): no default case — an axis outside [-3, 2] falls through
    // with empty shape/data; presumably callers only pass valid axes.

    return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, beta,
                                                             inputShape, outputData, inputData, axis);
}
435
Simple4dSoftmaxTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta)436 LayerTestResult<float,4> Simple4dSoftmaxTest(
437 armnn::IWorkloadFactory& workloadFactory,
438 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
439 const armnn::ITensorHandleFactory& tensorHandleFactory,
440 float beta)
441 {
442 Simple4dSoftmaxData data;
443 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory,
444 beta, data.inputShape, data.outputData, data.inputData);
445 }
446
// 4D softmax with an explicit axis, Float32. For each valid axis (negative
// values count from the back) the shape places an extent-5 dimension at the
// softmax'd position; expected values were precomputed offline.
LayerTestResult<float,4> Simple4dAxisSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float beta,
    int axis)
{
    armnn::TensorShape inputShape;
    std::vector<float> inputData;
    std::vector<float> outputData;
    switch (axis)
    {
        case -4:
        case 0:
        {
            // Softmax over dimension 0 of shape {5, 2, 2, 2}.
            inputShape = {5, 2, 2, 2};

            inputData =
            {
                17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f,
                16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f,
                15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f,
                14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.643914213228014f,
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f,
                0.236882800924671f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
                0.236882800924671f,
                0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
                0.087144312427294f,

                0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
                0.032058600957022f,
                0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
                0.032058600957022f,
                0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f,
                7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f, 7.246299848982885e-08f
            };
            break;
        }
        case -3:
        case 1:
        {
            // Softmax over dimension 1 of shape {2, 5, 2, 2}.
            inputShape = {2, 5, 2, 2};

            inputData =
            {
                17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
                15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
                15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.236882800924671f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
                0.087144312427294f,
                0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
                0.032058600957022f,
                0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f,


                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.236882800924671f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
                0.087144312427294f,
                0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
                0.032058600957022f,
                0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f
            };
            break;
        }
        case -2:
        case 2:
        {
            // Softmax over dimension 2 of shape {2, 2, 5, 2}.
            inputShape = {2, 2, 5, 2};

            inputData =
            {
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f,

                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f
            };
            break;
        }
        case -1:
        case 3:
        {
            // Softmax over the innermost dimension of shape {2, 2, 2, 5}.
            inputShape = {2, 2, 2, 5};

            inputData =
            {
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,

                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f
            };
            break;
        }
    }
    // NOTE(review): no default case — an axis outside [-4, 3] falls through
    // with empty shape/data; presumably callers only pass valid axes.

    return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        beta,
        inputShape,
        outputData,
        inputData,
        axis);
}
612
SimpleSoftmaxUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta)613 LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
614 armnn::IWorkloadFactory& workloadFactory,
615 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
616 const armnn::ITensorHandleFactory& tensorHandleFactory,
617 float beta)
618 {
619 return SimpleSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, beta);
620 }
621
Simple3dSoftmaxUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta)622 LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
623 armnn::IWorkloadFactory& workloadFactory,
624 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
625 const armnn::ITensorHandleFactory& tensorHandleFactory,
626 float beta)
627 {
628 Simple3dSoftmaxOutputData data;
629 return Simple3dSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
630 workloadFactory,
631 memoryManager,
632 tensorHandleFactory,
633 beta,
634 data.inputShape,
635 data.outputData,
636 data.inputData);
637 }
638
Simple4dSoftmaxUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta)639 LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
640 armnn::IWorkloadFactory& workloadFactory,
641 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
642 const armnn::ITensorHandleFactory& tensorHandleFactory,
643 float beta)
644 {
645 Simple4dSoftmaxData data;
646
647 return Simple4dSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, beta,
648 data.inputShape, data.outputData, data.inputData);
649 }
650
SimpleSoftmaxFloat16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta)651 LayerTestResult<armnn::Half,2> SimpleSoftmaxFloat16Test(
652 armnn::IWorkloadFactory& workloadFactory,
653 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
654 const armnn::ITensorHandleFactory& tensorHandleFactory,
655 float beta)
656 {
657 return SimpleSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, beta);
658 }
659
Simple3dSoftmaxFloat16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta)660 LayerTestResult<armnn::Half,3> Simple3dSoftmaxFloat16Test(
661 armnn::IWorkloadFactory& workloadFactory,
662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
663 const armnn::ITensorHandleFactory& tensorHandleFactory,
664 float beta)
665 {
666 Simple3dSoftmaxOutputData data;
667 return Simple3dSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, beta,
668 data.inputShape, data.outputData, data.inputData);
669 }
670
Simple4dSoftmaxFloat16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta)671 LayerTestResult<armnn::Half,4> Simple4dSoftmaxFloat16Test(
672 armnn::IWorkloadFactory& workloadFactory,
673 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
674 const armnn::ITensorHandleFactory& tensorHandleFactory,
675 float beta)
676 {
677 Simple4dSoftmaxData data;
678 return Simple4dSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, beta,
679 data.inputShape, data.outputData, data.inputData);
680 }
681
SimpleSoftmaxUint16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta)682 LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
683 armnn::IWorkloadFactory& workloadFactory,
684 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
685 const armnn::ITensorHandleFactory& tensorHandleFactory,
686 float beta)
687 {
688 return SimpleSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, beta);
689 }
690
Simple3dSoftmaxUint16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta)691 LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
692 armnn::IWorkloadFactory& workloadFactory,
693 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
694 const armnn::ITensorHandleFactory& tensorHandleFactory,
695 float beta)
696 {
697 Simple3dSoftmaxOutputData data;
698 return Simple3dSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, beta,
699 data.inputShape, data.outputData, data.inputData);
700 }
701
Simple4dSoftmaxUint16Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,float beta)702 LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
703 armnn::IWorkloadFactory& workloadFactory,
704 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
705 const armnn::ITensorHandleFactory& tensorHandleFactory,
706 float beta)
707 {
708 Simple4dSoftmaxData data;
709
710 return Simple4dSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, beta,
711 data.inputShape, data.outputData, data.inputData);
712 }
713
CompareSoftmaxTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,armnn::IWorkloadFactory & refWorkloadFactory,const armnn::ITensorHandleFactory & tensorHandleFactory,const armnn::ITensorHandleFactory & refTensorHandleFactory,float beta)714 LayerTestResult<float,2> CompareSoftmaxTest(
715 armnn::IWorkloadFactory& workloadFactory,
716 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
717 armnn::IWorkloadFactory& refWorkloadFactory,
718 const armnn::ITensorHandleFactory& tensorHandleFactory,
719 const armnn::ITensorHandleFactory& refTensorHandleFactory,
720 float beta)
721 {
722 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
723 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, beta);
724 }
725
CompareSoftmaxUint8Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,armnn::IWorkloadFactory & refWorkloadFactory,const armnn::ITensorHandleFactory & tensorHandleFactory,const armnn::ITensorHandleFactory & refTensorHandleFactory,float beta)726 LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
727 armnn::IWorkloadFactory& workloadFactory,
728 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
729 armnn::IWorkloadFactory& refWorkloadFactory,
730 const armnn::ITensorHandleFactory& tensorHandleFactory,
731 const armnn::ITensorHandleFactory& refTensorHandleFactory,
732 float beta)
733 {
734 return CompareSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
735 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, beta);
736 }
737