//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "L2NormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnnUtils/TensorUtils.hpp>
#include <armnnUtils/Permute.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <cmath>   // sqrtf
#include <numeric> // std::accumulate

namespace
{

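// Shared harness for the L2 normalization layer tests below. It quantizes the
// supplied float input, runs an L2Normalization workload through the given
// workload factory, and compares the result against the (quantized) expected
// values. For reference, the expected values are built as if the layer
// normalizes across the channel dimension:
//
//     out[n][c][h][w] = in[n][c][h][w] / sqrt(sum over c of in[n][c][h][w]^2)
//
// with 'epsilon' acting as a lower bound on the sum so that near-zero inputs
// do not divide by zero. This is an informal sketch of the behaviour the tests
// expect, not a restatement of any particular backend's implementation.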
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::TensorShape& inputOutputTensorShape,
    float scale,
    int32_t offset,
    const std::vector<float>& inputValues,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout,
    float epsilon = 1e-12f)
{
    IgnoreUnused(memoryManager);
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);

    // At this point, permute the input data if the requested layout requires it.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                        armnnUtils::QuantizedVector<T>(inputData,
                                                                       inputTensorInfo.GetQuantizationScale(),
                                                                       inputTensorInfo.GetQuantizationOffset()));

    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
                            sizeof(float));
        expectedOutputData = tmp;
    }

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected =
        MakeTensor<T, 4>(outputTensorInfo,
                         armnnUtils::QuantizedVector<T>(expectedOutputData,
                                                        outputTensorInfo.GetQuantizationScale(),
                                                        outputTensorInfo.GetQuantizationOffset()));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = epsilon;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

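// Helper used to build the expected outputs: returns the reciprocal of the
// Euclidean (L2) norm of the given values, i.e. 1 / sqrt(x0^2 + x1^2 + ...).
// For example, CalcInvL2Norm({ 3.0f, 4.0f }) evaluates to 1.0f / 5.0f.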
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory,
        float scale,
        int32_t offset,
        float outScale,
        int32_t outOffset,
        const armnn::DataLayout layout,
        float epsilon)
{
    // Width: 1
    // Height: 1
    // Channels: 3
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 3;
    unsigned int height = 1;
    unsigned int width = 1;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
            numberOfBatches, numberOfChannels, height, width, layout);

    // 0.00000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        0.00000001f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        0.00000002f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        0.00000003f,
    };

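    // The sum of squares above is roughly 1.4e-15, far below both the default
    // epsilon (1e-12) and the non-default epsilon (1e-9) exercised by the tests
    // below, so the normalizer is dominated by epsilon and the expected output
    // is simply each input scaled by 1 / sqrt(epsilon).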
    const float approxInvL2Norm = 1.f / sqrtf(epsilon);
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        0.00000001f * approxInvL2Norm,
        0.00000002f * approxInvL2Norm,
        0.00000003f * approxInvL2Norm,
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout,
        epsilon);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization1dTestCommon(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory,
        float scale,
        int32_t offset,
        float outScale,
        int32_t outOffset,
        const armnn::DataLayout layout)
{
    // Width: 1
    // Height: 1
    // Channels: 10
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 10;
    unsigned int height = 1;
    unsigned int width = 1;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
            numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        2.0f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        3.0f,

        // Batch 0, Channel 3, Height (1) x Width (1)
        4.0f,

        // Batch 0, Channel 4, Height (1) x Width (1)
        5.0f,

        // Batch 0, Channel 5, Height (1) x Width (1)
        6.0f,

        // Batch 0, Channel 6, Height (1) x Width (1)
        7.0f,

        // Batch 0, Channel 7, Height (1) x Width (1)
        8.0f,

        // Batch 0, Channel 8, Height (1) x Width (1)
        9.0f,

        // Batch 0, Channel 9, Height (1) x Width (1)
        10.0f
    };
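    // With a single batch and a 1x1 spatial extent, all ten channel values are
    // normalized together: 1^2 + 2^2 + ... + 10^2 = 385, and
    // 1 / sqrt(385) ~= 0.050964719, which is the constant used below.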
    const float approxInvL2Norm = 0.050964719f;
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f * approxInvL2Norm,
        2.0f * approxInvL2Norm,
        3.0f * approxInvL2Norm,
        4.0f * approxInvL2Norm,
        5.0f * approxInvL2Norm,
        6.0f * approxInvL2Norm,
        7.0f * approxInvL2Norm,
        8.0f * approxInvL2Norm,
        9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 5
    // Height: 1
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 1;
    unsigned int width = 5;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
            numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f, 3.0f, 5.0f, 7.0f,  9.0f,

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
    };
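    // Normalization runs across the two channels independently at each width
    // position, so every expected value pairs an element of channel 0 with the
    // element at the same position in channel 1 ({ 1, 2 }, { 3, 4 }, ...).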
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
        4.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
        6.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
        8.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
            numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f,  21.0f, 150.0f,
        149.0f,  32.0f, 179.0f,
         15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f,  73.0f,
        211.0f, 212.0f,  89.0f,
         24.0f, 138.0f, 188.0f,
        162.0f,  12.0f, 161.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
         21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
         32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
         73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
         89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
         12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2
    unsigned int numberOfBatches = 2;
    unsigned int numberOfChannels = 3;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
            numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f,  46.0f, 178.0f,
        100.0f, 123.0f,  19.0f,
        172.0f,  74.0f, 250.0f,
          6.0f, 195.0f,  80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f,  95.0f, 202.0f,
         77.0f, 114.0f,  71.0f,
        122.0f, 246.0f, 166.0f,
         82.0f,  28.0f,  37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
         56.0f, 170.0f, 162.0f,
        194.0f,  89.0f, 254.0f,
         12.0f, 209.0f, 200.0f,
          1.0f,  64.0f,  54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
         67.0f,  90.0f,  49.0f,
          7.0f, 163.0f,  18.0f,
         25.0f, 117.0f, 103.0f,
        247.0f,  59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
         17.0f, 124.0f, 153.0f,
        222.0f, 217.0f,  75.0f,
         32.0f, 126.0f,  21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
         97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f,  16.0f, 132.0f,
         92.0f, 125.0f,  88.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
         74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
         77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
         82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
         56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
        170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
         89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
        254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
         12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
         67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
         90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
         49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
          7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
         18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
         25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
         59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
         17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
         75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
         97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
         16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

} // anonymous namespace

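// Public test entry points. Each one forwards to the matching *TestCommon
// helper above with quantization parameters chosen for the data type: Float32
// tests pass scale 0 / offset 0 (quantization info is effectively unused for
// float tensors), QSymmS16 tests use scale 1 / offset 0, and QAsymmU8 tests use
// an input scale of 1 with an output scale of 1/128 and offset 128, which maps
// the roughly [-1, 1] normalized outputs onto the unsigned 8-bit range.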
LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory,
        const armnn::DataLayout layout)
{
    // Dummy descriptor to get the default value of epsilon.
    armnn::L2NormalizationDescriptor descriptor;

    return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        0.f,
        0,
        0.f,
        0,
        layout,
        descriptor.m_Eps);
}

LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory,
        const armnn::DataLayout layout)
{
    return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        0.f,
        0,
        0.f,
        0,
        layout,
        1e-9f);
}

LayerTestResult<float, 4> L2Normalization1dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    return L2Normalization1dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}

LayerTestResult<float, 4> L2Normalization2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    return L2Normalization2dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    return L2Normalization2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    return L2Normalization2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}

LayerTestResult<float, 2> L2Normalization2dShapeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::DataLayout layout = armnn::DataLayout::NHWC;
    const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });

    std::vector<float> inputData
    {
        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
    };
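    // With a rank-2 { 5, 2 } tensor and NHWC layout, the last dimension takes
    // the role of the channel dimension, so each consecutive pair of inputs
    // ({ 1, 2 }, { 3, 4 }, ...) is normalized together, as the expected values
    // below show.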
    std::vector<float> expectedOutputData
    {
        1.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
        2.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
        4.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
        6.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
        8.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);

    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, inputData);

    LayerTestResult<float, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, expectedOutputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = 1e-12f;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

LayerTestResult<float, 4> L2Normalization3dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    return L2Normalization3dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    return L2Normalization3dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    return L2Normalization3dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}

LayerTestResult<float, 4> L2Normalization4dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    return L2Normalization4dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    return L2Normalization4dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    return L2Normalization4dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}
