//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "FullyConnectedTestImpl.hpp"

#include <QuantizeHelper.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

//
// Implementation templates
//

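// Builds and runs a single FullyConnected workload with the supplied tensor infos and
// data, then copies the raw output back into a LayerTestResult for comparison.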
template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory,
        armnn::TensorInfo inputTensorInfo,
        armnn::TensorInfo outputTensorInfo,
        armnn::TensorInfo weightsDesc,
        armnn::TensorInfo biasesDesc,
        boost::multi_array<T, 2>& weights,
        boost::multi_array<B, 1>& bias,
        boost::multi_array<T, 4>& input,
        bool biasEnabled,
        bool transposeWeights)
{
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

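    // Weights and biases are supplied to the workload as constant, scoped tensor
    // handles, while input and output are wired up through the WorkloadInfo.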
    armnn::FullyConnectedQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
    LayerTestResult<T, 2> result(outputTensorInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

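// Quantized fully connected test: a 1x1x2x3 input, flattened to six values, is
// multiplied by a 2x6 weight matrix (stored output-major, hence the transposed flag
// passed below), with an optional int32 bias.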
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> FullyConnectedTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory,
        bool biasEnabled)
{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;

    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;

    constexpr static unsigned int outputChannels = 2u;

    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
    inputTensorInfo.SetQuantizationScale(0.1f);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
    outputTensorInfo.SetQuantizationScale(5.f);
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);

    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
    weightsDesc.SetQuantizationScale(0.2f);
    weightsDesc.SetQuantizationOffset(93);

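    // Usual convention for quantized layers: the bias is int32 with zero offset and a
    // scale of inputScale * weightsScale, so it can be added directly to the
    // accumulated (input x weights) products.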
    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);

    LayerTestResult<T, 2> result(outputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
        {
            -1.2f, 6.1f, -3.5f,
            18.8f, -5.5f, 2.9f
        },
        inputTensorInfo));

    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
        {
            -8.4f, 20.0f, -10.4f, -8.0f, 16.4f, -11.8f,
            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
        },
        weightsDesc));

    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});

    result = SimpleFullyConnectedTestImpl<T>(
            workloadFactory,
            memoryManager,
            tensorHandleFactory,
            inputTensorInfo, outputTensorInfo,
            weightsDesc, biasesDesc,
            weights, bias, input,
            biasEnabled, true
    );

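    // Each expected value is the dot product of the input with one weight row, plus
    // the dequantized bias when enabled (e.g. row 0: -106.34 + 9250 * 0.02 = 78.66,
    // which requantizes with the output scale of 5 to the same value as 80.f).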
    if (biasEnabled)
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                                 ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
    }
    else
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                                 ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
    }

    return result;
}

//
// ArmNN variant of the AndroidNN fully_connected_float_large test.
//
// Tests the fully connected layer with large values, optionally transposing weights.
// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool transposeWeights,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;

    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
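    // With a single output channel the five weight values occupy the same memory
    // layout either way; swapping the dims just matches the m_TransposeWeightMatrix
    // flag passed to the workload below.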
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
    weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
    biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 2> result(outputTensorInfo);

    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
        armnnUtils::QuantizedVector<T>({
            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
        },
        qScale, qOffset)
    );

    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
        armnnUtils::QuantizedVector<T>({
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
        },
        qScale, qOffset)
    );

    std::vector<T> biasValues({900000.f});
    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        true, transposeWeights
    );

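    // Expected: 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432.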
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                             armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset));

    return result;
}

//
// Explicit template instantiations
//

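// These instantiations let the FullyConnectedTest template definition above stay in
// this .cpp file while remaining usable from other translation units.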
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
FullyConnectedTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
FullyConnectedTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled);

//
// Implementation functions
//

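// Float32 variant: two batches of five values against a 5x3 weight matrix,
// exercising both weight layouts and the optional bias.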
LayerTestResult<float, 2> FullyConnectedFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    bool transposeWeights)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[]   = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[]  = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        })
    );

    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));

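    // In transposed mode the same matrix is supplied with rows and columns swapped
    // (3x5 instead of 5x3); the expected results are identical either way.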
    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, .5f, .5f, .5f, .5f,
            2.f, 2.f, 2.f, 2.f, 2.f,
            .5f, 1.f, 2.f, 3.f, 4.f
        }));
    }

    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights
    );

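    // Each expected entry is the dot product of one input batch with one weight
    // column, plus the corresponding bias value.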
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f   + biasValues[2]
        })
    );

    return result;
}

LayerTestResult<float, 2> FullyConnectedLargeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory,
                                                                   memoryManager,
                                                                   tensorHandleFactory,
                                                                   transposeWeights);
}