// (code-viewer navigation chrome removed)
//
// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnnUtils/Permute.hpp>

#include <armnnUtils/QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <CommonTestUtils.hpp>

#include <map>
#include <vector>

namespace
{

CreateTransposeConvolution2dNetwork(const armnn::TransposeConvolution2dDescriptor & descriptor,const armnn::TensorInfo & inputInfo,const armnn::TensorInfo & outputInfo,const armnn::ConstTensor & weights,const armnn::Optional<armnn::ConstTensor> & biases)20 INetworkPtr CreateTransposeConvolution2dNetwork(const armnn::TransposeConvolution2dDescriptor& descriptor,
21                                                 const armnn::TensorInfo& inputInfo,
22                                                 const armnn::TensorInfo& outputInfo,
23                                                 const armnn::ConstTensor& weights,
24                                                 const armnn::Optional<armnn::ConstTensor>& biases)
25 {
26     using namespace armnn;
27 
28     INetworkPtr network(INetwork::Create());
29     IConnectableLayer* input = network->AddInputLayer(0, "input");
30     IConnectableLayer* transposeConvolution2d =
31         network->AddTransposeConvolution2dLayer(descriptor, weights, biases, "transposeConvolution2d");
32     IConnectableLayer* output = network->AddOutputLayer(0, "output");
33 
34     Connect(input, transposeConvolution2d, inputInfo, 0, 0);
35     Connect(transposeConvolution2d, output, outputInfo, 0, 0);
36 
37     return network;
38 }

} // anonymous namespace

42 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
TransposeConvolution2dEndToEnd(const std::vector<armnn::BackendId> & backends,armnn::DataLayout dataLayout)43 void TransposeConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backends,
44                                     armnn::DataLayout dataLayout)
45 {
46     using namespace armnn;
47     using T = ResolveType<ArmnnType>;
48 
49     constexpr unsigned int batches  = 1u;
50     constexpr unsigned int channels = 1u;
51 
52     constexpr unsigned int wInput = 3u;
53     constexpr unsigned int hInput = wInput;
54 
55     constexpr unsigned int wOutput = 5u;
56     constexpr unsigned int hOutput = wOutput;
57 
58     constexpr unsigned int wWeights = 3u;
59     constexpr unsigned int hWeights = wWeights;
60 
61     TensorShape inputShape   = MakeTensorShape(batches, channels, hInput, wInput, dataLayout);
62     TensorShape outputShape  = MakeTensorShape(batches, channels, hOutput, wOutput, dataLayout);
63     TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, dataLayout);
64 
65     const float   qScale  = IsQuantizedType<T>() ? 0.25f : 1.0f;
66     const int32_t qOffset = IsQuantizedType<T>() ? 50    : 0;
67 
68     TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset, true);
69     TensorInfo outputInfo(outputShape, ArmnnType, qScale, qOffset);
70     TensorInfo weightsInfo(weightsShape, ArmnnType, qScale, qOffset, true);
71     TensorInfo biasesInfo({ channels }, ArmnnBType, qScale * qScale, 0, true);
72 
73     std::vector<float> inputData =
74     {
75        1.f, 1.f, 1.f,
76        1.f, 1.f, 1.f,
77        1.f, 1.f, 1.f
78     };
79 
80     std::vector<float> weightsData =
81     {
82         1.f, 2.f, 3.f,
83         4.f, 5.f, 6.f,
84         7.f, 8.f, 9.f
85     };
86 
87     std::vector<float> biasesData = { 1.f };
88 
89     std::vector<float> expectedOutputData =
90     {
91          6.f, 11.f,  6.f, 11.f,  6.f,
92         11.f, 21.f, 11.f, 21.f, 11.f,
93          6.f, 11.f,  6.f, 11.f,  6.f,
94         11.f, 21.f, 11.f, 21.f, 11.f,
95          6.f, 11.f,  6.f, 11.f,  6.f
96     };
97 
98     TransposeConvolution2dDescriptor descriptor;
99     descriptor.m_PadLeft     = 1;
100     descriptor.m_PadRight    = 1;
101     descriptor.m_PadTop      = 1;
102     descriptor.m_PadBottom   = 1;
103     descriptor.m_StrideX     = 2;
104     descriptor.m_StrideY     = 2;
105     descriptor.m_BiasEnabled = true;
106     descriptor.m_DataLayout  = dataLayout;
107 
108     // swizzle data if needed
109     if (dataLayout == armnn::DataLayout::NHWC)
110     {
111         constexpr size_t dataTypeSize = sizeof(float);
112         const armnn::PermutationVector nchwToNhwc = { 0, 3, 1, 2 };
113 
114         std::vector<float> tmp(inputData.size());
115         armnnUtils::Permute(inputInfo.GetShape(), nchwToNhwc, inputData.data(), tmp.data(), dataTypeSize);
116         inputData = tmp;
117 
118         tmp.resize(weightsData.size());
119         armnnUtils::Permute(weightsInfo.GetShape(), nchwToNhwc, weightsData.data(), tmp.data(), dataTypeSize);
120         weightsData = tmp;
121 
122         tmp.resize(expectedOutputData.size());
123         armnnUtils::Permute(outputInfo.GetShape(), nchwToNhwc, expectedOutputData.data(), tmp.data(), dataTypeSize);
124         expectedOutputData = tmp;
125     }
126 
127     // quantize data
128     std::vector<T> qInputData          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
129     std::vector<T> qWeightsData        = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
130     std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
131 
132     using BT = ResolveType<ArmnnBType>;
133     std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);
134 
135     ConstTensor weights(weightsInfo, qWeightsData);
136     ConstTensor biases(biasesInfo, qBiasesData);
137 
138     INetworkPtr network = CreateTransposeConvolution2dNetwork(descriptor,
139                                                               inputInfo,
140                                                               outputInfo,
141                                                               weights,
142                                                               Optional<ConstTensor>(biases));
143 
144 
145     EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
146                                                 { { 0, qInputData } },
147                                                 { { 0, qExpectedOutputData } },
148                                                 backends);
149 }
150 
151 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
SimpleTransposeConvolution2dEndToEnd(const std::vector<armnn::BackendId> & backends,armnn::DataLayout dataLayout)152 void SimpleTransposeConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backends,
153                                           armnn::DataLayout dataLayout)
154 {
155     using namespace armnn;
156     using T = ResolveType<ArmnnType>;
157 
158     const float   qScale  = IsQuantizedType<T>() ? 0.25f : 1.0f;
159     const int32_t qOffset = IsQuantizedType<T>() ? 50    : 0;
160 
161     TensorInfo inputInfo({1, 2, 2, 1}, ArmnnType, qScale, qOffset, true);
162     TensorInfo outputInfo({1, 3, 3, 1}, ArmnnType, qScale, qOffset);
163     TensorInfo weightsInfo({1, 2, 2, 1}, ArmnnType, qScale, qOffset, true);
164     TensorInfo biasesInfo({ 1 }, ArmnnBType, qScale * qScale, 0, true);
165 
166     std::vector<float> inputData =
167     {
168         1, 2, 3, 4
169     };
170 
171     std::vector<float> weightsData =
172     {
173         0, 1, 2, 4
174     };
175     std::vector<float> biasesData = { 0.f };
176 
177     std::vector<float> expectedOutputData =
178     {
179         0, 1,  2,
180         2, 11, 12,
181         6, 20, 16
182     };
183 
184     TransposeConvolution2dDescriptor descriptor;
185     descriptor.m_PadLeft     = 0;
186     descriptor.m_PadRight    = 0;
187     descriptor.m_PadTop      = 0;
188     descriptor.m_PadBottom   = 0;
189     descriptor.m_StrideX     = 1;
190     descriptor.m_StrideY     = 1;
191     descriptor.m_BiasEnabled = true;
192     descriptor.m_DataLayout  = dataLayout;
193     descriptor.m_OutputShapeEnabled = true;
194     descriptor.m_OutputShape = { 1, 3, 3, 1 };
195 
196     // quantize data
197     std::vector<T> qInputData          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
198     std::vector<T> qWeightsData        = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
199     std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
200 
201     using BT = ResolveType<ArmnnBType>;
202     std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);
203 
204     ConstTensor weights(weightsInfo, qWeightsData);
205     ConstTensor biases(biasesInfo, qBiasesData);
206 
207     INetworkPtr network = CreateTransposeConvolution2dNetwork(descriptor,
208                                                               inputInfo,
209                                                               outputInfo,
210                                                               weights,
211                                                               Optional<ConstTensor>(biases));
212 
213     EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
214                                                 { { 0, qInputData } },
215                                                 { { 0, qExpectedOutputData } },
216                                                 backends);
217 }
218