//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "QLstmEndToEndTestImpl.hpp"

#include <CommonTestUtils.hpp>
#include "EndToEndTestImpl.hpp"

#include <armnn/INetwork.hpp>
#include <armnn/LstmParams.hpp>

#include <doctest/doctest.h>

#include <cstdint>
#include <type_traits>
#include <vector>

// Bring the unqualified armnn names used below (QLstmDescriptor, IRuntime,
// ConstTensor, Tensor, ...) into scope.
using namespace armnn;

namespace
{

// Checks if two values of an arithmetic type are close enough to each other
// with regard to a given tolerance value.
template<typename T>
typename std::enable_if<std::is_arithmetic<T>::value, bool>::type
IsCloseEnough(T value1, T value2, T tolerance)
{
    if (tolerance < 0)
    {
        throw armnn::InvalidArgumentException("Tolerance cannot be < 0");
    }

    // Note: the difference is cast back to T, so T must be wide enough to hold it.
    T diff = value1 >= value2 ? static_cast<T>(value1 - value2) : static_cast<T>(value2 - value1);
    return diff <= tolerance;
}
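
// For example, IsCloseEnough<int8_t>(20, 21, 1) is true (|20 - 21| <= 1), while
// IsCloseEnough<int8_t>(20, 22, 1) is false (|20 - 22| > 1).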

} // anonymous namespace

void QLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
{
    const unsigned int numBatches = 2;
    const unsigned int inputSize  = 5;
    const unsigned int outputSize = 4;
    const unsigned int numUnits   = 4;

    // CIFG couples the input gate to the forget gate, so no input gate
    // weights or biases are set below.
    bool cifgEnabled       = true;
    bool peepholeEnabled   = false;
    bool projectionEnabled = false;
    bool layerNormEnabled  = true;
    // Scale/Offset quantization info
    const float inputScale    = 0.0078125f;
    const int32_t inputOffset = 0;

    const int32_t hiddenStateZeroPoint = 0;
    const float hiddenStateScale       = 0.007f;

    // With projection disabled, the output must share the hidden state quantization
    const float outputScale    = hiddenStateScale;
    const int32_t outputOffset = hiddenStateZeroPoint;

    const float cellStateScale    = 3.05176e-05f;
    const int32_t cellStateOffset = 0;

    const float weightsScale    = 0.00784314f;
    const int32_t weightsOffset = 0;

    const float layerNormScale    = 3.05182e-05f;
    const int32_t layerNormOffset = 0;

    const float biasScale    = layerNormScale / 1024;
    const int32_t biasOffset = 0;

    const float inputIntermediateScale  = 0.007059f;
    const float forgetIntermediateScale = 0.007812f;
    const float cellIntermediateScale   = inputIntermediateScale;
    const float outputIntermediateScale = forgetIntermediateScale;

    const float cellClip       = 0.0f;
    const float projectionClip = 0.0f;

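    // Affine quantization maps q to real = scale * (q - offset). For example, the first
    // input element used below, 90, dequantizes to 0.0078125f * (90 - 0) = 0.703125f.
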
    // Weights and bias tensor info (numUnits == outputSize in this test, so the
    // {outputSize, ...} shapes double as the {numUnits, ...} shapes the layer expects)
    const armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
                                             armnn::DataType::QSymmS8,
                                             weightsScale,
                                             weightsOffset,
                                             true);

    const armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
                                                 armnn::DataType::QSymmS8,
                                                 weightsScale,
                                                 weightsOffset,
                                                 true);

    const armnn::TensorInfo biasInfo({outputSize},
                                     armnn::DataType::Signed32,
                                     biasScale,
                                     biasOffset,
                                     true);

    const armnn::TensorInfo layerNormWeightsInfo({numUnits},
                                                 armnn::DataType::QSymmS16,
                                                 layerNormScale,
                                                 layerNormOffset,
                                                 true);

    // Mandatory params
    const std::vector<int8_t> inputToForgetWeightsVector =
            {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64};
    const std::vector<int8_t> inputToCellWeightsTensorVector =
            {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77};
    const std::vector<int8_t> inputToOutputWeightsTensorVector =
            {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51};

    armnn::ConstTensor inputToForgetWeightsTensor(inputWeightsInfo, inputToForgetWeightsVector.data());
    armnn::ConstTensor inputToCellWeightsTensor(inputWeightsInfo, inputToCellWeightsTensorVector.data());
    armnn::ConstTensor inputToOutputWeightsTensor(inputWeightsInfo, inputToOutputWeightsTensorVector.data());

    const std::vector<int8_t> recurrentToForgetWeightsTensorVector =
            {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25, 25, 38, -13, 51};
    const std::vector<int8_t> recurrentToCellWeightsTensorVector =
            {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25, 38, -13, 25, 64};
    const std::vector<int8_t> recurrentToOutputWeightsTensorVector =
            {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25, 13, 64, 25, -38};

    armnn::ConstTensor recurrentToForgetWeightsTensor(recurrentWeightsInfo,
                                                      recurrentToForgetWeightsTensorVector.data());
    armnn::ConstTensor recurrentToCellWeightsTensor(recurrentWeightsInfo,
                                                    recurrentToCellWeightsTensorVector.data());
    armnn::ConstTensor recurrentToOutputWeightsTensor(recurrentWeightsInfo,
                                                      recurrentToOutputWeightsTensorVector.data());

    const std::vector<int32_t> forgetGateBiasTensorVector = {2147484, -6442451, -4294968, 2147484};
    const std::vector<int32_t> cellBiasTensorVector       = {-1073742, 15461883, 5368709, 1717987};
    const std::vector<int32_t> outputGateBiasTensorVector = {1073742, -214748, 4294968, 2147484};

    armnn::ConstTensor forgetGateBiasTensor(biasInfo, forgetGateBiasTensorVector.data());
    armnn::ConstTensor cellBiasTensor(biasInfo, cellBiasTensorVector.data());
    armnn::ConstTensor outputGateBiasTensor(biasInfo, outputGateBiasTensorVector.data());

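    // With biasScale = layerNormScale / 1024 ≈ 2.98e-08f, the first forget gate bias
    // value 2147484 corresponds to roughly 2147484 * 2.98e-08f ≈ 0.064f.
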
    // Layer Norm
    const std::vector<int16_t> forgetLayerNormWeightsVector = {6553, 6553, 13107, 9830};
    const std::vector<int16_t> cellLayerNormWeightsVector   = {22937, 6553, 9830, 26214};
    const std::vector<int16_t> outputLayerNormWeightsVector = {19660, 6553, 6553, 16384};

    armnn::ConstTensor forgetLayerNormWeights(layerNormWeightsInfo, forgetLayerNormWeightsVector.data());
    armnn::ConstTensor cellLayerNormWeights(layerNormWeightsInfo, cellLayerNormWeightsVector.data());
    armnn::ConstTensor outputLayerNormWeights(layerNormWeightsInfo, outputLayerNormWeightsVector.data());

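    // Layer norm weights are QSymmS16, so e.g. 6553 dequantizes to
    // 6553 * 3.05182e-05f ≈ 0.2f.
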
    // Set up params
    armnn::LstmInputParams params;
    params.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    params.m_InputToCellWeights   = &inputToCellWeightsTensor;
    params.m_InputToOutputWeights = &inputToOutputWeightsTensor;

    params.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    params.m_RecurrentToCellWeights   = &recurrentToCellWeightsTensor;
    params.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;

    params.m_ForgetGateBias = &forgetGateBiasTensor;
    params.m_CellBias       = &cellBiasTensor;
    params.m_OutputGateBias = &outputGateBiasTensor;

    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
    params.m_CellLayerNormWeights   = &cellLayerNormWeights;
    params.m_OutputLayerNormWeights = &outputLayerNormWeights;

    QLstmDescriptor descriptor;
    descriptor.m_CifgEnabled       = cifgEnabled;
    descriptor.m_PeepholeEnabled   = peepholeEnabled;
    descriptor.m_ProjectionEnabled = projectionEnabled;
    descriptor.m_LayerNormEnabled  = layerNormEnabled;

    descriptor.m_CellClip       = cellClip;
    descriptor.m_ProjectionClip = projectionClip;

    descriptor.m_HiddenStateZeroPoint = hiddenStateZeroPoint;
    descriptor.m_HiddenStateScale     = hiddenStateScale;

    descriptor.m_InputIntermediateScale  = inputIntermediateScale;
    descriptor.m_ForgetIntermediateScale = forgetIntermediateScale;
    descriptor.m_CellIntermediateScale   = cellIntermediateScale;
    descriptor.m_OutputIntermediateScale = outputIntermediateScale;

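    // The hidden state quantization and the per-gate intermediate scales live on the
    // descriptor rather than on a tensor, since they describe values internal to the layer.
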
    // Input/Output tensor info
    const armnn::TensorInfo inputInfo({numBatches, inputSize},
                                      armnn::DataType::QAsymmS8,
                                      inputScale,
                                      inputOffset,
                                      true);

    const armnn::TensorInfo cellStateInfo({numBatches, numUnits},
                                          armnn::DataType::QSymmS16,
                                          cellStateScale,
                                          cellStateOffset,
                                          true);

    const armnn::TensorInfo outputStateInfo({numBatches, outputSize},
                                            armnn::DataType::QAsymmS8,
                                            outputScale,
                                            outputOffset,
                                            true);

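    // Note the cell state uses 16-bit symmetric quantization (QSymmS16), giving it much
    // finer resolution than the 8-bit asymmetric (QAsymmS8) input and output states.
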
    // Input tensor data
    const std::vector<int8_t> inputVector         = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64};
    const std::vector<int8_t> outputStateInVector = {0, 0, 0, 0, 0, 0, 0, 0};
    const std::vector<int16_t> cellStateInVector  = {0, 0, 0, 0, 0, 0, 0, 0};

    // Expected output tensor data
    const std::vector<int8_t> outputStateOutVector = {-15, 21, 14, 20, -15, 15, 5, 27};
    const std::vector<int16_t> cellStateOutVector  = {-11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149};
    const std::vector<int8_t> outputVector         = {-15, 21, 14, 20, -15, 15, 5, 27};

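    // For reference: outputStateOut -15 dequantizes to -15 * 0.007f = -0.105f, and
    // cellStateOut -11692 dequantizes to -11692 * 3.05176e-05f ≈ -0.357f.
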
    // Build network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* const input         = net->AddInputLayer(0);
    armnn::IConnectableLayer* const outputStateIn = net->AddInputLayer(1);
    armnn::IConnectableLayer* const cellStateIn   = net->AddInputLayer(2);

    armnn::IConnectableLayer* const qLstmLayer = net->AddQLstmLayer(descriptor, params, "qLstm");

    armnn::IConnectableLayer* const outputStateOut = net->AddOutputLayer(0);
    armnn::IConnectableLayer* const cellStateOut   = net->AddOutputLayer(1);
    armnn::IConnectableLayer* const output         = net->AddOutputLayer(2);

    // Connect input/output slots (the last two arguments are the source output slot
    // index and the destination input slot index)
    Connect(input, qLstmLayer, inputInfo, 0, 0);
    Connect(outputStateIn, qLstmLayer, outputStateInfo, 0, 1);
    Connect(cellStateIn, qLstmLayer, cellStateInfo, 0, 2);

    Connect(qLstmLayer, outputStateOut, outputStateInfo, 0, 0);
    Connect(qLstmLayer, cellStateOut, cellStateInfo, 1, 0);
    Connect(qLstmLayer, output, outputStateInfo, 2, 0);
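
    // QLstm slot layout, as wired above: input slots are (0) input, (1) outputStateIn,
    // (2) cellStateIn; output slots are (0) outputStateOut, (1) cellStateOut, (2) output.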

    // Create runtime
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // Optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Load the network into the runtime
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Push back input tensors
    InputTensors inputTensors;
    inputTensors.reserve(3);

    inputTensors.push_back({0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputVector.data())});
    inputTensors.push_back({1, ConstTensor(runtime->GetInputTensorInfo(netId, 1), outputStateInVector.data())});
    inputTensors.push_back({2, ConstTensor(runtime->GetInputTensorInfo(netId, 2), cellStateInVector.data())});

    // Push back output tensors
    OutputTensors outputTensors;
    outputTensors.reserve(3);

    std::vector<int8_t> outputStateOutResult(outputStateOutVector.size());
    std::vector<int16_t> cellStateOutResult(cellStateOutVector.size());
    std::vector<int8_t> outputResult(outputVector.size());

    outputTensors.push_back({0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputStateOutResult.data())});
    outputTensors.push_back({1, Tensor(runtime->GetOutputTensorInfo(netId, 1), cellStateOutResult.data())});
    outputTensors.push_back({2, Tensor(runtime->GetOutputTensorInfo(netId, 2), outputResult.data())});

    // Execute inference
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Compare the quantized results against the expected values, allowing each
    // element to be off by at most one quantization step.
    constexpr int8_t toleranceInt8 = 1;
    for (unsigned int i = 0u; i < outputStateOutResult.size(); ++i)
    {
        CHECK(IsCloseEnough(outputStateOutVector[i], outputStateOutResult[i], toleranceInt8));
    }

    for (unsigned int i = 0u; i < outputResult.size(); ++i)
    {
        CHECK(IsCloseEnough(outputVector[i], outputResult[i], toleranceInt8));
    }

    // Note: cellStateOutResult is captured above but not compared against cellStateOutVector.
}
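
// Illustrative caller, a minimal sketch assuming hypothetical suite/case names
// (the real backend test files register their own doctest cases):
//
//     TEST_SUITE("QLstmEndToEnd")
//     {
//     TEST_CASE("QLstmEndToEndCpuRef")
//     {
//         QLstmEndToEnd(std::vector<armnn::BackendId>{armnn::Compute::CpuRef});
//     }
//     }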