1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #pragma once
7
8 #include "DriverTestHelpers.hpp"
9
10 #include <armnn/utility/IgnoreUnused.hpp>
11
12 #include <boost/math/special_functions/relative_difference.hpp>
13
14 #include <array>
15
16 using ArmnnDriver = armnn_driver::ArmnnDriver;
17 using DriverOptions = armnn_driver::DriverOptions;
18
19 using namespace driverTestHelpers;
20 using namespace android::hardware;
21
22 namespace
23 {
24
25 template<typename T>
26 RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int poolIndex)
27 {
28 DataLocation inputInloc = {};
29 inputInloc.poolIndex = poolIndex;
30 inputInloc.offset = 0;
31 inputInloc.length = value.size() * sizeof(T);
32 RequestArgument inputRequestArgument = {};
33 inputRequestArgument.location = inputInloc;
34 inputRequestArgument.dimensions = hidl_vec<uint32_t>{};
35 return inputRequestArgument;
36 }
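// Illustrative note (not part of the original helper): CreateRequestArgument only describes where the data
// lives; the bytes themselves are supplied later via AddPoolAndSetData/AddPoolAndGetData. For example, a
// hypothetical call such as
//     RequestArgument arg = CreateRequestArgument<float>(std::vector<float>{1.0f, 2.0f}, 0);
// yields arg.location.poolIndex == 0, arg.location.offset == 0 and arg.location.length == 8 (two floats).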
37
38 // Returns true if the relative difference between two float values is less than the tolerance value given.
39 // This is used because the floating point comparison tolerance (set on each BOOST_AUTO_TEST_CASE) does not work as expected.
40 bool TolerantCompareEqual(float a, float b, float tolerance = 0.00001f)
41 {
42 float rd;
43 if (a == 0.0f)
44 {
45 rd = fabs(b);
46 }
47 else if (b == 0.0f)
48 {
49 rd = fabs(a);
50 }
51 else
52 {
53 rd = boost::math::relative_difference(a, b);
54 }
55 return rd < tolerance;
56 }
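// Illustrative note (not part of the original helper): with the default tolerance of 1e-5,
//     TolerantCompareEqual(1.0f, 1.000001f);  // passes, relative difference is about 1e-6
//     TolerantCompareEqual(1.0f, 1.1f);       // fails, relative difference is about 0.1
//     TolerantCompareEqual(0.0f, 0.000001f);  // passes, compared against fabs(b) when a == 0.0f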
57
58 // Helper function to create an OperandLifeTime::NO_VALUE for testing.
59 // To be used on optional input operands that have no values - these are valid and should be tested.
60 V1_0::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
61 {
62 // Only create a NO_VALUE for optional operands that have no elements
63 if (dimensions.size() == 0 || dimensions[0] == 0)
64 {
65 return V1_0::OperandLifeTime::NO_VALUE;
66 }
67 return V1_0::OperandLifeTime::CONSTANT_COPY;
68 }
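// Illustrative note (not part of the original helper): an omitted optional operand is described with a
// dimension vector whose first element is zero, so
//     CreateNoValueLifeTime(hidl_vec<uint32_t>{0});     // returns OperandLifeTime::NO_VALUE
//     CreateNoValueLifeTime(hidl_vec<uint32_t>{4, 4});  // returns OperandLifeTime::CONSTANT_COPY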
69
70 template<typename HalModel>
71 void ExecuteModel(const HalModel& model, armnn_driver::ArmnnDriver& driver, const V1_0::Request& request)
72 {
73 android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, driver);
74 if (preparedModel.get() != nullptr)
75 {
76 Execute(preparedModel, request);
77 }
78 }
79
80 #if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
81
82 template<>
83 void ExecuteModel<armnn_driver::hal_1_2::HalPolicy::Model>(const armnn_driver::hal_1_2::HalPolicy::Model& model,
84 armnn_driver::ArmnnDriver& driver,
85 const V1_0::Request& request)
86 {
87 android::sp<V1_2::IPreparedModel> preparedModel = PrepareModel_1_2(model, driver);
88 if (preparedModel.get() != nullptr)
89 {
90 Execute(preparedModel, request);
91 }
92 }
93
94 #endif
95
96 } // anonymous namespace
97
98 #ifndef ARMCOMPUTECL_ENABLED
99 static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
100 #else
101 static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
102 #endif
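// Illustrative sketch (not part of the original file): test sources that include this header can run each
// test once per backend by iterating over COMPUTE_DEVICES, e.g. with a hypothetical Boost.Test data-driven
// case along the lines of
//     BOOST_DATA_TEST_CASE(LstmNoCifgNoPeepholeNoProjectionTest, COMPUTE_DEVICES)
//     {
//         LstmNoCifgNoPeepholeNoProjection<armnn_driver::hal_1_0::HalPolicy>(sample);
//     }
// where 'sample' is the armnn::Compute value supplied by the dataset.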
103
104 // Add our own tests here, since the LSTM tests supplied by Google fail for this driver (because of non-const weights)
105 template <typename HalPolicy>
106 void LstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
107 const std::vector<float>& inputValue,
108 const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
109 const std::vector<float>& inputToInputWeightsValue,
110 const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
111 const std::vector<float>& inputToForgetWeightsValue,
112 const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
113 const std::vector<float>& inputToCellWeightsValue,
114 const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
115 const std::vector<float>& inputToOutputWeightsValue,
116 const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
117 const std::vector<float>& recurrentToInputWeightsValue,
118 const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
119 const std::vector<float>& recurrentToForgetWeightsValue,
120 const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
121 const std::vector<float>& recurrentToCellWeightsValue,
122 const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
123 const std::vector<float>& recurrentToOutputWeightsValue,
124 const hidl_vec<uint32_t>& cellToInputWeightsDimensions,
125 const std::vector<float>& cellToInputWeightsValue,
126 const hidl_vec<uint32_t>& cellToForgetWeightsDimensions,
127 const std::vector<float>& cellToForgetWeightsValue,
128 const hidl_vec<uint32_t>& cellToOutputWeightsDimensions,
129 const std::vector<float>& cellToOutputWeightsValue,
130 const hidl_vec<uint32_t>& inputGateBiasDimensions,
131 const std::vector<float>& inputGateBiasValue,
132 const hidl_vec<uint32_t>& forgetGateBiasDimensions,
133 const std::vector<float>& forgetGateBiasValue,
134 const hidl_vec<uint32_t>& cellBiasDimensions,
135 const std::vector<float>& cellBiasValue,
136 const hidl_vec<uint32_t>& outputGateBiasDimensions,
137 const std::vector<float>& outputGateBiasValue,
138 const hidl_vec<uint32_t>& projectionWeightsDimensions,
139 const std::vector<float>& projectionWeightsValue,
140 const hidl_vec<uint32_t>& projectionBiasDimensions,
141 const std::vector<float>& projectionBiasValue,
142 const hidl_vec<uint32_t>& outputStateInDimensions,
143 const std::vector<float>& outputStateInValue,
144 const hidl_vec<uint32_t>& cellStateInDimensions,
145 const std::vector<float>& cellStateInValue,
146 const hidl_vec<uint32_t>& activationFunctionDimensions,
147 const std::vector<int32_t>& activationFunctionValue,
148 const hidl_vec<uint32_t>& cellClippingThresholdDimensions,
149 const std::vector<float>& cellClippingThresholdValue,
150 const hidl_vec<uint32_t>& projectionClippingThresholdDimensions,
151 const std::vector<float>& projectionClippingThresholdValue,
152 const hidl_vec<uint32_t>& inputLayerNormWeightsDimensions,
153 const std::vector<float>& inputLayerNormWeightsValue,
154 const hidl_vec<uint32_t>& forgetLayerNormWeightsDimensions,
155 const std::vector<float>& forgetLayerNormWeightsValue,
156 const hidl_vec<uint32_t>& cellLayerNormWeightsDimensions,
157 const std::vector<float>& cellLayerNormWeightsValue,
158 const hidl_vec<uint32_t>& outputLayerNormWeightsDimensions,
159 const std::vector<float>& outputLayerNormWeightsValue,
160 const hidl_vec<uint32_t>& scratchBufferDimensions,
161 const std::vector<float>& scratchBufferValue,
162 const hidl_vec<uint32_t>& outputStateOutDimensions,
163 const std::vector<float>& outputStateOutValue,
164 const hidl_vec<uint32_t>& cellStateOutDimensions,
165 const std::vector<float>& cellStateOutValue,
166 const hidl_vec<uint32_t>& outputDimensions,
167 const std::vector<float>& outputValue,
168 armnn::Compute compute)
169 {
170 auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
171 using Model = typename HalPolicy::Model;
172 Model model = {};
173
174 // Inputs:
175 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
176 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
177 AddInputOperand<HalPolicy>(model, inputDimensions);
178
179 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
180 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
181 AddTensorOperand<HalPolicy>(model,
182 inputToInputWeightsDimensions,
183 inputToInputWeightsValue,
184 HalPolicy::OperandType::TENSOR_FLOAT32,
185 CreateNoValueLifeTime(inputToInputWeightsDimensions));
186 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
187 // [num_units, input_size].
188 AddTensorOperand<HalPolicy>(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
189 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
190 // [num_units, input_size].
191 AddTensorOperand<HalPolicy>(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
192 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
193 // [num_units, input_size].
194 AddTensorOperand<HalPolicy>(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
195 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
196 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
197 // “num_units”), or the second dimension of the “projection_weights”, if defined.
198 AddTensorOperand<HalPolicy>(model,
199 recurrentToInputWeightsDimensions,
200 recurrentToInputWeightsValue,
201 HalPolicy::OperandType::TENSOR_FLOAT32,
202 CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
203 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
204 // [num_units, output_size].
205 AddTensorOperand<HalPolicy>(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
206 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
207 // [num_units, output_size].
208 AddTensorOperand<HalPolicy>(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
209 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
210 // [num_units, output_size].
211 AddTensorOperand<HalPolicy>(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
212 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
213 AddTensorOperand<HalPolicy>(model,
214 cellToInputWeightsDimensions,
215 cellToInputWeightsValue,
216 HalPolicy::OperandType::TENSOR_FLOAT32,
217 CreateNoValueLifeTime(cellToInputWeightsDimensions));
218 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
219 AddTensorOperand<HalPolicy>(model,
220 cellToForgetWeightsDimensions,
221 cellToForgetWeightsValue,
222 HalPolicy::OperandType::TENSOR_FLOAT32,
223 CreateNoValueLifeTime(cellToForgetWeightsDimensions));
224 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
225 AddTensorOperand<HalPolicy>(model,
226 cellToOutputWeightsDimensions,
227 cellToOutputWeightsValue,
228 HalPolicy::OperandType::TENSOR_FLOAT32,
229 CreateNoValueLifeTime(cellToOutputWeightsDimensions));
230 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
231 AddTensorOperand<HalPolicy>(model,
232 inputGateBiasDimensions,
233 inputGateBiasValue,
234 HalPolicy::OperandType::TENSOR_FLOAT32,
235 CreateNoValueLifeTime(inputGateBiasDimensions));
236 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
237 AddTensorOperand<HalPolicy>(model, forgetGateBiasDimensions, forgetGateBiasValue);
238 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
239 AddTensorOperand<HalPolicy>(model, cellBiasDimensions, cellBiasValue);
240 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
241 AddTensorOperand<HalPolicy>(model, outputGateBiasDimensions, outputGateBiasValue);
242 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
243 // [output_size, num_units].
244 AddTensorOperand<HalPolicy>(model,
245 projectionWeightsDimensions,
246 projectionWeightsValue,
247 HalPolicy::OperandType::TENSOR_FLOAT32,
248 CreateNoValueLifeTime(projectionWeightsDimensions));
249 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
250 AddTensorOperand<HalPolicy>(model,
251 projectionBiasDimensions,
252 projectionBiasValue,
253 HalPolicy::OperandType::TENSOR_FLOAT32,
254 CreateNoValueLifeTime(projectionBiasDimensions));
255
256 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
257 AddInputOperand<HalPolicy>(model, outputStateInDimensions);
258 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
259 AddInputOperand<HalPolicy>(model, cellStateInDimensions);
260
261 // Constant scalar values (the VTS test adds these as tensors of dim {})
262 // 20: The activation function: A value indicating the activation function:
263 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
264 AddTensorOperand<HalPolicy>(model,
265 activationFunctionDimensions,
266 activationFunctionValue,
267 HalPolicy::OperandType::INT32);
268 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
269 // If set to 0.0 then clipping is disabled.
270 AddTensorOperand<HalPolicy>(model,
271 cellClippingThresholdDimensions,
272 cellClippingThresholdValue,
273 HalPolicy::OperandType::FLOAT32);
274 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
275 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
276 AddTensorOperand<HalPolicy>(model,
277 projectionClippingThresholdDimensions,
278 projectionClippingThresholdValue,
279 HalPolicy::OperandType::FLOAT32);
280
281 bool normalizationEnabled = false;
282
283 // If any of the layer normalization tensors have a value, all of the normalization operands are added
284 if (!inputLayerNormWeightsValue.empty() ||
285 !forgetLayerNormWeightsValue.empty() ||
286 !cellLayerNormWeightsValue.empty() ||
287 !outputLayerNormWeightsValue.empty())
288 {
289 // Normalization:
290 // 23:The input layer normalization weights. A 1-D tensor of shape [num_units].
291 // Used to rescale normalized inputs to activation at input gate.
292 AddTensorOperand<HalPolicy>(model,
293 inputLayerNormWeightsDimensions,
294 inputLayerNormWeightsValue,
295 HalPolicy::OperandType::TENSOR_FLOAT32,
296 CreateNoValueLifeTime(inputLayerNormWeightsDimensions));
297 // 24:The forget layer normalization weights. A 1-D tensor of shape [num_units].
298 // Used to rescale normalized inputs to activation at forget gate.
299 AddTensorOperand<HalPolicy>(model,
300 forgetLayerNormWeightsDimensions,
301 forgetLayerNormWeightsValue,
302 HalPolicy::OperandType::TENSOR_FLOAT32,
303 CreateNoValueLifeTime(forgetLayerNormWeightsDimensions));
304 // 25:The cell layer normalization weights. A 1-D tensor of shape [num_units].
305 // Used to rescale normalized inputs to activation at cell gate.
306 AddTensorOperand<HalPolicy>(model,
307 cellLayerNormWeightsDimensions,
308 cellLayerNormWeightsValue,
309 HalPolicy::OperandType::TENSOR_FLOAT32,
310 CreateNoValueLifeTime(cellLayerNormWeightsDimensions));
311 // 26:The output layer normalization weights. A 1-D tensor of shape [num_units].
312 // Used to rescale normalized inputs to activation at output gate.
313 AddTensorOperand<HalPolicy>(model,
314 outputLayerNormWeightsDimensions,
315 outputLayerNormWeightsValue,
316 HalPolicy::OperandType::TENSOR_FLOAT32,
317 CreateNoValueLifeTime(outputLayerNormWeightsDimensions));
318
319 normalizationEnabled = true;
320 }
321
322 // Outputs:
323 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
324 // CIFG, or [batch_size, num_units * 3] without CIFG.
325 AddOutputOperand<HalPolicy>(model, scratchBufferDimensions);
326 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
327 AddOutputOperand<HalPolicy>(model, outputStateOutDimensions);
328 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
329 AddOutputOperand<HalPolicy>(model, cellStateOutDimensions);
330 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
331 // effectively the same as the current “output state (out)” value.
332 AddOutputOperand<HalPolicy>(model, outputDimensions);
333
334 // make the lstm operation
335 model.operations.resize(1);
336 model.operations[0].type = HalPolicy::OperationType::LSTM;
337
338 if (normalizationEnabled)
339 {
340 model.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
341 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26};
342 model.operations[0].outputs = hidl_vec<uint32_t> {27, 28, 29, 30};
343 }
344 else
345 {
346 model.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
347 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
348 model.operations[0].outputs = hidl_vec<uint32_t> {23, 24, 25, 26};
349 }
350
351 // define the input values
352 hidl_vec<RequestArgument> inputArguments;
353 inputArguments.resize(3);
354
355 inputArguments[0] = CreateRequestArgument<float>(inputValue, 0);
356 inputArguments[1] = CreateRequestArgument<float>(outputStateInValue, 1);
357 inputArguments[2] = CreateRequestArgument<float>(cellStateInValue, 2);
358
359 // define the expected output values
360 hidl_vec<RequestArgument> outputArguments;
361 outputArguments.resize(4);
362
363 outputArguments[0] = CreateRequestArgument<float>(scratchBufferValue, 3);
364 outputArguments[1] = CreateRequestArgument<float>(outputStateOutValue, 4);
365 outputArguments[2] = CreateRequestArgument<float>(cellStateOutValue, 5);
366 outputArguments[3] = CreateRequestArgument<float>(outputValue, 6);
367
368 V1_0::Request request = {};
369 request.inputs = inputArguments;
370 request.outputs = outputArguments;
371
372 // set the input data
373 AddPoolAndSetData(inputValue.size(), request, inputValue.data());
374 AddPoolAndSetData(outputStateInValue.size(), request, outputStateInValue.data());
375 AddPoolAndSetData(cellStateInValue.size(), request, cellStateInValue.data());
376
377 // add memory for the outputs
378 AddPoolAndGetData<float>(scratchBufferValue.size(), request);
379 android::sp<IMemory> outputStateOutMemory = AddPoolAndGetData<float>(outputStateOutValue.size(), request);
380 float* outputStateOutData = static_cast<float*>(static_cast<void*>(outputStateOutMemory->getPointer()));
381 android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData<float>(cellStateOutValue.size(), request);
382 float* cellStateOutData = static_cast<float*>(static_cast<void*>(cellStateOutMemory->getPointer()));
383 android::sp<IMemory> outputMemory = AddPoolAndGetData<float>(outputValue.size(), request);
384 float* outputData = static_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
385
386 // make the prepared model and run the execution
387 ExecuteModel(model, *driver, request);
388
389 // check the results
390 for (size_t i = 0; i < outputStateOutValue.size(); ++i)
391 {
392 BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
393 "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
394 }
395 for (size_t i = 0; i < cellStateOutValue.size(); ++i)
396 {
397 BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i]),
398 "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
399 }
400 for (size_t i = 0; i < outputValue.size(); ++i)
401 {
402 BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
403 "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
404 }
405 }
406
407 template <typename HalPolicy>
408 void QuantizedLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
409 const std::vector<uint8_t>& inputValue,
410 const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
411 const std::vector<uint8_t>& inputToInputWeightsValue,
412 const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
413 const std::vector<uint8_t>& inputToForgetWeightsValue,
414 const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
415 const std::vector<uint8_t>& inputToCellWeightsValue,
416 const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
417 const std::vector<uint8_t>& inputToOutputWeightsValue,
418 const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
419 const std::vector<uint8_t>& recurrentToInputWeightsValue,
420 const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
421 const std::vector<uint8_t>& recurrentToForgetWeightsValue,
422 const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
423 const std::vector<uint8_t>& recurrentToCellWeightsValue,
424 const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
425 const std::vector<uint8_t>& recurrentToOutputWeightsValue,
426 const hidl_vec<uint32_t>& inputGateBiasDimensions,
427 const std::vector<int32_t>& inputGateBiasValue,
428 const hidl_vec<uint32_t>& forgetGateBiasDimensions,
429 const std::vector<int32_t>& forgetGateBiasValue,
430 const hidl_vec<uint32_t>& cellBiasDimensions,
431 const std::vector<int32_t>& cellBiasValue,
432 const hidl_vec<uint32_t>& outputGateBiasDimensions,
433 const std::vector<int32_t>& outputGateBiasValue,
434 const hidl_vec<uint32_t>& previousOutputInDimensions,
435 const std::vector<uint8_t>& previousOutputInValue,
436 const hidl_vec<uint32_t>& previousCellStateInDimensions,
437 const std::vector<int16_t>& previousCellStateInValue,
438 const hidl_vec<uint32_t>& cellStateOutDimensions,
439 const std::vector<int16_t>& cellStateOutValue,
440 const hidl_vec<uint32_t>& outputDimensions,
441 const std::vector<uint8_t>& outputValue)
442 {
443 auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::GpuAcc));
444 using Model = typename HalPolicy::Model;
445 Model model = {};
446
447 float inputOutputScale = 0.0078125f;
448 int32_t inputOutputOffset = 128;
449
450 float cellStateScale = 0.00048828125f;
451 int32_t cellStateOffset = 0;
452
453 float weightsScale = 0.00408021f;
454 int32_t weightsOffset = 100;
455
456 float biasScale = 3.1876640625e-05f;
457 int32_t biasOffset = 0;
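// Illustrative note (not part of the original test): these parameters follow the usual affine quantization
// rule real = scale * (quantized - zeroPoint). For example, an input byte of 192 with
// inputOutputScale = 0.0078125 (1/128) and inputOutputOffset = 128 represents 0.0078125 * (192 - 128) = 0.5,
// and the int16 cell state scale of 0.00048828125 (1/2048) gives the documented range of roughly
// -2^4 to 2^4 * 32767/32768.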
458
459 // Inputs:
460 // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
461 // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
462 AddInputOperand<HalPolicy>(model,
463 inputDimensions,
464 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
465 inputOutputScale,
466 inputOutputOffset);
467
468 // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
469 // [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
470 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
471 AddTensorOperand<HalPolicy>(model,
472 inputToInputWeightsDimensions,
473 inputToInputWeightsValue,
474 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
475 CreateNoValueLifeTime(inputToInputWeightsDimensions),
476 weightsScale,
477 weightsOffset);
478 // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
479 // [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
480 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
481 AddTensorOperand<HalPolicy>(model,
482 inputToForgetWeightsDimensions,
483 inputToForgetWeightsValue,
484 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
485 CreateNoValueLifeTime(inputToForgetWeightsDimensions),
486 weightsScale,
487 weightsOffset);
488 // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
489 // [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
490 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
491 AddTensorOperand<HalPolicy>(model,
492 inputToCellWeightsDimensions,
493 inputToCellWeightsValue,
494 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
495 CreateNoValueLifeTime(inputToCellWeightsDimensions),
496 weightsScale,
497 weightsOffset);
498 // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
499 // [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
500 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
501 AddTensorOperand<HalPolicy>(model,
502 inputToOutputWeightsDimensions,
503 inputToOutputWeightsValue,
504 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
505 CreateNoValueLifeTime(inputToOutputWeightsDimensions),
506 weightsScale,
507 weightsOffset);
508 // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
509 // [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
510 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
511 AddTensorOperand<HalPolicy>(model,
512 recurrentToInputWeightsDimensions,
513 recurrentToInputWeightsValue,
514 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
515 CreateNoValueLifeTime(recurrentToInputWeightsDimensions),
516 weightsScale,
517 weightsOffset);
518 // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
519 // [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
520 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
521 AddTensorOperand<HalPolicy>(model,
522 recurrentToForgetWeightsDimensions,
523 recurrentToForgetWeightsValue,
524 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
525 CreateNoValueLifeTime(recurrentToForgetWeightsDimensions),
526 weightsScale,
527 weightsOffset);
528 // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
529 // [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
530 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
531 AddTensorOperand<HalPolicy>(model,
532 recurrentToCellWeightsDimensions,
533 recurrentToCellWeightsValue,
534 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
535 CreateNoValueLifeTime(recurrentToCellWeightsDimensions),
536 weightsScale,
537 weightsOffset);
538 // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
539 // [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
540 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
541 AddTensorOperand<HalPolicy>(model,
542 recurrentToOutputWeightsDimensions,
543 recurrentToOutputWeightsValue,
544 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
545 CreateNoValueLifeTime(recurrentToOutputWeightsDimensions),
546 weightsScale,
547 weightsOffset);
548 // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
549 // bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
550 // of input and weights scales and zeroPoint equal to 0.
551 AddTensorOperand<HalPolicy>(model,
552 inputGateBiasDimensions,
553 inputGateBiasValue,
554 HalPolicy::OperandType::TENSOR_INT32,
555 CreateNoValueLifeTime(inputGateBiasDimensions),
556 biasScale,
557 biasOffset);
558 // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
559 // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
560 // of input and weights scales and zeroPoint equal to 0.
561 AddTensorOperand<HalPolicy>(model,
562 forgetGateBiasDimensions,
563 forgetGateBiasValue,
564 HalPolicy::OperandType::TENSOR_INT32,
565 CreateNoValueLifeTime(forgetGateBiasDimensions),
566 biasScale,
567 biasOffset);
568 // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
569 // for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
570 // and weights scales and zeroPoint equal to 0.
571 AddTensorOperand<HalPolicy>(model,
572 cellBiasDimensions,
573 cellBiasValue,
574 HalPolicy::OperandType::TENSOR_INT32,
575 CreateNoValueLifeTime(cellBiasDimensions),
576 biasScale,
577 biasOffset);
578 // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
579 // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
580 // of input and weights scales and zeroPoint equal to 0.
581 AddTensorOperand<HalPolicy>(model,
582 outputGateBiasDimensions,
583 outputGateBiasValue,
584 HalPolicy::OperandType::TENSOR_INT32,
585 CreateNoValueLifeTime(outputGateBiasDimensions),
586 biasScale,
587 biasOffset);
588
589 // 13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
590 // [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
591 // It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
592 AddInputOperand<HalPolicy>(model,
593 previousCellStateInDimensions,
594 HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
595 cellStateScale,
596 cellStateOffset);
597 // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
598 // [numBatches, outputSize] specifying the output of the LSTM cell from previous time-step. Tensor
599 // is quantized with a fixed quantization range of -1, 127/128.
600 AddInputOperand<HalPolicy>(model,
601 previousOutputInDimensions,
602 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
603 inputOutputScale,
604 inputOutputOffset);
605
606 // Outputs:
607 // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
608 // which contains a cell state from the current time step. Tensor is quantized using a quantization range
609 // of -2^4, 2^4 * 32767/32768.
610 AddOutputOperand<HalPolicy>(model,
611 cellStateOutDimensions,
612 HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
613 cellStateScale,
614 cellStateOffset);
615 // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
616 // contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
617 AddOutputOperand<HalPolicy>(model,
618 outputDimensions,
619 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
620 inputOutputScale,
621 inputOutputOffset);
622
623 // make the lstm operation
624 model.operations.resize(1);
625 model.operations[0].type = HalPolicy::OperationType::QUANTIZED_16BIT_LSTM;
626
627 model.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7,
628 8, 9, 10, 11, 12, 13, 14};
629 model.operations[0].outputs = hidl_vec<uint32_t> {15, 16};
630
631 // define the input values
632 hidl_vec<RequestArgument> inputArguments;
633 inputArguments.resize(3);
634
635 inputArguments[0] = CreateRequestArgument<uint8_t>(inputValue, 0);
636 inputArguments[1] = CreateRequestArgument<int16_t>(previousCellStateInValue, 1);
637 inputArguments[2] = CreateRequestArgument<uint8_t>(previousOutputInValue, 2);
638
639 // define the expected output values
640 hidl_vec<RequestArgument> outputArguments;
641 outputArguments.resize(2);
642
643 outputArguments[0] = CreateRequestArgument<int16_t>(cellStateOutValue, 3);
644 outputArguments[1] = CreateRequestArgument<uint8_t>(outputValue, 4);
645
646 V1_0::Request request = {};
647 request.inputs = inputArguments;
648 request.outputs = outputArguments;
649
650 // set the input data
651 AddPoolAndSetData(inputValue.size(), request, inputValue.data());
652 AddPoolAndSetData(previousCellStateInValue.size(), request, previousCellStateInValue.data());
653 AddPoolAndSetData(previousOutputInValue.size(), request, previousOutputInValue.data());
654
655 // add memory for the outputs
656 android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData<int16_t>(cellStateOutValue.size(), request);
657 int16_t* cellStateOutData = static_cast<int16_t*>(static_cast<void*>(cellStateOutMemory->getPointer()));
658 android::sp<IMemory> outputMemory = AddPoolAndGetData<uint8_t>(outputValue.size(), request);
659 uint8_t* outputData = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
660
661 // make the prepared model and run the execution
662 ExecuteModel(model, *driver, request);
663
664 // check the results
665 for (size_t i = 0; i < cellStateOutValue.size(); ++i)
666 {
667 BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i], 1.0f),
668 "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
669 }
670 for (size_t i = 0; i < outputValue.size(); ++i)
671 {
672 BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i], 1.0f),
673 "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
674 }
675 }
676
677 template <typename HalPolicy>
678 void LstmNoCifgNoPeepholeNoProjection(armnn::Compute compute)
679 {
680 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm.model.cpp
681 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm.example.cpp
682 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
683
684 uint32_t batchSize = 1;
685 uint32_t inputSize = 2;
686 uint32_t numUnits = 4;
687 uint32_t outputSize = numUnits;
688
689 // Inputs:
690 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
691 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
692 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
693 std::vector<float> inputValue{2.0f, 3.0f};
694
695 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
696 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
697 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
698 std::vector<float> inputToInputWeightsValue{-0.45018822f, -0.02338299f,
699 -0.08705890f, -0.34550029f,
700 0.04266912f, -0.15680569f,
701 -0.34856534f, 0.43890524f};
702 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
703 // [num_units, input_size].
704 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
705 std::vector<float> inputToForgetWeightsValue{ 0.09701663f, 0.20334584f,
706 -0.50592935f, -0.31343272f,
707 -0.40032279f, 0.44781327f,
708 0.01387155f, -0.35593212f};
709 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
710 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
711 std::vector<float> inputToCellWeightsValue{-0.50013041f, 0.13702840f,
712 0.11810488f, 0.20131630f,
713 -0.20583314f, 0.44344562f,
714 0.22077113f, -0.29909778f};
715 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
716 // [num_units, input_size].
717 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
718 std::vector<float> inputToOutputWeightsValue{-0.25065863f, -0.28290087f,
719 0.04613829f, 0.40525138f,
720 0.44272184f, 0.03897077f,
721 -0.15568960f, 0.19487578f};
722 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
723 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
724 // “num_units”), or the second dimension of the “projection_weights”, if defined.
725 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
726 std::vector<float> recurrentToInputWeightsValue{-0.00635350f, -0.20423880f, 0.31454784f, -0.35746509f,
727 0.28902304f, 0.08183324f, -0.16555229f, 0.02286911f,
728 -0.13566875f, 0.03034258f, 0.48091322f, -0.12528998f,
729 0.24077177f, -0.51332325f, -0.33502164f, 0.10629296f};
730 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
731 // [num_units, output_size].
732 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
733 std::vector<float> recurrentToForgetWeightsValue{-0.48684245f, -0.06655136f, 0.42224967f, 0.21126390f,
734 0.27654213f, 0.20864892f, -0.07646349f, 0.45877004f,
735 0.00141793f, -0.14609534f, 0.36447752f, 0.09196436f,
736 0.28053468f, 0.01560611f, -0.20127171f, -0.01140004f};
737 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
738 // [num_units, output_size].
739 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
740 std::vector<float> recurrentToCellWeightsValue{-0.34074140f, 0.24443203f, -0.20785320f, 0.26320225f,
741 0.05695659f, -0.00123841f, -0.47447860f, -0.35869038f,
742 -0.06418842f, -0.13502428f, -0.50176400f, 0.22830659f,
743 -0.46367589f, 0.26016325f, -0.03894562f, -0.16368064f};
744 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
745 // [num_units, output_size].
746 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
747 std::vector<float> recurrentToOutputWeightsValue{ 0.43385774f, -0.17194885f, 0.27182370f, 0.09215671f,
748 0.24107647f, -0.39835793f, 0.18212086f, 0.01301402f,
749 0.48572797f, -0.50656658f, 0.20047462f, -0.20607421f,
750 -0.51818722f, -0.15390486f, 0.04681480f, 0.39922136f};
751 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
752 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
753 std::vector<float> cellToInputWeightsValue;
754 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
755 hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
756 std::vector<float> cellToForgetWeightsValue;
757 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
758 hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
759 std::vector<float> cellToOutputWeightsValue;
760 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
761 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
762 std::vector<float> inputGateBiasValue(numUnits, 0.0f);
763 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
764 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
765 std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
766 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
767 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
768 std::vector<float> cellBiasValue(numUnits, 0.0f);
769 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
770 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
771 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
772 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
773 // [output_size, num_units].
774 hidl_vec<uint32_t> projectionWeightsDimensions{0};
775 std::vector<float> projectionWeightsValue;
776 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
777 hidl_vec<uint32_t> projectionBiasDimensions{0};
778 std::vector<float> projectionBiasValue;
779
780 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
781 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
782 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
783 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
784 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
785 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
786
787 // Constant scalar values (the VTS test adds these as tensors of dim {})
788 // 20: The activation function: A value indicating the activation function:
789 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
790 hidl_vec<uint32_t> activationFunctionDimensions{};
791 std::vector<int32_t> activationFunctionValue{4};
792 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
793 // If set to 0.0 then clipping is disabled.
794 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
795 std::vector<float> cellClippingThresholdValue{0.0f};
796 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
797 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
798 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
799 std::vector<float> projectionClippingThresholdValue{0.0f};
800
801 // Normalization:
802 // 23:The input layer normalization weights. A 1-D tensor of shape [num_units].
803 // Used to rescale normalized inputs to activation at input gate.
804 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
805 std::vector<float> inputLayerNormWeightsValue;
806 // 24:The forget layer normalization weights. A 1-D tensor of shape [num_units].
807 // Used to rescale normalized inputs to activation at forget gate.
808 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
809 std::vector<float> forgetLayerNormWeightsValue;
810 // 25:The cell layer normalization weights. A 1-D tensor of shape [num_units].
811 // Used to rescale normalized inputs to activation at cell gate.
812 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
813 std::vector<float> cellLayerNormWeightsValue;
814 // 26:The output layer normalization weights. A 1-D tensor of shape [num_units].
815 // Used to rescale normalized inputs to activation at output gate.
816 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
817 std::vector<float> outputLayerNormWeightsValue;
818
819 // Outputs:
820 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
821 // CIFG, or [batch_size, num_units * 3] without CIFG.
822 // HOWEVER, looking at the code, it seems to be the opposite: (cifg ? 3 : 4) * numUnits
823 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
824 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
825 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
826 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
827 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
828 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
829 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
830 std::vector<float> outputStateOutValue {-0.0297319f, 0.122947f, 0.208851f, -0.153588f};
831 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
832 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
833 std::vector<float> cellStateOutValue {-0.145439f, 0.157475f, 0.293663f, -0.277353f};
834 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
835 // effectively the same as the current “output state (out)” value.
836 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
837 std::vector<float> outputValue {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f};
838
839 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
840 inputToInputWeightsDimensions, inputToInputWeightsValue,
841 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
842 inputToCellWeightsDimensions, inputToCellWeightsValue,
843 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
844 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
845 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
846 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
847 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
848 cellToInputWeightsDimensions, cellToInputWeightsValue,
849 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
850 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
851 inputGateBiasDimensions, inputGateBiasValue,
852 forgetGateBiasDimensions, forgetGateBiasValue,
853 cellBiasDimensions, cellBiasValue,
854 outputGateBiasDimensions, outputGateBiasValue,
855 projectionWeightsDimensions, projectionWeightsValue,
856 projectionBiasDimensions, projectionBiasValue,
857 outputStateInDimensions, outputStateInValue,
858 cellStateInDimensions, cellStateInValue,
859 activationFunctionDimensions, activationFunctionValue,
860 cellClippingThresholdDimensions, cellClippingThresholdValue,
861 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
862 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
863 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
864 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
865 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
866 scratchBufferDimensions, scratchBufferValue,
867 outputStateOutDimensions, outputStateOutValue,
868 cellStateOutDimensions, cellStateOutValue,
869 outputDimensions, outputValue,
870 compute);
871 }
872
873 template <typename HalPolicy>
874 void LstmCifgPeepholeNoProjection(armnn::Compute compute)
875 {
876 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
877 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
878 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
879
880 uint32_t batchSize = 1;
881 uint32_t inputSize = 2;
882 uint32_t numUnits = 4;
883 uint32_t outputSize = numUnits;
884
885 // Inputs:
886 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
887 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
888 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
889 std::vector<float> inputValue{2.0f, 3.0f};
890
891 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
892 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
893 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
894 std::vector<float> inputToInputWeightsValue;
895 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
896 // [num_units, input_size].
897 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
898 std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
899 0.13056988f, -0.36333650f,
900 -0.22755712f, 0.28253698f,
901 0.24407166f, 0.33826375f};
902 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
903 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
904 std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
905 -0.09624726f, 0.05100781f,
906 0.04717243f, 0.48944736f,
907 -0.38535351f, -0.17212132f};
908 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
909 // [num_units, input_size].
910 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
911 std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
912 -0.55932593f, -0.09426838f,
913 -0.44257352f, 0.54939759f,
914 0.01533556f, 0.42751634f};
915 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
916 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
917 // “num_units”), or the second dimension of the “projection_weights”, if defined.
918 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
919 std::vector<float> recurrentToInputWeightsValue;
920 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
921 // [num_units, output_size].
922 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
923 std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
924 -0.14340827f, 0.36986142f, 0.23414481f, 0.55899000f,
925 0.10798943f, -0.41174671f, 0.17751795f, -0.34484994f,
926 -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f};
927 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
928 // [num_units, output_size].
929 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
930 std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
931 0.42957711f, 0.01841056f, -0.32764608f, -0.33027974f,
932 -0.10826075f, 0.20675004f, 0.19069612f, -0.03026325f,
933 -0.54532051f, 0.33003211f, 0.44901288f, 0.21193194f};
934 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
935 // [num_units, output_size].
936 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
937 std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
938 0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
939 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
940 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
941 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
942 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
943 std::vector<float> cellToInputWeightsValue;
944 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
945 hidl_vec<uint32_t> cellToForgetWeightsDimensions{4};
946 std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
947 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
948 hidl_vec<uint32_t> cellToOutputWeightsDimensions{4};
949 std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
950 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
951 hidl_vec<uint32_t> inputGateBiasDimensions{0}; // VTS was {4} -> {0} ?
952 std::vector<float> inputGateBiasValue;
953 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
954 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
955 std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
956 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
957 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
958 std::vector<float> cellBiasValue(numUnits, 0.0f);
959 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
960 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
961 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
962 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
963 // [output_size, num_units].
964 hidl_vec<uint32_t> projectionWeightsDimensions{0};
965 std::vector<float> projectionWeightsValue;
966 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
967 hidl_vec<uint32_t> projectionBiasDimensions{0};
968 std::vector<float> projectionBiasValue;
969
970 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
971 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
972 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
973 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
974 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
975 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
976
977 // Constant scalar values (the VTS test adds these as tensors of dim {})
978 // 20: The activation function: A value indicating the activation function:
979 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
980 hidl_vec<uint32_t> activationFunctionDimensions{};
981 std::vector<int32_t> activationFunctionValue{4};
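// Illustrative note, added for readability: the scalar {4} above selects Tanh from the encoding listed in the
// comment. A hedged sketch (the enum below is hypothetical, not a type from the NNAPI headers):
//     enum class LstmActivation : int32_t { None = 0, Relu = 1, Relu6 = 3, Tanh = 4, Sigmoid = 6 };
//     std::vector<int32_t> activationFunctionValue{ static_cast<int32_t>(LstmActivation::Tanh) }; // == {4}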
982 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
983 // If set to 0.0 then clipping is disabled.
984 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
985 std::vector<float> cellClippingThresholdValue{0.0f};
986 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
987 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
988 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
989 std::vector<float> projectionClippingThresholdValue{0.0f};
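// A minimal sketch of how a non-zero clipping threshold would behave (an assumption about the reference
// semantics; cellState below is an illustrative scalar, not a variable in this test). With 0.0f, as used
// here, no clamping is applied:
//     if (cellClippingThresholdValue[0] > 0.0f)
//     {
//         cellState = std::clamp(cellState, -cellClippingThresholdValue[0], cellClippingThresholdValue[0]);
//     }
//     // ...and likewise for the projection output using projectionClippingThresholdValue[0].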
990
991 // Normalization:
992 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
993 // Used to rescale normalized inputs to activation at input gate.
994 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
995 std::vector<float> inputLayerNormWeightsValue;
996 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
997 // Used to rescale normalized inputs to activation at forget gate.
998 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
999 std::vector<float> forgetLayerNormWeightsValue;
1000 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1001 // Used to rescale normalized inputs to activation at cell gate.
1002 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1003 std::vector<float> cellLayerNormWeightsValue;
1004 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1005 // Used to rescale normalized inputs to activation at output gate.
1006 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1007 std::vector<float> outputLayerNormWeightsValue;
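// For reference only (a hedged description of the layer-normalised LSTM variant; this test omits all four
// weight vectors above, so none of this is exercised here): when present, each gate's pre-activation is
// layer-normalised, then rescaled by the corresponding weights and shifted by the gate bias, roughly
//     gateInput = layerNormWeights * LayerNorm(gateInput) + gateBias;
// before the gate non-linearity is applied.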
1008
1009 // Outputs:
1010 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1011 // CIFG, or [batch_size, num_units * 3] without CIFG.
1012 // HOWEVER, looking at the code, it seems to be the opposite: (cifg ? 3 : 4) * numUnits
1013 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1014 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1015 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1016 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
1017 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
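// Readability note (an assumption restating the comment above; cifgEnabled and scratchWidth are illustrative
// names only): this test runs with CIFG, since the recurrent-to-input weights and input gate bias are omitted,
// so the dimensions above follow the "(cifg ? 3 : 4) * numUnits" reading:
//     const bool     cifgEnabled  = true;                               // no *-to-input weights / bias
//     const uint32_t scratchWidth = (cifgEnabled ? 3u : 4u) * numUnits; // == numUnits * 3, as declared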
1018 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1019 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1020 std::vector<float> outputStateOutValue{-0.364445f, -0.00352185f, 0.128866f, -0.0516365f};
1021 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1022 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1023 std::vector<float> cellStateOutValue{-0.760444f, -0.0180416f, 0.182264f, -0.0649371f};
1024 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1025 // effectively the same as the current “output state (out)” value.
1026 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1027 std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f};
1028
1029 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1030 inputToInputWeightsDimensions, inputToInputWeightsValue,
1031 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1032 inputToCellWeightsDimensions, inputToCellWeightsValue,
1033 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1034 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1035 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1036 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1037 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1038 cellToInputWeightsDimensions, cellToInputWeightsValue,
1039 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1040 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1041 inputGateBiasDimensions, inputGateBiasValue,
1042 forgetGateBiasDimensions, forgetGateBiasValue,
1043 cellBiasDimensions, cellBiasValue,
1044 outputGateBiasDimensions, outputGateBiasValue,
1045 projectionWeightsDimensions, projectionWeightsValue,
1046 projectionBiasDimensions, projectionBiasValue,
1047 outputStateInDimensions, outputStateInValue,
1048 cellStateInDimensions, cellStateInValue,
1049 activationFunctionDimensions, activationFunctionValue,
1050 cellClippingThresholdDimensions, cellClippingThresholdValue,
1051 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1052 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1053 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1054 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1055 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1056 scratchBufferDimensions, scratchBufferValue,
1057 outputStateOutDimensions, outputStateOutValue,
1058 cellStateOutDimensions, cellStateOutValue,
1059 outputDimensions, outputValue,
1060 compute);
1061 }
1062
1063 template <typename HalPolicy>
1064 void LstmNoCifgPeepholeProjection(armnn::Compute compute)
1065 {
1066 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm3.model.cpp
1067 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm3.example.cpp
1068 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
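// An illustrative contrast (an assumption about intent, written with the V1_0 HAL types used elsewhere in
// this file rather than quoted from the VTS model): a CONSTANT_COPY operand carries its data inside the
// model itself, whereas a MODEL_INPUT operand is bound to a request pool at execution time.
//     operand.lifetime = V1_0::OperandLifeTime::CONSTANT_COPY; // data stored in model.operandValues
//     operand.lifetime = V1_0::OperandLifeTime::MODEL_INPUT;   // data supplied through the Request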
1069
1070 uint32_t batchSize = 2;
1071 uint32_t inputSize = 5;
1072 uint32_t numUnits = 20;
1073 uint32_t outputSize = 16;
1074
1075 // Inputs:
1076 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1077 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1078 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
1079 std::vector<float> inputValue{0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
1080 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f};
1081
1082 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1083 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
1084 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
1085 std::vector<float> inputToInputWeightsValue
1086 {
1087 0.0213936830f, 0.0612455100f, 0.0469051670f, -0.0146576770f, -0.0314946300f,
1088 0.0917180300f, 0.1464780100f, 0.1079719300f, -0.0057968358f, 0.0019193048f,
1089 -0.2726754000f, 0.1015402900f, -0.0185398850f, 0.0803498850f, -0.1026238500f,
1090 -0.0225997870f, -0.0912115500f, -0.0086759670f, -0.0452061030f, -0.0821282000f,
1091 -0.0080459520f, 0.0154780810f, 0.0552172470f, 0.0387195870f, 0.0441536270f,
1092 -0.0645324300f, 0.0503182500f, -0.0469351080f, -0.0081644309f, 0.0145742260f,
1093 -0.1671009000f, -0.1551955200f, -0.1681979700f, -0.1397126900f, -0.1195305900f,
1094 0.2500548700f, -0.2279098300f, 0.0098550870f, -0.0281409580f, -0.1120069800f,
1095 0.1129540800f, -0.0035217577f, 0.0544850750f, 0.0518469500f, 0.0647112060f,
1096 0.1098919300f, 0.1167478600f, 0.0349060700f, 0.0772735700f, 0.1139058500f,
1097 -0.1863375000f, -0.1034451000f, -0.1394518900f, -0.0494012270f, -0.1876706300f,
1098 0.0424839030f, 0.1423355200f, 0.1383258100f, 0.1835016500f, 0.1454560300f,
1099 -0.0285457040f, 0.0249395310f, 0.0509297180f, 0.0076203286f, -0.0029723682f,
1100 -0.0424842240f, -0.1182759600f, -0.0917110400f, -0.1080862800f, -0.1632798800f,
1101 -0.2273378000f, -0.0993647000f, -0.0171551070f, 0.0023917493f, 0.0492727640f,
1102 0.0038534778f, 0.0547645050f, 0.0897537840f, 0.0694723400f, 0.0801447600f,
1103 -0.0454423400f, -0.0497073000f, -0.0713563100f, -0.0489291060f, -0.0040420120f,
1104 -0.0092840260f, 0.0180420540f, 0.0036860977f, -0.0742730200f, -0.1143460400f,
1105 -0.0189954560f, 0.0314875430f, 0.0128349080f, 0.0199777540f, 0.0442566540f,
1106 -0.3929261300f, -0.1851933400f, -0.1165128100f, -0.0680989200f, 0.0113736770f
1107 };
1108 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1109 // [num_units, input_size].
1110 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
1111 std::vector<float> inputToForgetWeightsValue
1112 {
1113 -0.0018401089f, -0.0048522370f, 0.0369842400f, 0.0141817040f, 0.0282732360f,
1114 -0.0167261940f, -0.0524975900f, -0.1020426100f, 0.0086106600f, -0.0409795050f,
1115 -0.0098991870f, 0.0192389200f, -0.0281772690f, -0.0853510300f, -0.1458549500f,
1116 0.1066256700f, -0.0190973100f, -0.0178835340f, -0.0047269356f, -0.0451033230f,
1117 0.0030784295f, 0.0767847750f, 0.0746369600f, 0.0945313950f, 0.0814421000f,
1118 -0.1225789900f, -0.0339457580f, -0.0313034650f, 0.0456306260f, 0.0684388700f,
1119 -0.1349294500f, -0.0124800070f, -0.0811829000f, -0.0722449900f, -0.0962879100f,
1120 0.0451009460f, 0.0012300825f, 0.0139646620f, 0.0993723940f, 0.0254305900f,
1121 0.0695832400f, 0.0342572960f, 0.0482646000f, 0.0626799700f, 0.0526250680f,
1122 0.1278466600f, 0.0707789700f, 0.0257259350f, 0.0416500900f, 0.0724190500f,
1123 0.0186686440f, -0.0373772940f, -0.0627778300f, -0.0883363600f, -0.0401206050f,
1124 -0.0114055860f, -0.0078083350f, -0.0103013860f, -0.0051021670f, 0.0277174640f,
1125 0.0548342300f, 0.1144911100f, 0.1128965200f, 0.1093983900f, 0.1339650600f,
1126 -0.0840216600f, -0.0190146200f, -0.0446783040f, -0.0772056500f, 0.0143500630f,
1127 -0.1175795800f, -0.0652038000f, -0.0818573300f, -0.0767543240f, -0.0926143750f,
1128 0.1040549100f, 0.0529603360f, 0.0357558950f, 0.0358393860f, -0.0125405530f,
1129 0.0368812980f, 0.0291337600f, 0.0342015900f, 0.0544844700f, -0.0545233530f,
1130 0.0258271500f, 0.0232735500f, -0.0118571790f, -0.0011980024f, -0.0346417170f,
1131 -0.0261250940f, -0.1758261500f, -0.1592365700f, -0.2748677400f, -0.0006143371f,
1132 0.0001771948f, -8.470171e-05f, 0.0265180700f, 0.0457907650f, 0.069564960f
1133 };
1134 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
1135 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
1136 std::vector<float> inputToCellWeightsValue
1137 {
1138 -0.0458028300f, -0.0954946200f, -0.0324189850f, -0.0645463300f, -0.0435284530f,
1139 0.0430185870f, -0.0491523440f, -0.1241814400f, -0.0789854750f, -0.0759688900f,
1140 0.0194843620f, -0.1143496200f, -0.0074034138f, -0.0631484400f, -0.0929814950f,
1141 0.0062155537f, -0.0250343380f, -0.0028890965f, 0.0489295270f, 0.0623507500f,
1142 0.1066591800f, -0.0320367920f, -0.0850591600f, -0.1084335800f, -0.1300243300f,
1143 -0.0368164370f, -0.0213013400f, -0.0165182390f, 0.0047691227f, -0.0025825808f,
1144 0.0660178660f, 0.0299915340f, -0.1065283600f, -0.1037554000f, -0.1305607100f,
1145 -0.0326664300f, -0.0337024140f, -0.0064734240f, -0.0461169200f, 0.0144193390f,
1146 -0.0251743230f, 0.0396852000f, 0.0817775060f, 0.0615746800f, 0.1021009500f,
1147 -0.0096581940f, 0.0465117170f, 0.0360390600f, 0.0069369148f, 0.0159600950f,
1148 -0.0650766600f, 0.0955159800f, 0.0535688360f, 0.0640871400f, 0.1283566700f,
1149 -0.0087143290f, -0.2021196600f, -0.1209367400f, 0.0294504720f, 0.2849013000f,
1150 -0.0292279010f, 0.1164364000f, -0.0856026300f, 0.0994178600f, -0.0369995650f,
1151 -0.0288426260f, -0.0033637602f, -0.0170129020f, -0.0972086500f, -0.1119335100f,
1152 -0.0291551170f, -0.0179360340f, -0.0097689360f, -0.0422332400f, -0.0361596350f,
1153 0.0650511200f, -0.0217428920f, -0.0233772120f, -0.0722136400f, -0.0643055200f,
1154 0.0545386500f, 0.0911498140f, 0.0638733100f, 0.0075183930f, 0.0559609530f,
1155 0.0697793440f, 0.0464111680f, 0.1050991100f, 0.0746389400f, 0.0075130584f,
1156 0.0128509820f, 0.0455543100f, 0.0569556880f, 0.0655528500f, 0.0508014560f,
1157 -0.0098626830f, 0.0082677200f, -0.0265556090f, -0.0073611983f, -0.0014897042f
1158 };
1159 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1160 // [num_units, input_size].
1161 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
1162 std::vector<float> inputToOutputWeightsValue
1163 {
1164 -0.0998932000f, -0.0720195600f, -0.0528037730f, -0.1562959300f, -0.1500191800f,
1165 -0.0765075100f, 0.0235985500f, -0.0751553550f, -0.0803770900f, -0.1509353400f,
1166 0.0295175520f, -0.0475139300f, 0.0103505310f, -0.0266485100f, -0.0168397220f,
1167 -0.0231211630f, 0.0077019283f, 0.0128512570f, -0.0504064900f, -0.0129761000f,
1168 -0.0217377470f, -0.0383057930f, -0.0687058600f, -0.0148124700f, -0.0012853940f,
1169 0.1012423600f, 0.0831228350f, 0.0533130060f, -0.0622356460f, -0.0756371540f,
1170 -0.0278339030f, 0.0297749710f, 0.1130802000f, 0.0921890600f, 0.0950613500f,
1171 -0.0866657640f, -0.0371627060f, -0.0388809140f, -0.0358328450f, -0.0144815640f,
1172 -0.0982500300f, -0.1204856900f, -0.0976655860f, -0.0528763300f, -0.0964047000f,
1173 -0.1136642900f, 0.0357775050f, 0.1356881900f, 0.0524513830f, 0.0506493040f,
1174 0.0579895100f, -0.0218523350f, -0.0998488440f, 0.0147404750f, -0.0788979460f,
1175 0.0497469900f, 0.0141604730f, 0.0697393200f, 0.0496494200f, 0.0333646460f,
1176 0.0819012400f, 0.0255353670f, 0.0508931650f, 0.0485142540f, 0.0694581300f,
1177 -0.0789075640f, -0.0670761600f, -0.1184450800f, -0.0998668800f, -0.0750940300f,
1178 0.0626322600f, 0.1492558700f, 0.2018843600f, 0.1209845100f, 0.1463941500f,
1179 0.0015017595f, -0.0142673820f, -0.0341725700f, 0.0127114680f, 0.0028300495f,
1180 -0.0247584820f, -0.0509854800f, -0.0821182000f, 0.0142256720f, 0.0215441580f,
1181 0.0894972500f, 0.0750526800f, -0.0020780868f, 0.0490825800f, 0.0647629500f,
1182 -0.0229070630f, 0.0275624560f, 0.0401857350f, 0.0195675770f, -0.0155987390f,
1183 -0.0490973030f, -0.0171218660f, -0.0833682340f, -0.0233200200f, -0.084095600f
1184 };
1185 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1186 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1187 // “num_units”), or the second dimension of the “projection_weights”, if defined.
1188 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
1189 std::vector<float> recurrentToInputWeightsValue
1190 {
1191 -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f, // 00
1192 -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
1193 -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
1194 -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
1195 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f, // 01
1196 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
1197 -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
1198 0.14283475f, -0.07390571f, -0.06402044f, 0.062524505f,
1199 -0.093129106f, 0.04860203f, -0.08364217f, -0.08119002f, // 02
1200 0.009352075f, 0.22920375f, 0.0016303885f, 0.11583097f,
1201 -0.13732095f, 0.012405723f, -0.07551853f, 0.06343048f,
1202 0.12162708f, -0.031923793f, -0.014335606f, 0.01790974f,
1203 -0.10650317f, -0.0724401f, 0.08554849f, -0.05727212f, // 03
1204 0.06556731f, -0.042729504f, -0.043227166f, 0.011683251f,
1205 -0.013082158f, -0.029302018f, -0.010899579f, -0.062036745f,
1206 -0.022509435f, -0.00964907f, -0.01567329f, 0.04260106f,
1207 -0.07787477f, -0.11576462f, 0.017356863f, 0.048673786f, // 04
1208 -0.017577527f, -0.05527947f, -0.082487635f, -0.040137455f,
1209 -0.10820036f, -0.04666372f, 0.022746278f, -0.07851417f,
1210 0.01068115f, 0.032956902f, 0.022433773f, 0.0026891115f,
1211 0.08944216f, -0.0685835f, 0.010513544f, 0.07228705f, // 05
1212 0.02032331f, -0.059686817f, -0.0005566496f, -0.086984694f,
1213 0.040414046f, -0.1380399f, 0.094208956f, -0.05722982f,
1214 0.012092817f, -0.04989123f, -0.086576f, -0.003399834f,
1215 -0.04696032f, -0.045747425f, 0.10091314f, 0.048676282f, // 06
1216 -0.029037097f, 0.031399418f, -0.0040285117f, 0.047237843f,
1217 0.09504992f, 0.041799378f, -0.049185462f, -0.031518843f,
1218 -0.10516937f, 0.026374253f, 0.10058866f, -0.0033195973f,
1219 -0.041975245f, 0.0073591834f, 0.0033782164f, -0.004325073f, // 07
1220 -0.10167381f, 0.042500053f, -0.01447153f, 0.06464186f,
1221 -0.017142897f, 0.03312627f, 0.009205989f, 0.024138335f,
1222 -0.011337001f, 0.035530265f, -0.010912711f, 0.0706555f,
1223 -0.005894094f, 0.051841937f, -0.1401738f, -0.02351249f, // 08
1224 0.0365468f, 0.07590991f, 0.08838724f, 0.021681072f,
1225 -0.10086113f, 0.019608743f, -0.06195883f, 0.077335775f,
1226 0.023646897f, -0.095322326f, 0.02233014f, 0.09756986f,
1227 -0.048691444f, -0.009579111f, 0.07595467f, 0.11480546f, // 09
1228 -0.09801813f, 0.019894179f, 0.08502348f, 0.004032281f,
1229 0.037211012f, 0.068537936f, -0.048005626f, -0.091520436f,
1230 -0.028379958f, -0.01556313f, 0.06554592f, -0.045599163f,
1231 -0.01672207f, -0.020169014f, -0.011877351f, -0.20212261f, // 10
1232 0.010889619f, 0.0047078193f, 0.038385306f, 0.08540671f,
1233 -0.017140968f, -0.0035865551f, 0.016678626f, 0.005633034f,
1234 0.015963363f, 0.00871737f, 0.060130805f, 0.028611384f,
1235 0.10109069f, -0.015060172f, -0.07894427f, 0.06401885f, // 11
1236 0.011584063f, -0.024466386f, 0.0047652307f, -0.09041358f,
1237 0.030737216f, -0.0046374933f, 0.14215417f, -0.11823516f,
1238 0.019899689f, 0.006106124f, -0.027092824f, 0.0786356f,
1239 0.05052217f, -0.058925f, -0.011402121f, -0.024987547f, // 12
1240 -0.0013661642f, -0.06832946f, -0.015667673f, -0.1083353f,
1241 -0.00096863037f, -0.06988685f, -0.053350925f, -0.027275559f,
1242 -0.033664223f, -0.07978348f, -0.025200296f, -0.017207067f,
1243 -0.058403496f, -0.055697463f, 0.005798788f, 0.12965427f, // 13
1244 -0.062582195f, 0.0013350133f, -0.10482091f, 0.0379771f,
1245 0.072521195f, -0.0029455067f, -0.13797039f, -0.03628521f,
1246 0.013806405f, -0.017858358f, -0.01008298f, -0.07700066f,
1247 -0.017081132f, 0.019358726f, 0.0027079724f, 0.004635139f, // 14
1248 0.062634714f, -0.02338735f, -0.039547626f, -0.02050681f,
1249 0.03385117f, -0.083611414f, 0.002862572f, -0.09421313f,
1250 0.058618143f, -0.08598433f, 0.00972939f, 0.023867095f,
1251 -0.053934585f, -0.023203006f, 0.07452513f, -0.048767887f, // 15
1252 -0.07314807f, -0.056307215f, -0.10433547f, -0.06440842f,
1253 0.04328182f, 0.04389765f, -0.020006588f, -0.09076438f,
1254 -0.11652589f, -0.021705797f, 0.03345259f, -0.010329105f,
1255 -0.025767034f, 0.013057034f, -0.07316461f, -0.10145612f, // 16
1256 0.06358255f, 0.18531723f, 0.07759293f, 0.12006465f,
1257 0.1305557f, 0.058638252f, -0.03393652f, 0.09622831f,
1258 -0.16253184f, -2.4580743e-06f, 0.079869635f, -0.070196845f,
1259 -0.005644518f, 0.06857898f, -0.12598175f, -0.035084512f, // 17
1260 0.03156317f, -0.12794146f, -0.031963028f, 0.04692781f,
1261 0.030070418f, 0.0071660685f, -0.095516115f, -0.004643372f,
1262 0.040170413f, -0.062104587f, -0.0037324072f, 0.0554317f,
1263 0.08184801f, -0.019164372f, 0.06791302f, 0.034257166f, // 18
1264 -0.10307039f, 0.021943003f, 0.046745934f, 0.0790918f,
1265 -0.0265588f, -0.007824208f, 0.042546265f, -0.00977924f,
1266 -0.0002440307f, -0.017384544f, -0.017990116f, 0.12252321f,
1267 -0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f, // 19
1268 0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f,
1269 -0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f,
1270 -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f
1271 };
1272 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1273 // [num_units, output_size].
1274 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
1275 std::vector<float> recurrentToForgetWeightsValue
1276 {
1277 -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f, // 00
1278 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
1279 -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
1280 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
1281 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f, // 01
1282 -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
1283 -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
1284 0.061878487f, -0.04729229f, 0.034919553f, -0.07585433f,
1285 -0.04421272f, -0.044019096f, 0.085488975f, 0.04058006f, // 02
1286 -0.06890133f, -0.030951202f, -0.024628663f, -0.07672815f,
1287 0.034293607f, 0.08556707f, -0.05293577f, -0.033561368f,
1288 -0.04899627f, 0.0241671f, 0.015736353f, -0.095442444f,
1289 -0.029564252f, 0.016493602f, -0.035026584f, 0.022337519f, // 03
1290 -0.026871363f, 0.004780428f, 0.0077918363f, -0.03601621f,
1291 0.016435321f, -0.03263031f, -0.09543275f, -0.047392778f,
1292 0.013454138f, 0.028934088f, 0.01685226f, -0.086110644f,
1293 -0.046250615f, -0.01847454f, 0.047608484f, 0.07339695f, // 04
1294 0.034546845f, -0.04881143f, 0.009128804f, -0.08802852f,
1295 0.03761666f, 0.008096139f, -0.014454086f, 0.014361001f,
1296 -0.023502491f, -0.0011840804f, -0.07607001f, 0.001856849f,
1297 -0.06509276f, -0.006021153f, -0.08570962f, -0.1451793f, // 05
1298 0.060212336f, 0.055259194f, 0.06974018f, 0.049454916f,
1299 -0.027794661f, -0.08077226f, -0.016179763f, 0.1169753f,
1300 0.17213494f, -0.0056326236f, -0.053934924f, -0.0124349f,
1301 -0.11520337f, 0.05409887f, 0.088759385f, 0.0019655675f, // 06
1302 0.0042065294f, 0.03881498f, 0.019844765f, 0.041858196f,
1303 -0.05695512f, 0.047233116f, 0.038937137f, -0.06542224f,
1304 0.014429736f, -0.09719407f, 0.13908425f, -0.05379757f,
1305 0.012321099f, 0.082840554f, -0.029899208f, 0.044217527f, // 07
1306 0.059855383f, 0.07711018f, -0.045319796f, 0.0948846f,
1307 -0.011724666f, -0.0033288454f, -0.033542685f, -0.04764985f,
1308 -0.13873616f, 0.040668588f, 0.034832682f, -0.015319203f,
1309 -0.018715994f, 0.046002675f, 0.0599172f, -0.043107376f, // 08
1310 0.0294216f, -0.002314414f, -0.022424703f, 0.0030315618f,
1311 0.0014641669f, 0.0029166266f, -0.11878115f, 0.013738511f,
1312 0.12375372f, -0.0006038222f, 0.029104086f, 0.087442465f,
1313 0.052958444f, 0.07558703f, 0.04817258f, 0.044462286f, // 09
1314 -0.015213451f, -0.08783778f, -0.0561384f, -0.003008196f,
1315 0.047060397f, -0.002058388f, 0.03429439f, -0.018839769f,
1316 0.024734668f, 0.024614193f, -0.042046934f, 0.09597743f,
1317 -0.0043254104f, 0.04320769f, 0.0064070094f, -0.0019131786f, // 10
1318 -0.02558259f, -0.022822596f, -0.023273505f, -0.02464396f,
1319 -0.10991725f, -0.006240552f, 0.0074488563f, 0.024044557f,
1320 0.04383914f, -0.046476185f, 0.028658995f, 0.060410924f,
1321 0.050786525f, 0.009452605f, -0.0073054377f, -0.024810238f, // 11
1322 0.0052906186f, 0.0066939713f, -0.0020913032f, 0.014515517f,
1323 0.015898481f, 0.021362653f, -0.030262267f, 0.016587038f,
1324 -0.011442813f, 0.041154444f, -0.007631438f, -0.03423484f,
1325 -0.010977775f, 0.036152758f, 0.0066366293f, 0.11915515f, // 12
1326 0.02318443f, -0.041350313f, 0.021485701f, -0.10906167f,
1327 -0.028218046f, -0.00954771f, 0.020531068f, -0.11995105f,
1328 -0.03672871f, 0.024019798f, 0.014255957f, -0.05221243f,
1329 -0.00661567f, -0.04630967f, 0.033188973f, 0.10107534f, // 13
1330 -0.014027541f, 0.030796422f, -0.10270911f, -0.035999842f,
1331 0.15443139f, 0.07684145f, 0.036571592f, -0.035900835f,
1332 -0.0034699554f, 0.06209149f, 0.015920248f, -0.031122351f,
1333 -0.03858649f, 0.01849943f, 0.13872518f, 0.01503974f, // 14
1334 0.069941424f, -0.06948533f, -0.0088794185f, 0.061282158f,
1335 -0.047401894f, 0.03100163f, -0.041533746f, -0.10430945f,
1336 0.044574402f, -0.01425562f, -0.024290353f, 0.034563623f,
1337 0.05866852f, 0.023947537f, -0.09445152f, 0.035450947f, // 15
1338 0.02247216f, -0.0042998926f, 0.061146557f, -0.10250651f,
1339 0.020881841f, -0.06747029f, 0.10062043f, -0.0023941975f,
1340 0.03532124f, -0.016341697f, 0.09685456f, -0.016764693f,
1341 0.051808182f, 0.05875331f, -0.04536488f, 0.001626336f, // 16
1342 -0.028892258f, -0.01048663f, -0.009793449f, -0.017093895f,
1343 0.010987891f, 0.02357273f, -0.00010856845f, 0.0099760275f,
1344 -0.001845119f, -0.03551521f, 0.0018358806f, 0.05763657f,
1345 -0.01769146f, 0.040995963f, 0.02235177f, -0.060430344f, // 17
1346 0.11475477f, -0.023854522f, 0.10071741f, 0.0686208f,
1347 -0.014250481f, 0.034261297f, 0.047418304f, 0.08562733f,
1348 -0.030519066f, 0.0060542435f, 0.014653856f, -0.038836084f,
1349 0.04096551f, 0.032249358f, -0.08355519f, -0.026823482f, // 18
1350 0.056386515f, -0.010401743f, -0.028396193f, 0.08507674f,
1351 0.014410365f, 0.020995233f, 0.17040324f, 0.11511526f,
1352 0.02459721f, 0.0066619175f, 0.025853224f, -0.023133837f,
1353 -0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f, // 19
1354 -0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f,
1355 0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f,
1356 -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f
1357 };
1358 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1359 // [num_units, output_size].
1360 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
1361 std::vector<float> recurrentToCellWeightsValue
1362 {
1363 -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
1364 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
1365 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
1366 -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
1367 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
1368 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
1369 -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
1370 -0.019443132f, -0.030755889f, -0.0040000007f, 0.04465846f,
1371 -0.021585021f, 0.0031670958f, 0.0053199246f, -0.056117613f,
1372 -0.10893326f, 0.076739706f, -0.08509834f, -0.027997585f,
1373 0.037871376f, 0.01449768f, -0.09002357f, -0.06111149f,
1374 -0.046195522f, 0.0422062f, -0.005683705f, -0.1253618f,
1375 -0.012925729f, -0.04890792f, 0.06985068f, 0.037654128f,
1376 0.03398274f, -0.004781977f, 0.007032333f, -0.031787455f,
1377 0.010868644f, -0.031489216f, 0.09525667f, 0.013939797f,
1378 0.0058680447f, 0.0167067f, 0.02668468f, -0.04797466f,
1379 -0.048885044f, -0.12722108f, 0.035304096f, 0.06554885f,
1380 0.00972396f, -0.039238118f, -0.05159735f, -0.11329045f,
1381 0.1613692f, -0.03750952f, 0.06529313f, -0.071974665f,
1382 -0.11769596f, 0.015524369f, -0.0013754242f, -0.12446318f,
1383 0.02786344f, -0.014179351f, 0.005264273f, 0.14376344f,
1384 0.015983658f, 0.03406988f, -0.06939408f, 0.040699873f,
1385 0.02111075f, 0.09669095f, 0.041345075f, -0.08316494f,
1386 -0.07684199f, -0.045768797f, 0.032298047f, -0.041805092f,
1387 0.0119405f, 0.0061010392f, 0.12652606f, 0.0064572375f,
1388 -0.024950314f, 0.11574242f, 0.04508852f, -0.04335324f,
1389 0.06760663f, -0.027437469f, 0.07216407f, 0.06977076f,
1390 -0.05438599f, 0.034033038f, -0.028602652f, 0.05346137f,
1391 0.043184172f, -0.037189785f, 0.10420091f, 0.00882477f,
1392 -0.054019816f, -0.074273005f, -0.030617684f, -0.0028467078f,
1393 0.024302477f, -0.0038869337f, 0.005332455f, 0.0013399826f,
1394 0.04361412f, -0.007001822f, 0.09631092f, -0.06702025f,
1395 -0.042049985f, -0.035070654f, -0.04103342f, -0.10273396f,
1396 0.0544271f, 0.037184782f, -0.13150354f, -0.0058036847f,
1397 -0.008264958f, 0.042035464f, 0.05891794f, 0.029673764f,
1398 0.0063542654f, 0.044788733f, 0.054816857f, 0.062257513f,
1399 -0.00093483756f, 0.048938446f, -0.004952862f, -0.007730018f,
1400 -0.04043371f, -0.017094059f, 0.07229206f, -0.023670016f,
1401 -0.052195564f, -0.025616996f, -0.01520939f, 0.045104615f,
1402 -0.007376126f, 0.003533447f, 0.006570588f, 0.056037236f,
1403 0.12436656f, 0.051817212f, 0.028532185f, -0.08686856f,
1404 0.11868599f, 0.07663395f, -0.07323171f, 0.03463402f,
1405 -0.050708205f, -0.04458982f, -0.11590894f, 0.021273347f,
1406 0.1251325f, -0.15313013f, -0.12224372f, 0.17228661f,
1407 0.023029093f, 0.086124025f, 0.006445803f, -0.03496501f,
1408 0.028332196f, 0.04449512f, -0.042436164f, -0.026587414f,
1409 -0.006041347f, -0.09292539f, -0.05678812f, 0.03897832f,
1410 0.09465633f, 0.008115513f, -0.02171956f, 0.08304309f,
1411 0.071401566f, 0.019622514f, 0.032163795f, -0.004167056f,
1412 0.02295182f, 0.030739572f, 0.056506045f, 0.004612461f,
1413 0.06524936f, 0.059999723f, 0.046395954f, -0.0045512207f,
1414 -0.1335546f, -0.030136576f, 0.11584653f, -0.014678886f,
1415 0.0020118146f, -0.09688814f, -0.0790206f, 0.039770417f,
1416 -0.0329582f, 0.07922767f, 0.029322514f, 0.026405897f,
1417 0.04207835f, -0.07073373f, 0.063781224f, 0.0859677f,
1418 -0.10925287f, -0.07011058f, 0.048005477f, 0.03438226f,
1419 -0.09606514f, -0.006669445f, -0.043381985f, 0.04240257f,
1420 -0.06955775f, -0.06769346f, 0.043903265f, -0.026784198f,
1421 -0.017840602f, 0.024307009f, -0.040079936f, -0.019946516f,
1422 0.045318738f, -0.12233574f, 0.026170589f, 0.0074471775f,
1423 0.15978073f, 0.10185836f, 0.10298046f, -0.015476589f,
1424 -0.039390966f, -0.072174534f, 0.0739445f, -0.1211869f,
1425 -0.0347889f, -0.07943156f, 0.014809798f, -0.12412325f,
1426 -0.0030663363f, 0.039695457f, 0.0647603f, -0.08291318f,
1427 -0.018529687f, -0.004423833f, 0.0037507233f, 0.084633216f,
1428 -0.01514876f, -0.056505352f, -0.012800942f, -0.06994386f,
1429 0.012962922f, -0.031234352f, 0.07029052f, 0.016418684f,
1430 0.03618972f, 0.055686004f, -0.08663945f, -0.017404709f,
1431 -0.054761406f, 0.029065743f, 0.052404847f, 0.020238016f,
1432 0.0048197987f, -0.0214882f, 0.07078733f, 0.013016777f,
1433 0.06262858f, 0.009184685f, 0.020785125f, -0.043904778f,
1434 -0.0270329f, -0.03299152f, -0.060088247f, -0.015162964f,
1435 -0.001828936f, 0.12642565f, -0.056757294f, 0.013586685f,
1436 0.09232601f, -0.035886683f, 0.06000002f, 0.05229691f,
1437 -0.052580316f, -0.082029596f, -0.010794592f, 0.012947712f,
1438 -0.036429964f, -0.085508935f, -0.13127148f, -0.017744139f,
1439 0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f,
1440 -0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f,
1441 -0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f,
1442 -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f
1443 };
1444 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1445 // [num_units, output_size].
1446 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1447 std::vector<float> recurrentToOutputWeightsValue
1448 {
1449 0.025825322f, -0.05813119f, 0.09495884f, -0.045984812f,
1450 -0.01255415f, -0.0026479573f, -0.08196161f, -0.054914974f,
1451 -0.0046604523f, -0.029587349f, -0.044576716f, -0.07480124f,
1452 -0.082868785f, 0.023254942f, 0.027502948f, -0.0039728214f,
1453 -0.08683098f, -0.08116779f, -0.014675607f, -0.037924774f,
1454 -0.023314456f, -0.007401714f, -0.09255757f, 0.029460307f,
1455 -0.08829125f, -0.005139627f, -0.08989442f, -0.0555066f,
1456 0.13596267f, -0.025062224f, -0.048351806f, -0.03850004f,
1457 0.07266485f, -0.022414139f, 0.05940088f, 0.075114764f,
1458 0.09597592f, -0.010211725f, -0.0049794707f, -0.011523867f,
1459 -0.025980417f, 0.072999895f, 0.11091378f, -0.081685916f,
1460 0.014416728f, 0.043229222f, 0.034178585f, -0.07530371f,
1461 0.035837382f, -0.085607f, -0.007721233f, -0.03287832f,
1462 -0.043848954f, -0.06404588f, -0.06632928f, -0.073643476f,
1463 0.008214239f, -0.045984086f, 0.039764922f, 0.03474462f,
1464 0.060612556f, -0.080590084f, 0.049127717f, 0.04151091f,
1465 -0.030063879f, 0.008801774f, -0.023021035f, -0.019558564f,
1466 0.05158114f, -0.010947698f, -0.011825728f, 0.0075720972f,
1467 0.0699727f, -0.0039981045f, 0.069350146f, 0.08799282f,
1468 0.016156472f, 0.035502106f, 0.11695009f, 0.006217345f,
1469 0.13392477f, -0.037875112f, 0.025745004f, 0.08940699f,
1470 -0.00924166f, 0.0046702605f, -0.036598757f, -0.08811812f,
1471 0.10522024f, -0.032441203f, 0.008176899f, -0.04454919f,
1472 0.07058152f, 0.0067963637f, 0.039206743f, 0.03259838f,
1473 0.03725492f, -0.09515802f, 0.013326398f, -0.052055415f,
1474 -0.025676316f, 0.03198509f, -0.015951829f, -0.058556724f,
1475 0.036879618f, 0.043357447f, 0.028362012f, -0.05908629f,
1476 0.0059240665f, -0.04995891f, -0.019187413f, 0.0276265f,
1477 -0.01628143f, 0.0025863599f, 0.08800015f, 0.035250366f,
1478 -0.022165963f, -0.07328642f, -0.009415526f, -0.07455109f,
1479 0.11690406f, 0.0363299f, 0.07411125f, 0.042103454f,
1480 -0.009660886f, 0.019076364f, 0.018299393f, -0.046004917f,
1481 0.08891175f, 0.0431396f, -0.026327137f, -0.051502608f,
1482 0.08979574f, -0.051670972f, 0.04940282f, -0.07491107f,
1483 -0.021240504f, 0.022596184f, -0.034280192f, 0.060163025f,
1484 -0.058211457f, -0.051837247f, -0.01349775f, -0.04639988f,
1485 -0.035936575f, -0.011681591f, 0.064818054f, 0.0073146066f,
1486 -0.021745546f, -0.043124277f, -0.06471268f, -0.07053354f,
1487 -0.029321948f, -0.05330136f, 0.016933719f, -0.053782392f,
1488 0.13747959f, -0.1361751f, -0.11569455f, 0.0033329215f,
1489 0.05693899f, -0.053219706f, 0.063698f, 0.07977434f,
1490 -0.07924483f, 0.06936997f, 0.0034815092f, -0.007305279f,
1491 -0.037325785f, -0.07251102f, -0.033633437f, -0.08677009f,
1492 0.091591336f, -0.14165086f, 0.021752775f, 0.019683983f,
1493 0.0011612234f, -0.058154266f, 0.049996935f, 0.0288841f,
1494 -0.0024567875f, -0.14345716f, 0.010955264f, -0.10234828f,
1495 0.1183656f, -0.0010731248f, -0.023590032f, -0.072285876f,
1496 -0.0724771f, -0.026382286f, -0.0014920527f, 0.042667855f,
1497 0.0018776858f, 0.02986552f, 0.009814309f, 0.0733756f,
1498 0.12289186f, 0.018043943f, -0.0458958f, 0.049412545f,
1499 0.033632483f, 0.05495232f, 0.036686596f, -0.013781798f,
1500 -0.010036754f, 0.02576849f, -0.08307328f, 0.010112348f,
1501 0.042521734f, -0.05869831f, -0.071689695f, 0.03876447f,
1502 -0.13275425f, -0.0352966f, -0.023077697f, 0.10285965f,
1503 0.084736146f, 0.15568255f, -0.00040734606f, 0.027835453f,
1504 -0.10292561f, -0.032401145f, 0.10053256f, -0.026142767f,
1505 -0.08271222f, -0.0030240538f, -0.016368777f, 0.1070414f,
1506 0.042672627f, 0.013456989f, -0.0437609f, -0.022309763f,
1507 0.11576483f, 0.04108048f, 0.061026827f, -0.0190714f,
1508 -0.0869359f, 0.037901703f, 0.0610107f, 0.07202949f,
1509 0.01675338f, 0.086139716f, -0.08795751f, -0.014898893f,
1510 -0.023771819f, -0.01965048f, 0.007955471f, -0.043740474f,
1511 0.03346837f, -0.10549954f, 0.090567775f, 0.042013682f,
1512 -0.03176985f, 0.12569028f, -0.02421228f, -0.029526481f,
1513 0.023851605f, 0.031539805f, 0.05292009f, -0.02344001f,
1514 -0.07811758f, -0.08834428f, 0.10094801f, 0.16594367f,
1515 -0.06861939f, -0.021256343f, -0.041093912f, -0.06669611f,
1516 0.035498552f, 0.021757556f, -0.09302526f, -0.015403468f,
1517 -0.06614931f, -0.051798206f, -0.013874718f, 0.03630673f,
1518 0.010412845f, -0.08077351f, 0.046185967f, 0.0035662893f,
1519 0.03541868f, -0.094149634f, -0.034814864f, 0.003128424f,
1520 -0.020674974f, -0.03944324f, -0.008110165f, -0.11113267f,
1521 0.08484226f, 0.043586485f, 0.040582247f, 0.0968012f,
1522 -0.065249965f, -0.028036479f, 0.0050708856f, 0.0017462453f,
1523 0.0326779f, 0.041296225f, 0.09164146f, -0.047743853f,
1524 -0.015952192f, -0.034451712f, 0.084197424f, -0.05347844f,
1525 -0.11768019f, 0.085926116f, -0.08251791f, -0.045081906f,
1526 0.0948852f, 0.068401024f, 0.024856757f, 0.06978981f,
1527 -0.057309967f, -0.012775832f, -0.0032452994f, 0.01977615f,
1528 -0.041040014f, -0.024264973f, 0.063464895f, 0.05431621f
1529 };
1530 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1531 hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
1532 std::vector<float> cellToInputWeightsValue
1533 {
1534 0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f,
1535 -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f,
1536 -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f, -0.052169047f,
1537 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f
1538 };
1539 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1540 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1541 std::vector<float> cellToForgetWeightsValue
1542 {
1543 -0.01998659f, -0.15568835f, -0.24248174f, -0.012770197f, 0.041331276f,
1544 -0.072311886f, -0.052123554f, -0.0066330447f, -0.043891653f, 0.036225766f,
1545 -0.047248036f, 0.021479502f, 0.033189066f, 0.11952997f, -0.020432774f,
1546 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f
1547 };
1548 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1549 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1550 std::vector<float> cellToOutputWeightsValue
1551 {
1552 0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
1553 -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
1554 -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f,
1555 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f
1556 };
1557 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1558 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
1559 std::vector<float> inputGateBiasValue
1560 {
1561 0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f,
1562 -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f,
1563 -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f,
1564 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f
1565 };
1566 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1567 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1568 std::vector<float> forgetGateBiasValue
1569 {
1570 0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f, 0.11098921f,
1571 0.15378423f, 0.09263801f, 0.09790885f, 0.09508917f, 0.061199076f,
1572 0.07665568f, -0.015443159f, -0.03499149f, 0.046190713f, 0.08895977f,
1573 0.10899629f, 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f
1574 };
1575 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1576 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1577 std::vector<float> cellBiasValue
1578 {
1579 -0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f, -0.1483596f,
1580 -0.10639995f, -0.091433935f, 0.058573797f, -0.06809782f, -0.07889636f,
1581 -0.043246906f, -0.09829136f, -0.4279842f, 0.034901652f, 0.18797937f,
1582 0.0075234566f, 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f
1583 };
1584 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1585 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1586 std::vector<float> outputGateBiasValue
1587 {
1588 0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
1589 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f,
1590 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f,
1591 -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f
1592 };
1593 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1594 // [output_size, num_units].
1595 hidl_vec<uint32_t> projectionWeightsDimensions{outputSize, numUnits};
1596 std::vector<float> projectionWeightsValue
1597 {
1598 -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
1599 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
1600 -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
1601 -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
1602 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
1603 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f,
1604 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f,
1605 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f,
1606 -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f,
1607 -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f,
1608 -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f,
1609 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f,
1610 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f,
1611 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f,
1612 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f,
1613 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f,
1614 -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f,
1615 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f,
1616 -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f,
1617 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f,
1618 -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f,
1619 -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f,
1620 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f,
1621 -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f,
1622 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f,
1623 -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f,
1624 -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f,
1625 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f,
1626 -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f,
1627 -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f,
1628 -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f,
1629 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f,
1630 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f,
1631 -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f,
1632 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f,
1633 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f,
1634 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f,
1635 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f,
1636 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f,
1637 -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f,
1638 -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f,
1639 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f,
1640 -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f,
1641 -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f,
1642 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f,
1643 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f,
1644 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f,
1645 -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f,
1646 -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f,
1647 -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f,
1648 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f,
1649 -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f,
1650 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f,
1651 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f,
1652 -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f,
1653 -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f,
1654 -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f,
1655 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f,
1656 -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f,
1657 -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f,
1658 -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f,
1659 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f,
1660 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f,
1661 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f
1662 };
1663 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
1664 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
1665 std::vector<float> projectionBiasValue(outputSize, 0.0f);
1666
1667 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1668 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1669 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
1670 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1671 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1672 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
1673
1674 // Constant scalar values (the VTS test adds these as tensors of dim {})
1675 // 20: The activation function: A value indicating the activation function:
1676 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
1677 hidl_vec<uint32_t> activationFunctionDimensions{};
1678 std::vector<int32_t> activationFunctionValue{4};
1679 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1680 // If set to 0.0 then clipping is disabled.
1681 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
1682 std::vector<float> cellClippingThresholdValue{0.0f};
1683 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1684 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1685 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
1686 std::vector<float> projectionClippingThresholdValue{0.0f};
1687
1688 // Normalization:
1689 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
1690 // Used to rescale normalized inputs to activation at input gate.
1691 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
1692 std::vector<float> inputLayerNormWeightsValue;
1693 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
1694 // Used to rescale normalized inputs to activation at forget gate.
1695 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
1696 std::vector<float> forgetLayerNormWeightsValue;
1697 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1698 // Used to rescale normalized inputs to activation at cell gate.
1699 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1700 std::vector<float> cellLayerNormWeightsValue;
1701 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1702 // Used to rescale normalized inputs to activation at output gate.
1703 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1704 std::vector<float> outputLayerNormWeightsValue;
1705
1706 // Outputs:
1707 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1708 // CIFG, or [batch_size, num_units * 3] without CIFG.
1709 // HOWEVER, looking at the code, it seems to be the opposite: (cifg ? 3 : 4) * numUnits
1710 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1711 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1712 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1713 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
1714 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
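// Here CIFG is disabled (the input-to-input and recurrent-to-input weights above are populated), so under the
// same assumed "(cifg ? 3 : 4) * numUnits" reading the scratch buffer width works out as:
//     (cifgEnabled ? 3u : 4u) * numUnits  ->  4u * 20u  ->  80u   // matches scratchBufferDimensions[1]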
1715 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1716 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1717 std::vector<float> outputStateOutValue
1718 {
1719 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835577f, -0.0211779f, 0.0283512f, -0.0114597f,
1720 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415119f, 0.017147f, 0.0134203f,
1721 -0.013869f, 0.0287268f, -0.00334694f, 0.00733397f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
1722 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.0216801f
1723 };
1724 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1725 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1726 std::vector<float> cellStateOutValue
1727 {
1728 -0.0531632f, -0.0118138f, 0.0870833f, 0.0347929f, -0.076144f,
1729 -0.0659219f, -0.0463811f, 0.0141307f, -0.0127706f, -0.03782f,
1730 -0.00402401f, -0.00571876f, -0.187957f, -0.0247127f, 0.0711425f,
1731 0.008244f, 0.0492649f, 0.126972f, 0.0933097f, 0.29848f,
1732 -0.0966178f, -0.114417f, 0.0387229f, 0.0453255f, -0.181286f,
1733 -0.0651251f, -0.0996879f, -0.00276995f, 0.0617558f, -0.0100728f,
1734 0.056304f, -0.077416f, -0.162858f, -0.0541251f, 0.0571202f,
1735 -0.0525331f, 0.0724297f, 0.171029f, 0.141738f, 0.295483f
1736 };
1737 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1738 // effectively the same as the current “output state (out)” value.
1739 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1740 std::vector<float> outputValue
1741 {
1742 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, -0.0211779f, 0.0283512f, -0.0114597f,
1743 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f, 0.0134203f,
1744 -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
1745 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f
1746 };
1747
1748 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1749 inputToInputWeightsDimensions, inputToInputWeightsValue,
1750 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1751 inputToCellWeightsDimensions, inputToCellWeightsValue,
1752 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1753 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1754 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1755 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1756 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1757 cellToInputWeightsDimensions, cellToInputWeightsValue,
1758 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1759 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1760 inputGateBiasDimensions, inputGateBiasValue,
1761 forgetGateBiasDimensions, forgetGateBiasValue,
1762 cellBiasDimensions, cellBiasValue,
1763 outputGateBiasDimensions, outputGateBiasValue,
1764 projectionWeightsDimensions, projectionWeightsValue,
1765 projectionBiasDimensions, projectionBiasValue,
1766 outputStateInDimensions, outputStateInValue,
1767 cellStateInDimensions, cellStateInValue,
1768 activationFunctionDimensions, activationFunctionValue,
1769 cellClippingThresholdDimensions, cellClippingThresholdValue,
1770 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1771 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1772 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1773 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1774 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1775 scratchBufferDimensions, scratchBufferValue,
1776 outputStateOutDimensions, outputStateOutValue,
1777 cellStateOutDimensions, cellStateOutValue,
1778 outputDimensions, outputValue,
1779 compute);
1780 }
1781
1782 template <typename HalPolicy>
1783 void LstmCifgPeepholeNoProjectionBatch2(armnn::Compute compute)
1784 {
1785 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
1786 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
1787 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
1788 // The batch size has been increased to 2 (it was 1 in the VTS test) with appropriate input and output values added.
1789
1790 uint32_t batchSize = 2;
1791 uint32_t inputSize = 2;
1792 uint32_t numUnits = 4;
1793 uint32_t outputSize = numUnits;
1794
1795 // Inputs:
1796 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1797 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1798 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
1799 std::vector<float> inputValue{2.0f, 3.0f, 3.0f, 4.0f};
1800
1801 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1802 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
1803 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
1804 std::vector<float> inputToInputWeightsValue;
1805 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1806 // [num_units, input_size].
1807 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
1808 std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
1809 0.13056988f, -0.36333650f,
1810 -0.22755712f, 0.28253698f,
1811 0.24407166f, 0.33826375f};
1812 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
1813 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
1814 std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
1815 -0.09624726f, 0.05100781f,
1816 0.04717243f, 0.48944736f,
1817 -0.38535351f, -0.17212132f};
1818 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1819 // [num_units, input_size].
1820 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
1821 std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
1822 -0.55932593f, -0.09426838f,
1823 -0.44257352f, 0.54939759f,
1824 0.01533556f, 0.42751634f};
1825 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1826 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1827 // “num_units”), or the second dimension of the “projection_weights”, if defined.
1828 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // Empty {0} marks this optional operand as having no value (the VTS model uses {4, 4})
1829 std::vector<float> recurrentToInputWeightsValue;
1830 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1831 // [num_units, output_size].
1832 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
1833 std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
1834 -0.14340827f, 0.36986142f, 0.23414481f, 0.55899000f,
1835 0.10798943f, -0.41174671f, 0.17751795f, -0.34484994f,
1836 -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f};
1837 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1838 // [num_units, output_size].
1839 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
1840 std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
1841 0.42957711f, 0.01841056f, -0.32764608f, -0.33027974f,
1842 -0.10826075f, 0.20675004f, 0.19069612f, -0.03026325f,
1843 -0.54532051f, 0.33003211f, 0.44901288f, 0.21193194f};
1844 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1845 // [num_units, output_size].
1846 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1847 std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
1848 0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
1849 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
1850 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
1851 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1852 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
1853 std::vector<float> cellToInputWeightsValue;
1854 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1855 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1856 std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
1857 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1858 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1859 std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
1860 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1861 hidl_vec<uint32_t> inputGateBiasDimensions{0}; // The VTS model declares {4}; {0} is used here so the optional operand is treated as absent
1862 std::vector<float> inputGateBiasValue;
1863 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1864 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1865 std::vector<float> forgetGateBiasValue{1.0f, 1.0f, 1.0f, 1.0f};
1866 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1867 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1868 std::vector<float> cellBiasValue(numUnits, 0.0f);
1869 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1870 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1871 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
1872 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1873 // [output_size, num_units].
1874 hidl_vec<uint32_t> projectionWeightsDimensions{0};
1875 std::vector<float> projectionWeightsValue;
1876 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
1877 hidl_vec<uint32_t> projectionBiasDimensions{0};
1878 std::vector<float> projectionBiasValue;
1879
1880 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1881 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1882 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
1883 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1884 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1885 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
1886
1887 // Constant scalar values (the VTS test adds these as tensors of dim {})
1888 // 20: The activation function: A value indicating the activation function:
1889 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
1890 hidl_vec<uint32_t> activationFunctionDimensions{};
1891 std::vector<int32_t> activationFunctionValue{4};
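    // The value 4 used here selects Tanh as the gate output activation (see the encoding above).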
1892 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1893 // If set to 0.0 then clipping is disabled.
1894 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
1895 std::vector<float> cellClippingThresholdValue{0.0f};
1896 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1897 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1898 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
1899 std::vector<float> projectionClippingThresholdValue{0.0f};
1900
1901 // Normalization:
1902 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
1903 // Used to rescale normalized inputs to activation at input gate.
1904 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
1905 std::vector<float> inputLayerNormWeightsValue;
1906 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
1907 // Used to rescale normalized inputs to activation at forget gate.
1908 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
1909 std::vector<float> forgetLayerNormWeightsValue;
1910 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1911 // Used to rescale normalized inputs to activation at cell gate.
1912 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1913 std::vector<float> cellLayerNormWeightsValue;
1914 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1915 // Used to rescale normalized inputs to activation at output gate.
1916 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1917 std::vector<float> outputLayerNormWeightsValue;
1918
1919 // Outputs:
1920 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1921 // CIFG, or [batch_size, num_units * 3] without CIFG.
1922 // However, the reference implementation computes the opposite: (cifg ? 3 : 4) * numUnits
1923 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1924 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1925 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1926 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
1927 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
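    // Illustrative sketch (an assumption based on the references above, not part of the VTS data):
    // the scratch buffer length follows the reference implementation's rule
    //     gates  = cifgEnabled ? 3 : 4;            // CIFG drops the input gate plane
    //     length = batchSize * numUnits * gates;   // with the sizes in this test: 2 * 4 * 3 = 24 floats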
1928 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1929 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1930 std::vector<float> outputStateOutValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1931 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
1932 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1933 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1934 std::vector<float> cellStateOutValue{-0.76044439f, -0.01804161f, 0.18226376f, -0.06493707f,
1935 -0.90477051f, -0.04355603f, 0.18475688f, -0.04158677f};
1936 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1937 // effectively the same as the current “output state (out)” value.
1938 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1939 std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1940 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
1941
1942 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1943 inputToInputWeightsDimensions, inputToInputWeightsValue,
1944 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1945 inputToCellWeightsDimensions, inputToCellWeightsValue,
1946 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1947 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1948 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1949 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1950 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1951 cellToInputWeightsDimensions, cellToInputWeightsValue,
1952 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1953 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1954 inputGateBiasDimensions, inputGateBiasValue,
1955 forgetGateBiasDimensions, forgetGateBiasValue,
1956 cellBiasDimensions, cellBiasValue,
1957 outputGateBiasDimensions, outputGateBiasValue,
1958 projectionWeightsDimensions, projectionWeightsValue,
1959 projectionBiasDimensions, projectionBiasValue,
1960 outputStateInDimensions, outputStateInValue,
1961 cellStateInDimensions, cellStateInValue,
1962 activationFunctionDimensions, activationFunctionValue,
1963 cellClippingThresholdDimensions, cellClippingThresholdValue,
1964 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1965 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1966 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1967 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1968 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1969 scratchBufferDimensions, scratchBufferValue,
1970 outputStateOutDimensions, outputStateOutValue,
1971 cellStateOutDimensions, cellStateOutValue,
1972 outputDimensions, outputValue,
1973 compute);
1974 }
1975
1976 template <typename HalPolicy>
1977 void LstmNoCifgPeepholeProjectionNoClippingLayerNorm(armnn::Compute compute)
1978 {
1979 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/layer_norm_lstm.model.cpp
1980 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
1981 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
1982
1983 uint32_t batchSize = 2;
1984 uint32_t inputSize = 5;
1985 uint32_t numUnits = 4;
1986 uint32_t outputSize = 3;
1987
1988 // Inputs:
1989 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1990 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1991 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
1992 std::vector<float> inputValue{ 0.7f, 0.8f, 0.1f, 0.2f, 0.3f, // batch 0
1993 0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; // batch 1
1994
1995 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1996 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
1997 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
1998 std::vector<float> inputToInputWeightsValue{ 0.5, 0.6, 0.7, -0.8, -0.9,
1999 0.1, 0.2, 0.3, -0.4, 0.5,
2000 -0.8, 0.7, -0.6, 0.5, -0.4,
2001 -0.5, -0.4, -0.3, -0.2, -0.1};
2002 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2003 // [num_units, input_size].
2004 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
2005 std::vector<float> inputToForgetWeightsValue{-0.6, -0.1, 0.3, 0.2, 0.9,
2006 -0.5, -0.2, -0.4, 0.3, -0.8,
2007 -0.4, 0.3, -0.5, -0.4, -0.6,
2008 0.3, -0.4, -0.6, -0.5, -0.5};
2009 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
2010 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
2011 std::vector<float> inputToCellWeightsValue{-0.4, -0.3, -0.2, -0.1, -0.5,
2012 0.5, -0.2, -0.3, -0.2, -0.6,
2013 0.6, -0.1, -0.4, -0.3, -0.7,
2014 0.7, -0.9, -0.5, 0.8, 0.6};
2015 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2016 // [num_units, input_size].
2017 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
2018 std::vector<float> inputToOutputWeightsValue{-0.8, -0.4, -0.2, -0.9, -0.1,
2019 -0.7, 0.3, -0.3, -0.8, -0.2,
2020 0.6, -0.2, 0.4, -0.7, -0.3,
2021 -0.5, 0.1, 0.5, -0.6, -0.4};
2022 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2023 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
2024 // “num_units”), or the second dimension of the “projection_weights”, if defined.
2025 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
2026 std::vector<float> recurrentToInputWeightsValue{-0.2, -0.3, 0.4,
2027 0.1, -0.5, 0.9,
2028 -0.2, -0.3, -0.7,
2029 0.05, -0.2, -0.6};
2030 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2031 // [num_units, output_size].
2032 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
2033 std::vector<float> recurrentToForgetWeightsValue{-0.5, -0.3, -0.5,
2034 -0.2, 0.6, 0.4,
2035 0.9, 0.3, -0.1,
2036 0.2, 0.5, 0.2};
2037 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2038 // [num_units, output_size].
2039 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
2040 std::vector<float> recurrentToCellWeightsValue{-0.3, 0.2, 0.1,
2041 -0.3, 0.8,-0.08,
2042 -0.2, 0.3, 0.8,
2043 -0.6, -0.1, 0.2};
2044 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2045 // [num_units, output_size].
2046 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
2047 std::vector<float> recurrentToOutputWeightsValue{ 0.3, -0.1, 0.1,
2048 -0.2, -0.5, -0.7,
2049 -0.2, -0.6, -0.1,
2050 -0.4, -0.7, -0.2};
2051 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2052 hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
2053 std::vector<float> cellToInputWeightsValue{0.05, 0.1, 0.25, 0.15};
2054 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2055 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
2056 std::vector<float> cellToForgetWeightsValue{-0.02, -0.15, -0.25, -0.03};
2057 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2058 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
2059 std::vector<float> cellToOutputWeightsValue{0.1, -0.1, -0.5, 0.05};
2060 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2061 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
2062 std::vector<float> inputGateBiasValue{0.03, 0.15, 0.22, 0.38};
2063 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2064 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
2065 std::vector<float> forgetGateBiasValue{0.1, -0.3, -0.2, 0.1};
2066 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2067 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
2068 std::vector<float> cellBiasValue{-0.05, 0.72, 0.25, 0.08};
2069 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2070 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
2071 std::vector<float> outputGateBiasValue{0.05, -0.01, 0.2, 0.1};
2072 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2073 // [output_size, num_units].
2074 hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
2075 std::vector<float> projectionWeightsValue{-0.1, 0.2, 0.01,
2076 -0.2, 0.1, 0.5,
2077 0.3, 0.08, 0.07,
2078 0.2, -0.4, 0.2};
2079 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
2080 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
2081 std::vector<float> projectionBiasValue(outputSize, 0.0f);
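    // Note (added): the projection layer maps the 4 cell units down to outputSize = 3 values per batch,
    // and the all-zero projection bias keeps that mapping purely linear.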
2082 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2083 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
2084 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
2085 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2086 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
2087 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
2088
2089 // Constant scalar values (the VTS test adds these as tensors of dim {})
2090 // 20: The activation function: A value indicating the activation function:
2091 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
2092 hidl_vec<uint32_t> activationFunctionDimensions{};
2093 std::vector<int32_t> activationFunctionValue{4};
2094 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
2095 // If set to 0.0 then clipping is disabled.
2096 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
2097 std::vector<float> cellClippingThresholdValue{0.0f};
2098 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
2099 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2100 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
2101 std::vector<float> projectionClippingThresholdValue{0.0f};
2102
2103 // Normalization:
2104 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
2105 // Used to rescale normalized inputs to activation at input gate.
2106 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
2107 std::vector<float> inputLayerNormWeightsValue{0.1, 0.2, 0.3, 0.5};
2108 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
2109 // Used to rescale normalized inputs to activation at forget gate.
2110 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
2111 std::vector<float> forgetLayerNormWeightsValue{0.2, 0.2, 0.4, 0.3};
2112 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
2113 // Used to rescale normalized inputs to activation at cell gate.
2114 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
2115 std::vector<float> cellLayerNormWeightsValue{0.7, 0.2, 0.3, 0.8};
2116 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
2117 // Used to rescale normalized inputs to activation at output gate.
2118 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
2119 std::vector<float> outputLayerNormWeightsValue{0.6, 0.2, 0.2, 0.5};
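    // Rough formulation (added, hedged; peephole terms omitted for brevity): with layer normalization
    // enabled, each gate computes
    //     gate = activation(LayerNorm(W_x * x + W_h * h) * layerNormWeights + gateBias)
    // so these 1-D weights rescale the normalized pre-activation of their respective gate.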
2120
2121 // Outputs:
2122 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
2123 // CIFG, or [batch_size, num_units * 3] without CIFG.
2124 // However, the reference implementation computes the opposite: (cifg ? 3 : 4) * numUnits
2125 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
2126 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
2127 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
2128 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
2129 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
2130 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2131 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
2132 std::vector<float> outputStateOutValue { 0.02440767f, 0.12802738f, -0.00170918f,
2133 -0.00692428f, 0.08487406f, 0.06344498f};
2134 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2135 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
2136 std::vector<float> cellStateOutValue {-0.45177122f, 0.37691566f, 0.22542511f, 0.23240635f,
2137 -0.25258583f, 0.33042118f, 0.01730525f, 0.36660123f};
2138 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
2139 // effectively the same as the current “output state (out)” value.
2140 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
2141 std::vector<float> outputValue{ 0.02440767f, 0.12802738f, -0.00170918f,
2142 -0.00692428f, 0.08487406f, 0.06344498f};
2143
2144 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
2145 inputToInputWeightsDimensions, inputToInputWeightsValue,
2146 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
2147 inputToCellWeightsDimensions, inputToCellWeightsValue,
2148 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
2149 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
2150 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
2151 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
2152 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
2153 cellToInputWeightsDimensions, cellToInputWeightsValue,
2154 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
2155 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
2156 inputGateBiasDimensions, inputGateBiasValue,
2157 forgetGateBiasDimensions, forgetGateBiasValue,
2158 cellBiasDimensions, cellBiasValue,
2159 outputGateBiasDimensions, outputGateBiasValue,
2160 projectionWeightsDimensions, projectionWeightsValue,
2161 projectionBiasDimensions, projectionBiasValue,
2162 outputStateInDimensions, outputStateInValue,
2163 cellStateInDimensions, cellStateInValue,
2164 activationFunctionDimensions, activationFunctionValue,
2165 cellClippingThresholdDimensions, cellClippingThresholdValue,
2166 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
2167 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
2168 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
2169 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
2170 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
2171 scratchBufferDimensions, scratchBufferValue,
2172 outputStateOutDimensions, outputStateOutValue,
2173 cellStateOutDimensions, cellStateOutValue,
2174 outputDimensions, outputValue,
2175 compute);
2176 }
2177
2178 template <typename HalPolicy>
2179 void LstmCifgPeepholeProjectionNoClippingLayerNorm(armnn::Compute compute)
2180 {
2181 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/layer_norm_lstm.model.cpp
2182 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
2183 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
2184
2185 uint32_t batchSize = 2;
2186 uint32_t inputSize = 5;
2187 uint32_t numUnits = 4;
2188 uint32_t outputSize = 3;
2189
2190 // Inputs:
2191 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
2192 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
2193 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
2194 std::vector<float> inputValue{ 0.7f, 0.8f, 0.1f, 0.2f, 0.3f, // batch 0
2195 0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; // batch 1
2196
2197 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2198 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
2199 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
2200 std::vector<float> inputToInputWeightsValue;
2201 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2202 // [num_units, input_size].
2203 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
2204 std::vector<float> inputToForgetWeightsValue{-0.6, -0.1, 0.3, 0.2, 0.9,
2205 -0.5, -0.2, -0.4, 0.3, -0.8,
2206 -0.4, 0.3, -0.5, -0.4, -0.6,
2207 0.3, -0.4, -0.6, -0.5, -0.5};
2208 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
2209 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
2210 std::vector<float> inputToCellWeightsValue{-0.4, -0.3, -0.2, -0.1, -0.5,
2211 0.5, -0.2, -0.3, -0.2, -0.6,
2212 0.6, -0.1, -0.4, -0.3, -0.7,
2213 0.7, -0.9, -0.5, 0.8, 0.6};
2214 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2215 // [num_units, input_size].
2216 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
2217 std::vector<float> inputToOutputWeightsValue{-0.8, -0.4, -0.2, -0.9, -0.1,
2218 -0.7, 0.3, -0.3, -0.8, -0.2,
2219 0.6, -0.2, 0.4, -0.7, -0.3,
2220 -0.5, 0.1, 0.5, -0.6, -0.4};
2221 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2222 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
2223 // “num_units”), or the second dimension of the “projection_weights”, if defined.
2224 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0};
2225 std::vector<float> recurrentToInputWeightsValue;
2226 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2227 // [num_units, output_size].
2228 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
2229 std::vector<float> recurrentToForgetWeightsValue{-0.5, -0.3, -0.5,
2230 -0.2, 0.6, 0.4,
2231 0.9, 0.3, -0.1,
2232 0.2, 0.5, 0.2};
2233 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2234 // [num_units, output_size].
2235 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
2236 std::vector<float> recurrentToCellWeightsValue{-0.3, 0.2, 0.1,
2237 -0.3, 0.8,-0.08,
2238 -0.2, 0.3, 0.8,
2239 -0.6, -0.1, 0.2};
2240 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2241 // [num_units, output_size].
2242 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
2243 std::vector<float> recurrentToOutputWeightsValue{ 0.3, -0.1, 0.1,
2244 -0.2, -0.5, -0.7,
2245 -0.2, -0.6, -0.1,
2246 -0.4, -0.7, -0.2};
2247 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2248 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
2249 std::vector<float> cellToInputWeightsValue;
2250 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2251 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
2252 std::vector<float> cellToForgetWeightsValue{-0.02, -0.15, -0.25, -0.03};
2253 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2254 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
2255 std::vector<float> cellToOutputWeightsValue{0.1, -0.1, -0.5, 0.05};
2256 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2257 hidl_vec<uint32_t> inputGateBiasDimensions{0};
2258 std::vector<float> inputGateBiasValue;
2259 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2260 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
2261 std::vector<float> forgetGateBiasValue{0.1, -0.3, -0.2, 0.1};
2262 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2263 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
2264 std::vector<float> cellBiasValue{-0.05, 0.72, 0.25, 0.08};
2265 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2266 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
2267 std::vector<float> outputGateBiasValue{0.05, -0.01, 0.2, 0.1};
2268 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2269 // [output_size, num_units].
2270 hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
2271 std::vector<float> projectionWeightsValue{-0.1, 0.2, 0.01,
2272 -0.2, 0.1, 0.5,
2273 0.3, 0.08, 0.07,
2274 0.2, -0.4, 0.2};
2275 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
2276 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
2277 std::vector<float> projectionBiasValue(outputSize, 0.0f);
2278 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2279 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
2280 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
2281 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2282 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
2283 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
2284
2285 // Constant scalar values (the VTS test adds these as tensors of dim {})
2286 // 20: The activation function: A value indicating the activation function:
2287 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
2288 hidl_vec<uint32_t> activationFunctionDimensions{};
2289 std::vector<int32_t> activationFunctionValue{4};
2290 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
2291 // If set to 0.0 then clipping is disabled.
2292 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
2293 std::vector<float> cellClippingThresholdValue{0.0f};
2294 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
2295 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2296 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
2297 std::vector<float> projectionClippingThresholdValue{0.0f};
2298
2299 // Normalization:
2300 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
2301 // Used to rescale normalized inputs to activation at input gate.
2302 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
2303 std::vector<float> inputLayerNormWeightsValue{0.1, 0.2, 0.3, 0.5};
2304 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
2305 // Used to rescale normalized inputs to activation at forget gate.
2306 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
2307 std::vector<float> forgetLayerNormWeightsValue{0.2, 0.2, 0.4, 0.3};
2308 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
2309 // Used to rescale normalized inputs to activation at cell gate.
2310 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
2311 std::vector<float> cellLayerNormWeightsValue{0.7, 0.2, 0.3, 0.8};
2312 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
2313 // Used to rescale normalized inputs to activation at output gate.
2314 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
2315 std::vector<float> outputLayerNormWeightsValue{0.6, 0.2, 0.2, 0.5};
2316
2317 // Outputs:
2318 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
2319 // CIFG, or [batch_size, num_units * 3] without CIFG.
2320 // However, the reference implementation computes the opposite: (cifg ? 3 : 4) * numUnits
2321 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
2322 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
2323 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
2324 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
2325 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
2326 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2327 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
2328 std::vector<float> outputStateOutValue { 0.02129706f, 0.14081624f, 0.01127331f,
2329 -0.02263505f, 0.09169482f, 0.07691758f};
2330 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2331 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
2332 std::vector<float> cellStateOutValue{-0.35102980f, 0.42610350f, 0.21463650f, 0.27716520f,
2333 -0.18855170f, 0.32522000f, 0.02036650f, 0.48967660f};
2334 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
2335 // effectively the same as the current “output state (out)” value.
2336 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
2337 std::vector<float> outputValue{ 0.02129706f, 0.14081624f, 0.01127331f,
2338 -0.02263505f, 0.09169482f, 0.07691758f};
2339
2340 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
2341 inputToInputWeightsDimensions, inputToInputWeightsValue,
2342 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
2343 inputToCellWeightsDimensions, inputToCellWeightsValue,
2344 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
2345 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
2346 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
2347 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
2348 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
2349 cellToInputWeightsDimensions, cellToInputWeightsValue,
2350 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
2351 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
2352 inputGateBiasDimensions, inputGateBiasValue,
2353 forgetGateBiasDimensions, forgetGateBiasValue,
2354 cellBiasDimensions, cellBiasValue,
2355 outputGateBiasDimensions, outputGateBiasValue,
2356 projectionWeightsDimensions, projectionWeightsValue,
2357 projectionBiasDimensions, projectionBiasValue,
2358 outputStateInDimensions, outputStateInValue,
2359 cellStateInDimensions, cellStateInValue,
2360 activationFunctionDimensions, activationFunctionValue,
2361 cellClippingThresholdDimensions, cellClippingThresholdValue,
2362 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
2363 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
2364 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
2365 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
2366 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
2367 scratchBufferDimensions, scratchBufferValue,
2368 outputStateOutDimensions, outputStateOutValue,
2369 cellStateOutDimensions, cellStateOutValue,
2370 outputDimensions, outputValue,
2371 compute);
2372 }
2373
2374 template <typename HalPolicy>
2375 void QuantizedLstm(armnn::Compute compute)
2376 {
2377 armnn::IgnoreUnused(compute);
2378 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/quantized_lstm.model.cpp
2379 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/quantized_lstm.example.cpp
2380 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
2381
2382 uint32_t batchSize = 2;
2383 uint32_t inputSize = 2;
2384 uint32_t outputSize = 4;
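    // Note (added): the quantized 16-bit LSTM operation has no projection layer, so the number of
    // cell units equals outputSize and no separate numUnits is declared for this test.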
2385
2386 // Inputs:
2387 // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
2388 // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
2389 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
2390 std::vector<uint8_t> inputValue{166, 179, 50, 150};
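    // Worked example (added, hedged): the fixed range [-1, 127/128] corresponds to scale 1/128 and
    // zero point 128, so e.g. the quantized input 166 decodes to (166 - 128) / 128 = 0.296875.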
2391
2392 // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2393 // [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
2394 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
2395 hidl_vec<uint32_t> inputToInputWeightsDimensions{outputSize, inputSize};
2396 std::vector<uint8_t> inputToInputWeightsValue{146, 250, 235, 171, 10, 218, 171, 108};
2397 // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2398 // [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
2399 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
2400 hidl_vec<uint32_t> inputToForgetWeightsDimensions{outputSize, inputSize};
2401 std::vector<uint8_t> inputToForgetWeightsValue{24, 50, 132, 179, 158, 110, 3, 169};
2402 // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2403 // [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
2404 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
2405 hidl_vec<uint32_t> inputToCellWeightsDimensions{outputSize, inputSize};
2406 std::vector<uint8_t> inputToCellWeightsValue{133, 34, 29, 49, 206, 109, 54, 183};
2407 // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2408 // [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
2409 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
2410 hidl_vec<uint32_t> inputToOutputWeightsDimensions{outputSize, inputSize};
2411 std::vector<uint8_t> inputToOutputWeightsValue{195, 187, 11, 99, 109, 10, 218, 48};
2412 // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2413 // [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
2414 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
2415 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{outputSize, outputSize};
2416 std::vector<uint8_t> recurrentToInputWeightsValue{254, 206, 77, 168, 71, 20, 215, 6,
2417 223, 7, 118, 225, 59, 130, 174, 26};
2418 // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2419 // [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
2420 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
2421 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{outputSize, outputSize};
2422 std::vector<uint8_t> recurrentToForgetWeightsValue{137, 240, 103, 52, 68, 51, 237, 112,
2423 0, 220, 89, 23, 69, 4, 207, 253};
2424 // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2425 // [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
2426 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
2427 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{outputSize, outputSize};
2428 std::vector<uint8_t> recurrentToCellWeightsValue{172, 60, 205, 65, 14, 0, 140, 168,
2429 240, 223, 133, 56, 142, 64, 246, 216};
2430 // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2431 // [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
2432 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
2433 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{outputSize, outputSize};
2434 std::vector<uint8_t> recurrentToOutputWeightsValue{106, 214, 67, 23, 59, 158, 45, 3,
2435 119, 132, 49, 205, 129, 218, 11, 98};
2436 // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
2437 // bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
2438 // of input and weights scales and zeroPoint equal to 0.
2439 hidl_vec<uint32_t> inputGateBiasDimensions{outputSize};
2440 std::vector<int32_t> inputGateBiasValue{-7876, 13488, -726, 32839};
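    // Worked example (added, hedged): with bias scale = inputScale * weightScale and zero point 0,
    // a bias entry such as -7876 decodes to -7876 * inputScale * weightScale (the concrete scales
    // are supplied when the operands are created).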
2441 // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
2442 // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
2443 // of input and weights scales and zeroPoint equal to 0.
2444 hidl_vec<uint32_t> forgetGateBiasDimensions{outputSize};
2445 std::vector<int32_t> forgetGateBiasValue{9206, -46884, -11693, -38724};
2446 // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
2447 // for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
2448 // and weights scales and zeroPoint equal to 0.
2449 hidl_vec<uint32_t> cellBiasDimensions{outputSize};
2450 std::vector<int32_t> cellBiasValue{39481, 48624, 48976, -21419};
2451 // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
2452 // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
2453 // of input and weights scales and zeroPoint equal to 0.
2454 hidl_vec<uint32_t> outputGateBiasDimensions{outputSize};
2455 std::vector<int32_t> outputGateBiasValue{-58999, -17050, -41852, -40538};
2456
2457 // 13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
2458 // [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
2459 // It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
2460 hidl_vec<uint32_t> previousCellStateInDimensions{batchSize, outputSize};
2461 std::vector<int16_t> previousCellStateInValue{876, 1034, 955, -909, 761, 1029, 796, -1036};
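    // Worked example (added, hedged): the symmetric 16-bit range [-2^4, 2^4 * 32767/32768] gives a
    // scale of 2^4 / 32768 = 1/2048, so e.g. the value 876 decodes to 876 / 2048 = 0.427734375.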
2462 // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2463 // [numBatches, outputSize] specifying the output of the LSTM cell from the previous time step. Tensor
2464 // is quantized with a fixed quantization range of -1, 127/128.
2465 hidl_vec<uint32_t> previousOutputInDimensions{batchSize, outputSize};
2466 std::vector<uint8_t> previousOutputInValue{136, 150, 140, 115, 135, 152, 138, 112};
2467
2468 // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
2469 // which contains a cell state from the current time step. Tensor is quantized using a quantization range
2470 // of -2^4, 2^4 * 32767/32768.
2471 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, outputSize};
2472 std::vector<int16_t> cellStateOutValue {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235};
2473 // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
2474 // contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
2475 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
2476 std::vector<uint8_t> outputValue {140, 151, 146, 112, 136, 156, 142, 112};
2477
2478
2479 QuantizedLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
2480 inputToInputWeightsDimensions, inputToInputWeightsValue,
2481 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
2482 inputToCellWeightsDimensions, inputToCellWeightsValue,
2483 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
2484 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
2485 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
2486 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
2487 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
2488 inputGateBiasDimensions, inputGateBiasValue,
2489 forgetGateBiasDimensions, forgetGateBiasValue,
2490 cellBiasDimensions, cellBiasValue,
2491 outputGateBiasDimensions, outputGateBiasValue,
2492 previousOutputInDimensions, previousOutputInValue,
2493 previousCellStateInDimensions, previousCellStateInValue,
2494 cellStateOutDimensions, cellStateOutValue,
2495 outputDimensions, outputValue);
2496 }
2497