//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DriverTestHelpers.hpp"

#include <armnn/utility/IgnoreUnused.hpp>

#include <array>

using ArmnnDriver = armnn_driver::ArmnnDriver;
using DriverOptions = armnn_driver::DriverOptions;
using RequestArgument = V1_0::RequestArgument;

#ifdef ARMNN_ANDROID_S
#include <nnapi/Types.h>
#endif

using namespace driverTestHelpers;
using namespace android::hardware;

namespace
{

template<typename T>
RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int poolIndex)
{
    V1_0::DataLocation inputInloc = {};
    inputInloc.poolIndex = poolIndex;
    inputInloc.offset = 0;
    inputInloc.length = value.size() * sizeof(T);
    RequestArgument inputRequestArgument = {};
    inputRequestArgument.location = inputInloc;
    inputRequestArgument.dimensions = hidl_vec<uint32_t>{};
    return inputRequestArgument;
}
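
// Note: the offset is left at 0 because the tests below give each request argument its own memory pool
// (one pool per input/output), so each argument's data starts at the beginning of its pool.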

// Helper function to create an OperandLifeTime::NO_VALUE for testing.
// To be used on optional input operands that have no values - these are valid and should be tested.
V1_0::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
{
    // Only create a NO_VALUE for optional operands that have no elements
    if (dimensions.size() == 0 || dimensions[0] == 0)
    {
        return V1_0::OperandLifeTime::NO_VALUE;
    }
    return V1_0::OperandLifeTime::CONSTANT_COPY;
}
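
// For example, an omitted optional operand declared with dimensions {0} (and an empty value vector)
// yields NO_VALUE, whereas a populated operand such as one of dimensions {4, 4} yields CONSTANT_COPY.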

template<typename HalModel>
void ExecuteModel(const HalModel& model, armnn_driver::ArmnnDriver& driver, const V1_0::Request& request)
{
    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, driver);
    if (preparedModel.get() != nullptr)
    {
        Execute(preparedModel, request);
    }
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

template<>
void ExecuteModel<armnn_driver::hal_1_2::HalPolicy::Model>(const armnn_driver::hal_1_2::HalPolicy::Model& model,
                                                           armnn_driver::ArmnnDriver& driver,
                                                           const V1_0::Request& request)
{
    android::sp<V1_2::IPreparedModel> preparedModel = PrepareModel_1_2(model, driver);
    if (preparedModel.get() != nullptr)
    {
        Execute(preparedModel, request);
    }
}

#endif

} // anonymous namespace

// Add our own tests here since we fail the lstm tests which Google supplies (because of non-const weights)
template <typename HalPolicy>
void LstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
                  const std::vector<float>& inputValue,
                  const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
                  const std::vector<float>& inputToInputWeightsValue,
                  const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
                  const std::vector<float>& inputToForgetWeightsValue,
                  const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
                  const std::vector<float>& inputToCellWeightsValue,
                  const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
                  const std::vector<float>& inputToOutputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
                  const std::vector<float>& recurrentToInputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
                  const std::vector<float>& recurrentToForgetWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
                  const std::vector<float>& recurrentToCellWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
                  const std::vector<float>& recurrentToOutputWeightsValue,
                  const hidl_vec<uint32_t>& cellToInputWeightsDimensions,
                  const std::vector<float>& cellToInputWeightsValue,
                  const hidl_vec<uint32_t>& cellToForgetWeightsDimensions,
                  const std::vector<float>& cellToForgetWeightsValue,
                  const hidl_vec<uint32_t>& cellToOutputWeightsDimensions,
                  const std::vector<float>& cellToOutputWeightsValue,
                  const hidl_vec<uint32_t>& inputGateBiasDimensions,
                  const std::vector<float>& inputGateBiasValue,
                  const hidl_vec<uint32_t>& forgetGateBiasDimensions,
                  const std::vector<float>& forgetGateBiasValue,
                  const hidl_vec<uint32_t>& cellBiasDimensions,
                  const std::vector<float>& cellBiasValue,
                  const hidl_vec<uint32_t>& outputGateBiasDimensions,
                  const std::vector<float>& outputGateBiasValue,
                  const hidl_vec<uint32_t>& projectionWeightsDimensions,
                  const std::vector<float>& projectionWeightsValue,
                  const hidl_vec<uint32_t>& projectionBiasDimensions,
                  const std::vector<float>& projectionBiasValue,
                  const hidl_vec<uint32_t>& outputStateInDimensions,
                  const std::vector<float>& outputStateInValue,
                  const hidl_vec<uint32_t>& cellStateInDimensions,
                  const std::vector<float>& cellStateInValue,
                  const hidl_vec<uint32_t>& activationFunctionDimensions,
                  const std::vector<int32_t>& activationFunctionValue,
                  const hidl_vec<uint32_t>& cellClippingThresholdDimensions,
                  const std::vector<float>& cellClippingThresholdValue,
                  const hidl_vec<uint32_t>& projectionClippingThresholdDimensions,
                  const std::vector<float>& projectionClippingThresholdValue,
                  const hidl_vec<uint32_t>& inputLayerNormWeightsDimensions,
                  const std::vector<float>& inputLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& forgetLayerNormWeightsDimensions,
                  const std::vector<float>& forgetLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& cellLayerNormWeightsDimensions,
                  const std::vector<float>& cellLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& outputLayerNormWeightsDimensions,
                  const std::vector<float>& outputLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& scratchBufferDimensions,
                  const std::vector<float>& scratchBufferValue,
                  const hidl_vec<uint32_t>& outputStateOutDimensions,
                  const std::vector<float>& outputStateOutValue,
                  const hidl_vec<uint32_t>& cellStateOutDimensions,
                  const std::vector<float>& cellStateOutValue,
                  const hidl_vec<uint32_t>& outputDimensions,
                  const std::vector<float>& outputValue,
                  armnn::Compute compute)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
    using Model = typename HalPolicy::Model;
    Model model = {};

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    AddInputOperand<HalPolicy>(model, inputDimensions);

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    AddTensorOperand<HalPolicy>(model,
                                inputToInputWeightsDimensions,
                                inputToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputToInputWeightsDimensions));
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToInputWeightsDimensions,
                                recurrentToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToInputWeightsDimensions,
                                cellToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToInputWeightsDimensions));
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToForgetWeightsDimensions,
                                cellToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToForgetWeightsDimensions));
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToOutputWeightsDimensions,
                                cellToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToOutputWeightsDimensions));
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                inputGateBiasDimensions,
                                inputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputGateBiasDimensions));
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, forgetGateBiasDimensions, forgetGateBiasValue);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, cellBiasDimensions, cellBiasValue);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, outputGateBiasDimensions, outputGateBiasValue);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    AddTensorOperand<HalPolicy>(model,
                                projectionWeightsDimensions,
                                projectionWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(projectionWeightsDimensions));
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    AddTensorOperand<HalPolicy>(model,
                                projectionBiasDimensions,
                                projectionBiasValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(projectionBiasDimensions));

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddInputOperand<HalPolicy>(model, outputStateInDimensions);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddInputOperand<HalPolicy>(model, cellStateInDimensions);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    AddTensorOperand<HalPolicy>(model,
                                activationFunctionDimensions,
                                activationFunctionValue,
                                HalPolicy::OperandType::INT32);
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    AddTensorOperand<HalPolicy>(model,
                                cellClippingThresholdDimensions,
                                cellClippingThresholdValue,
                                HalPolicy::OperandType::FLOAT32);
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    AddTensorOperand<HalPolicy>(model,
                                projectionClippingThresholdDimensions,
                                projectionClippingThresholdValue,
                                HalPolicy::OperandType::FLOAT32);

    bool normalizationEnabled = false;

    // If any of the layer normalization tensors has a value, all of the normalization tensor operands are added
    if (!inputLayerNormWeightsValue.empty() ||
        !forgetLayerNormWeightsValue.empty() ||
        !cellLayerNormWeightsValue.empty() ||
        !outputLayerNormWeightsValue.empty())
    {
        // Normalization:
        // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
        //     Used to rescale normalized inputs to activation at input gate.
        AddTensorOperand<HalPolicy>(model,
                                    inputLayerNormWeightsDimensions,
                                    inputLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(inputLayerNormWeightsDimensions));
        // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
        //     Used to rescale normalized inputs to activation at forget gate.
        AddTensorOperand<HalPolicy>(model,
                                    forgetLayerNormWeightsDimensions,
                                    forgetLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(forgetLayerNormWeightsDimensions));
        // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
        //     Used to rescale normalized inputs to activation at cell gate.
        AddTensorOperand<HalPolicy>(model,
                                    cellLayerNormWeightsDimensions,
                                    cellLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(cellLayerNormWeightsDimensions));
        // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
        //     Used to rescale normalized inputs to activation at output gate.
        AddTensorOperand<HalPolicy>(model,
                                    outputLayerNormWeightsDimensions,
                                    outputLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(outputLayerNormWeightsDimensions));

        normalizationEnabled = true;
    }

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    AddOutputOperand<HalPolicy>(model, scratchBufferDimensions);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddOutputOperand<HalPolicy>(model, outputStateOutDimensions);
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddOutputOperand<HalPolicy>(model, cellStateOutDimensions);
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    AddOutputOperand<HalPolicy>(model, outputDimensions);

    // make the lstm operation
    model.operations.resize(1);
    model.operations[0].type = HalPolicy::OperationType::LSTM;

    if (normalizationEnabled)
    {
        model.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
                                                         14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26};
        model.operations[0].outputs = hidl_vec<uint32_t> {27, 28, 29, 30};
    }
    else
    {
        model.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                                                         12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
        model.operations[0].outputs = hidl_vec<uint32_t> {23, 24, 25, 26};
    }
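
    // Note: the operand indices above follow the order in which the operands were added to the model:
    // without layer normalization, operands 0-22 are the LSTM inputs and 23-26 are its outputs; with the
    // four extra layer normalization weights, the inputs run from 0 to 26 and the outputs from 27 to 30.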

    // define the input values
    hidl_vec<RequestArgument> inputArguments;
    inputArguments.resize(3);

    inputArguments[0] = CreateRequestArgument<float>(inputValue, 0);
    inputArguments[1] = CreateRequestArgument<float>(outputStateInValue, 1);
    inputArguments[2] = CreateRequestArgument<float>(cellStateInValue, 2);

    // define the expected output values
    hidl_vec<RequestArgument> outputArguments;
    outputArguments.resize(4);

    outputArguments[0] = CreateRequestArgument<float>(scratchBufferValue, 3);
    outputArguments[1] = CreateRequestArgument<float>(outputStateOutValue, 4);
    outputArguments[2] = CreateRequestArgument<float>(cellStateOutValue, 5);
    outputArguments[3] = CreateRequestArgument<float>(outputValue, 6);

    V1_0::Request request = {};
    request.inputs = inputArguments;
    request.outputs = outputArguments;

    // set the input data
    AddPoolAndSetData(inputValue.size(), request, inputValue.data());
    AddPoolAndSetData(outputStateInValue.size(), request, outputStateInValue.data());
    AddPoolAndSetData(cellStateInValue.size(), request, cellStateInValue.data());

    // add memory for the outputs
    AddPoolAndGetData<float>(scratchBufferValue.size(), request);
    android::sp<IMemory> outputStateOutMemory = AddPoolAndGetData<float>(outputStateOutValue.size(), request);
    float* outputStateOutData = static_cast<float*>(static_cast<void*>(outputStateOutMemory->getPointer()));
    android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData<float>(cellStateOutValue.size(), request);
    float* cellStateOutData = static_cast<float*>(static_cast<void*>(cellStateOutMemory->getPointer()));
    android::sp<IMemory> outputMemory = AddPoolAndGetData<float>(outputValue.size(), request);
    float* outputData = static_cast<float*>(static_cast<void*>(outputMemory->getPointer()));

    // make the prepared model and run the execution
    ExecuteModel(model, *driver, request);

    // check the results
    for (size_t i = 0; i < outputStateOutValue.size(); ++i)
    {
        DOCTEST_CHECK_MESSAGE(outputStateOutValue[i] == doctest::Approx( outputStateOutData[i] ),
                              "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != "
                              << outputStateOutData[i]);
    }
    for (size_t i = 0; i < cellStateOutValue.size(); ++i)
    {
        DOCTEST_CHECK_MESSAGE(cellStateOutValue[i] == doctest::Approx( cellStateOutData[i] ),
                              "cellStateOutValue[" << i << "]: " << cellStateOutValue[i] << " != "
                              << cellStateOutData[i]);
    }
    for (size_t i = 0; i < outputValue.size(); ++i)
    {
        DOCTEST_CHECK_MESSAGE(outputValue[i] == doctest::Approx( outputData[i] ),
                              "outputValue[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
    }
}

template <typename HalPolicy>
void QuantizedLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
                           const std::vector<uint8_t>& inputValue,
                           const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
                           const std::vector<uint8_t>& inputToInputWeightsValue,
                           const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
                           const std::vector<uint8_t>& inputToForgetWeightsValue,
                           const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
                           const std::vector<uint8_t>& inputToCellWeightsValue,
                           const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
                           const std::vector<uint8_t>& inputToOutputWeightsValue,
                           const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
                           const std::vector<uint8_t>& recurrentToInputWeightsValue,
                           const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
                           const std::vector<uint8_t>& recurrentToForgetWeightsValue,
                           const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
                           const std::vector<uint8_t>& recurrentToCellWeightsValue,
                           const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
                           const std::vector<uint8_t>& recurrentToOutputWeightsValue,
                           const hidl_vec<uint32_t>& inputGateBiasDimensions,
                           const std::vector<int32_t>& inputGateBiasValue,
                           const hidl_vec<uint32_t>& forgetGateBiasDimensions,
                           const std::vector<int32_t>& forgetGateBiasValue,
                           const hidl_vec<uint32_t>& cellBiasDimensions,
                           const std::vector<int32_t>& cellBiasValue,
                           const hidl_vec<uint32_t>& outputGateBiasDimensions,
                           const std::vector<int32_t>& outputGateBiasValue,
                           const hidl_vec<uint32_t>& previousOutputInDimensions,
                           const std::vector<uint8_t>& previousOutputInValue,
                           const hidl_vec<uint32_t>& previousCellStateInDimensions,
                           const std::vector<int16_t>& previousCellStateInValue,
                           const hidl_vec<uint32_t>& cellStateOutDimensions,
                           const std::vector<int16_t>& cellStateOutValue,
                           const hidl_vec<uint32_t>& outputDimensions,
                           const std::vector<uint8_t>& outputValue)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::GpuAcc));
    using Model = typename HalPolicy::Model;
    Model model = {};

    float inputOutputScale = 0.0078125f;
    int32_t inputOutputOffset = 128;

    float cellStateScale = 0.00048828125f;
    int32_t cellStateOffset = 0;

    float weightsScale = 0.00408021f;
    int32_t weightsOffset = 100;

    float biasScale = 3.1876640625e-05f;
    int32_t biasOffset = 0;
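
    // For reference, these constants follow from the quantization ranges quoted in the operand comments below:
    // inputOutputScale = 1/128 with zero point 128 covers the fixed range [-1, 127/128] on unsigned 8-bit data,
    // cellStateScale = 16/32768 with zero point 0 covers [-2^4, 2^4 * 32767/32768] on signed 16-bit data, and
    // biasScale is the product of the input and weights scales: 0.0078125f * 0.00408021f = 3.1876640625e-05f.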

    // Inputs:
    // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
    //    specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
    AddInputOperand<HalPolicy>(model,
                               inputDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                               inputOutputScale,
                               inputOutputOffset);

    // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                inputToInputWeightsDimensions,
                                inputToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(inputToInputWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                inputToForgetWeightsDimensions,
                                inputToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(inputToForgetWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                inputToCellWeightsDimensions,
                                inputToCellWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(inputToCellWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                inputToOutputWeightsDimensions,
                                inputToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(inputToOutputWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToInputWeightsDimensions,
                                recurrentToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(recurrentToInputWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToForgetWeightsDimensions,
                                recurrentToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(recurrentToForgetWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToCellWeightsDimensions,
                                recurrentToCellWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(recurrentToCellWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToOutputWeightsDimensions,
                                recurrentToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(recurrentToOutputWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
    //    bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //    of input and weights scales and zeroPoint equal to 0.
    AddTensorOperand<HalPolicy>(model,
                                inputGateBiasDimensions,
                                inputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(inputGateBiasDimensions),
                                biasScale,
                                biasOffset);
    // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //     of input and weights scales and zeroPoint equal to 0.
    AddTensorOperand<HalPolicy>(model,
                                forgetGateBiasDimensions,
                                forgetGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(forgetGateBiasDimensions),
                                biasScale,
                                biasOffset);
    // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
    //     for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
    //     and weights scales and zeroPoint equal to 0.
    AddTensorOperand<HalPolicy>(model,
                                cellBiasDimensions,
                                cellBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(cellBiasDimensions),
                                biasScale,
                                biasOffset);
    // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //     of input and weights scales and zeroPoint equal to 0.
    AddTensorOperand<HalPolicy>(model,
                                outputGateBiasDimensions,
                                outputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(outputGateBiasDimensions),
                                biasScale,
                                biasOffset);

    // 13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
    //     [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
    //     It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
    AddInputOperand<HalPolicy>(model,
                               previousCellStateInDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                               cellStateScale,
                               cellStateOffset);
    // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //     [numBatches, outputSize] specifying the output of the LSTM cell from previous time-step. Tensor
    //     is quantized with a fixed quantization range of -1, 127/128.
    AddInputOperand<HalPolicy>(model,
                               previousOutputInDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                               inputOutputScale,
                               inputOutputOffset);

    // Outputs:
    // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
    //    which contains a cell state from the current time step. Tensor is quantized using a quantization range
    //    of -2^4, 2^4 * 32767/32768.
    AddOutputOperand<HalPolicy>(model,
                                cellStateOutDimensions,
                                HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                cellStateScale,
                                cellStateOffset);
    // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
    //    contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
    AddOutputOperand<HalPolicy>(model,
                                outputDimensions,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                inputOutputScale,
                                inputOutputOffset);

    // make the lstm operation
    model.operations.resize(1);
    model.operations[0].type = HalPolicy::OperationType::QUANTIZED_16BIT_LSTM;

    model.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7,
                                                      8, 9, 10, 11, 12, 13, 14};
    model.operations[0].outputs = hidl_vec<uint32_t> {15, 16};
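
    // As in the float LSTM test above, the indices follow the order in which the operands were added:
    // operands 0-14 are the quantized LSTM inputs and operands 15-16 are the cell state and output.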

    // define the input values
    hidl_vec<RequestArgument> inputArguments;
    inputArguments.resize(3);

    inputArguments[0] = CreateRequestArgument<uint8_t>(inputValue, 0);
    inputArguments[1] = CreateRequestArgument<int16_t>(previousCellStateInValue, 1);
    inputArguments[2] = CreateRequestArgument<uint8_t>(previousOutputInValue, 2);

    // define the expected output values
    hidl_vec<RequestArgument> outputArguments;
    outputArguments.resize(2);

    outputArguments[0] = CreateRequestArgument<int16_t>(cellStateOutValue, 3);
    outputArguments[1] = CreateRequestArgument<uint8_t>(outputValue, 4);

    V1_0::Request request = {};
    request.inputs = inputArguments;
    request.outputs = outputArguments;

    // set the input data
    AddPoolAndSetData(inputValue.size(), request, inputValue.data());
    AddPoolAndSetData(previousCellStateInValue.size(), request, previousCellStateInValue.data());
    AddPoolAndSetData(previousOutputInValue.size(), request, previousOutputInValue.data());

    // add memory for the outputs
    android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData<int16_t>(cellStateOutValue.size(), request);
    int16_t* cellStateOutData = static_cast<int16_t*>(static_cast<void*>(cellStateOutMemory->getPointer()));
    android::sp<IMemory> outputMemory = AddPoolAndGetData<uint8_t>(outputValue.size(), request);
    uint8_t* outputData = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));

    // make the prepared model and run the execution
    ExecuteModel(model, *driver, request);

    // check the results
    for (size_t i = 0; i < cellStateOutValue.size(); ++i)
    {
        DOCTEST_CHECK_MESSAGE(cellStateOutValue[i] == doctest::Approx( cellStateOutData[i] ),
                              "cellStateOutValue[" << i << "]: " << cellStateOutValue[i] << " != "
                              << cellStateOutData[i]);
    }
    for (size_t i = 0; i < outputValue.size(); ++i)
    {
        DOCTEST_CHECK_MESSAGE(outputValue[i] == doctest::Approx( outputData[i] ),
                              "outputValue[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
    }
}

template <typename HalPolicy>
void LstmNoCifgNoPeepholeNoProjection(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).

    uint32_t batchSize = 1;
    uint32_t inputSize = 2;
    uint32_t numUnits = 4;
    uint32_t outputSize = numUnits;

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<float> inputValue{2.0f, 3.0f};

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToInputWeightsValue{-0.45018822f, -0.02338299f,
                                                -0.08705890f, -0.34550029f,
                                                 0.04266912f, -0.15680569f,
                                                -0.34856534f,  0.43890524f};
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{ 0.09701663f,  0.20334584f,
                                                 -0.50592935f, -0.31343272f,
                                                 -0.40032279f,  0.44781327f,
                                                  0.01387155f, -0.35593212f};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.50013041f,  0.13702840f,
                                                0.11810488f,  0.20131630f,
                                               -0.20583314f,  0.44344562f,
                                                0.22077113f, -0.29909778f};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{-0.25065863f, -0.28290087f,
                                                  0.04613829f,  0.40525138f,
                                                  0.44272184f,  0.03897077f,
                                                 -0.15568960f,  0.19487578f};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToInputWeightsValue{-0.00635350f, -0.20423880f,  0.31454784f, -0.35746509f,
                                                     0.28902304f,  0.08183324f, -0.16555229f,  0.02286911f,
                                                    -0.13566875f,  0.03034258f,  0.48091322f, -0.12528998f,
                                                     0.24077177f, -0.51332325f, -0.33502164f,  0.10629296f};
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.48684245f, -0.06655136f,  0.42224967f,  0.21126390f,
                                                      0.27654213f,  0.20864892f, -0.07646349f,  0.45877004f,
                                                      0.00141793f, -0.14609534f,  0.36447752f,  0.09196436f,
                                                      0.28053468f,  0.01560611f, -0.20127171f, -0.01140004f};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{-0.34074140f,  0.24443203f, -0.20785320f,  0.26320225f,
                                                    0.05695659f, -0.00123841f, -0.47447860f, -0.35869038f,
                                                   -0.06418842f, -0.13502428f, -0.50176400f,  0.22830659f,
                                                   -0.46367589f,  0.26016325f, -0.03894562f, -0.16368064f};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{ 0.43385774f, -0.17194885f,  0.27182370f,  0.09215671f,
                                                      0.24107647f, -0.39835793f,  0.18212086f,  0.01301402f,
                                                      0.48572797f, -0.50656658f,  0.20047462f, -0.20607421f,
                                                     -0.51818722f, -0.15390486f,  0.04681480f,  0.39922136f};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<float> cellToInputWeightsValue;
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
    std::vector<float> cellToForgetWeightsValue;
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
    std::vector<float> cellToOutputWeightsValue;
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
    std::vector<float> inputGateBiasValue(numUnits, 0.0f);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue(numUnits, 0.0f);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue(numUnits, 0.0f);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    hidl_vec<uint32_t> projectionWeightsDimensions{0};
    std::vector<float> projectionWeightsValue;
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<float> projectionBiasValue;

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t> activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4};
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> cellClippingThresholdDimensions{};
    std::vector<float> cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
    std::vector<float> projectionClippingThresholdValue{0.0f};

    // Normalization:
    // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at input gate.
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
    std::vector<float> inputLayerNormWeightsValue;
    // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at forget gate.
    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
    std::vector<float> forgetLayerNormWeightsValue;
    // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at cell gate.
    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
    std::vector<float> cellLayerNormWeightsValue;
    // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at output gate.
    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
    std::vector<float> outputLayerNormWeightsValue;

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    //    HOWEVER, looking at the code, it seems to be the opposite: (cifg ? 3 : 4) * numUnits
    //    Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //              android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //              tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
    hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
    std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
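    // This test does not use CIFG (the input-to-input weights are supplied), hence numUnits * 4 here,
    // in line with the (cifg ? 3 : 4) * numUnits behaviour noted above.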
808 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
809 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
810 std::vector<float> outputStateOutValue {-0.0297319f, 0.122947f, 0.208851f, -0.153588f};
811 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
812 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
813 std::vector<float> cellStateOutValue {-0.145439f, 0.157475f, 0.293663f, -0.277353f};
814 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
815 // effectively the same as the current “output state (out)” value.
816 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
817 std::vector<float> outputValue {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f};
818
819 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
820 inputToInputWeightsDimensions, inputToInputWeightsValue,
821 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
822 inputToCellWeightsDimensions, inputToCellWeightsValue,
823 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
824 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
825 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
826 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
827 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
828 cellToInputWeightsDimensions, cellToInputWeightsValue,
829 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
830 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
831 inputGateBiasDimensions, inputGateBiasValue,
832 forgetGateBiasDimensions, forgetGateBiasValue,
833 cellBiasDimensions, cellBiasValue,
834 outputGateBiasDimensions, outputGateBiasValue,
835 projectionWeightsDimensions, projectionWeightsValue,
836 projectionBiasDimensions, projectionBiasValue,
837 outputStateInDimensions, outputStateInValue,
838 cellStateInDimensions, cellStateInValue,
839 activationFunctionDimensions, activationFunctionValue,
840 cellClippingThresholdDimensions, cellClippingThresholdValue,
841 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
842 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
843 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
844 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
845 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
846 scratchBufferDimensions, scratchBufferValue,
847 outputStateOutDimensions, outputStateOutValue,
848 cellStateOutDimensions, cellStateOutValue,
849 outputDimensions, outputValue,
850 compute);
851 }
852
853 template <typename HalPolicy>
LstmCifgPeepholeNoProjection(armnn::Compute compute)854 void LstmCifgPeepholeNoProjection(armnn::Compute compute)
855 {
856 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
857 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
858 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
859
860 uint32_t batchSize = 1;
861 uint32_t inputSize = 2;
862 uint32_t numUnits = 4;
863 uint32_t outputSize = numUnits;
864
865 // Inputs:
866 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
867 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
868 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
869 std::vector<float> inputValue{2.0f, 3.0f};
870
871 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
872 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
873 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
874 std::vector<float> inputToInputWeightsValue;
875 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
876 // [num_units, input_size].
877 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
878 std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
879 0.13056988f, -0.36333650f,
880 -0.22755712f, 0.28253698f,
881 0.24407166f, 0.33826375f};
882 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
883 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
884 std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
885 -0.09624726f, 0.05100781f,
886 0.04717243f, 0.48944736f,
887 -0.38535351f, -0.17212132f};
888 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
889 // [num_units, input_size].
890 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
891 std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
892 -0.55932593f, -0.09426838f,
893 -0.44257352f, 0.54939759f,
894 0.01533556f, 0.42751634f};
895 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
896 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
897 // “num_units”), or the second dimension of the “projection_weights”, if defined.
898 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
899 std::vector<float> recurrentToInputWeightsValue;
900 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
901 // [num_units, output_size].
902 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
903 std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
904 -0.14340827f, 0.36986142f, 0.23414481f, 0.55899000f,
905 0.10798943f, -0.41174671f, 0.17751795f, -0.34484994f,
906 -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f};
907 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
908 // [num_units, output_size].
909 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
910 std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
911 0.42957711f, 0.01841056f, -0.32764608f, -0.33027974f,
912 -0.10826075f, 0.20675004f, 0.19069612f, -0.03026325f,
913 -0.54532051f, 0.33003211f, 0.44901288f, 0.21193194f};
914 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
915 // [num_units, output_size].
916 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
917 std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
918 0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
919 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
920 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
921 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
922 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
923 std::vector<float> cellToInputWeightsValue;
924 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
925 hidl_vec<uint32_t> cellToForgetWeightsDimensions{4};
926 std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
927 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
928 hidl_vec<uint32_t> cellToOutputWeightsDimensions{4};
929 std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
930 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
931 hidl_vec<uint32_t> inputGateBiasDimensions{0}; // VTS was {4} -> {0} ?
932 std::vector<float> inputGateBiasValue;
933 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
934 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
935 std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
936 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
937 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
938 std::vector<float> cellBiasValue(numUnits, 0.0f);
939 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
940 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
941 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
942 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
943 // [output_size, num_units].
944 hidl_vec<uint32_t> projectionWeightsDimensions{0};
945 std::vector<float> projectionWeightsValue;
946 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
947 hidl_vec<uint32_t> projectionBiasDimensions{0};
948 std::vector<float> projectionBiasValue;
949
950 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
951 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
952 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
953 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
954 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
955 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
956
957 // Constant scalar values (the VTS test adds these as tensors of dim {})
958 // 20: The activation function: A value indicating the activation function:
959 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
960 hidl_vec<uint32_t> activationFunctionDimensions{};
961 std::vector<int32_t> activationFunctionValue{4};
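    // (A value of 4 selects Tanh from the list above.)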
962 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
963 // If set to 0.0 then clipping is disabled.
964 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
965 std::vector<float> cellClippingThresholdValue{0.0f};
966 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
967 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
968 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
969 std::vector<float> projectionClippingThresholdValue{0.0f};
970
971 // Normalization:
972     // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
973 // Used to rescale normalized inputs to activation at input gate.
974 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
975 std::vector<float> inputLayerNormWeightsValue;
976     // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
977 // Used to rescale normalized inputs to activation at forget gate.
978 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
979 std::vector<float> forgetLayerNormWeightsValue;
980     // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
981 // Used to rescale normalized inputs to activation at cell gate.
982 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
983 std::vector<float> cellLayerNormWeightsValue;
984     // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
985 // Used to rescale normalized inputs to activation at output gate.
986 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
987 std::vector<float> outputLayerNormWeightsValue;
988
989 // Outputs:
990 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
991 // CIFG, or [batch_size, num_units * 3] without CIFG.
992     // However, the reference implementation uses the opposite rule: (cifg ? 3 : 4) * numUnits
993 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
994 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
995 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
996 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
997 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
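    // CIFG is used in this test (the optional input gate operands above are empty), so the
    // (cifg ? 3 : 4) * numUnits rule from the reference code gives 3 * numUnits columns here.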
998 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
999 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1000 std::vector<float> outputStateOutValue{-0.364445f, -0.00352185f, 0.128866f, -0.0516365f};
1001 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1002 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1003 std::vector<float> cellStateOutValue{-0.760444f, -0.0180416f, 0.182264f, -0.0649371f};
1004 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1005 // effectively the same as the current “output state (out)” value.
1006 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1007 std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f};
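    // These are the same values as the output state (out) above, quoted at higher precision.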
1008
1009 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1010 inputToInputWeightsDimensions, inputToInputWeightsValue,
1011 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1012 inputToCellWeightsDimensions, inputToCellWeightsValue,
1013 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1014 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1015 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1016 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1017 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1018 cellToInputWeightsDimensions, cellToInputWeightsValue,
1019 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1020 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1021 inputGateBiasDimensions, inputGateBiasValue,
1022 forgetGateBiasDimensions, forgetGateBiasValue,
1023 cellBiasDimensions, cellBiasValue,
1024 outputGateBiasDimensions, outputGateBiasValue,
1025 projectionWeightsDimensions, projectionWeightsValue,
1026 projectionBiasDimensions, projectionBiasValue,
1027 outputStateInDimensions, outputStateInValue,
1028 cellStateInDimensions, cellStateInValue,
1029 activationFunctionDimensions, activationFunctionValue,
1030 cellClippingThresholdDimensions, cellClippingThresholdValue,
1031 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1032 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1033 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1034 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1035 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1036 scratchBufferDimensions, scratchBufferValue,
1037 outputStateOutDimensions, outputStateOutValue,
1038 cellStateOutDimensions, cellStateOutValue,
1039 outputDimensions, outputValue,
1040 compute);
1041 }
1042
1043 template <typename HalPolicy>
1044 void LstmNoCifgPeepholeProjection(armnn::Compute compute)
1045 {
1046 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm3.model.cpp
1047 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm3.example.cpp
1048 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
1049
1050 uint32_t batchSize = 2;
1051 uint32_t inputSize = 5;
1052 uint32_t numUnits = 20;
1053 uint32_t outputSize = 16;
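    // A projection layer is used in this test (operand 16 below), which is why outputSize (16)
    // differs from numUnits (20).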
1054
1055 // Inputs:
1056 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1057 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1058 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
1059 std::vector<float> inputValue{0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
1060 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f};
1061
1062 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1063 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
1064 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
1065 std::vector<float> inputToInputWeightsValue
1066 {
1067 0.0213936830f, 0.0612455100f, 0.0469051670f, -0.0146576770f, -0.0314946300f,
1068 0.0917180300f, 0.1464780100f, 0.1079719300f, -0.0057968358f, 0.0019193048f,
1069 -0.2726754000f, 0.1015402900f, -0.0185398850f, 0.0803498850f, -0.1026238500f,
1070 -0.0225997870f, -0.0912115500f, -0.0086759670f, -0.0452061030f, -0.0821282000f,
1071 -0.0080459520f, 0.0154780810f, 0.0552172470f, 0.0387195870f, 0.0441536270f,
1072 -0.0645324300f, 0.0503182500f, -0.0469351080f, -0.0081644309f, 0.0145742260f,
1073 -0.1671009000f, -0.1551955200f, -0.1681979700f, -0.1397126900f, -0.1195305900f,
1074 0.2500548700f, -0.2279098300f, 0.0098550870f, -0.0281409580f, -0.1120069800f,
1075 0.1129540800f, -0.0035217577f, 0.0544850750f, 0.0518469500f, 0.0647112060f,
1076 0.1098919300f, 0.1167478600f, 0.0349060700f, 0.0772735700f, 0.1139058500f,
1077 -0.1863375000f, -0.1034451000f, -0.1394518900f, -0.0494012270f, -0.1876706300f,
1078 0.0424839030f, 0.1423355200f, 0.1383258100f, 0.1835016500f, 0.1454560300f,
1079 -0.0285457040f, 0.0249395310f, 0.0509297180f, 0.0076203286f, -0.0029723682f,
1080 -0.0424842240f, -0.1182759600f, -0.0917110400f, -0.1080862800f, -0.1632798800f,
1081 -0.2273378000f, -0.0993647000f, -0.0171551070f, 0.0023917493f, 0.0492727640f,
1082 0.0038534778f, 0.0547645050f, 0.0897537840f, 0.0694723400f, 0.0801447600f,
1083 -0.0454423400f, -0.0497073000f, -0.0713563100f, -0.0489291060f, -0.0040420120f,
1084 -0.0092840260f, 0.0180420540f, 0.0036860977f, -0.0742730200f, -0.1143460400f,
1085 -0.0189954560f, 0.0314875430f, 0.0128349080f, 0.0199777540f, 0.0442566540f,
1086 -0.3929261300f, -0.1851933400f, -0.1165128100f, -0.0680989200f, 0.0113736770f
1087 };
1088 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1089 // [num_units, input_size].
1090 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
1091 std::vector<float> inputToForgetWeightsValue
1092 {
1093 -0.0018401089f, -0.0048522370f, 0.0369842400f, 0.0141817040f, 0.0282732360f,
1094 -0.0167261940f, -0.0524975900f, -0.1020426100f, 0.0086106600f, -0.0409795050f,
1095 -0.0098991870f, 0.0192389200f, -0.0281772690f, -0.0853510300f, -0.1458549500f,
1096 0.1066256700f, -0.0190973100f, -0.0178835340f, -0.0047269356f, -0.0451033230f,
1097 0.0030784295f, 0.0767847750f, 0.0746369600f, 0.0945313950f, 0.0814421000f,
1098 -0.1225789900f, -0.0339457580f, -0.0313034650f, 0.0456306260f, 0.0684388700f,
1099 -0.1349294500f, -0.0124800070f, -0.0811829000f, -0.0722449900f, -0.0962879100f,
1100 0.0451009460f, 0.0012300825f, 0.0139646620f, 0.0993723940f, 0.0254305900f,
1101 0.0695832400f, 0.0342572960f, 0.0482646000f, 0.0626799700f, 0.0526250680f,
1102 0.1278466600f, 0.0707789700f, 0.0257259350f, 0.0416500900f, 0.0724190500f,
1103 0.0186686440f, -0.0373772940f, -0.0627778300f, -0.0883363600f, -0.0401206050f,
1104 -0.0114055860f, -0.0078083350f, -0.0103013860f, -0.0051021670f, 0.0277174640f,
1105 0.0548342300f, 0.1144911100f, 0.1128965200f, 0.1093983900f, 0.1339650600f,
1106 -0.0840216600f, -0.0190146200f, -0.0446783040f, -0.0772056500f, 0.0143500630f,
1107 -0.1175795800f, -0.0652038000f, -0.0818573300f, -0.0767543240f, -0.0926143750f,
1108 0.1040549100f, 0.0529603360f, 0.0357558950f, 0.0358393860f, -0.0125405530f,
1109 0.0368812980f, 0.0291337600f, 0.0342015900f, 0.0544844700f, -0.0545233530f,
1110 0.0258271500f, 0.0232735500f, -0.0118571790f, -0.0011980024f, -0.0346417170f,
1111 -0.0261250940f, -0.1758261500f, -0.1592365700f, -0.2748677400f, -0.0006143371f,
1112 0.0001771948f, -8.470171e-05f, 0.0265180700f, 0.0457907650f, 0.069564960f
1113 };
1114 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
1115 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
1116 std::vector<float> inputToCellWeightsValue
1117 {
1118 -0.0458028300f, -0.0954946200f, -0.0324189850f, -0.0645463300f, -0.0435284530f,
1119 0.0430185870f, -0.0491523440f, -0.1241814400f, -0.0789854750f, -0.0759688900f,
1120 0.0194843620f, -0.1143496200f, -0.0074034138f, -0.0631484400f, -0.0929814950f,
1121 0.0062155537f, -0.0250343380f, -0.0028890965f, 0.0489295270f, 0.0623507500f,
1122 0.1066591800f, -0.0320367920f, -0.0850591600f, -0.1084335800f, -0.1300243300f,
1123 -0.0368164370f, -0.0213013400f, -0.0165182390f, 0.0047691227f, -0.0025825808f,
1124 0.0660178660f, 0.0299915340f, -0.1065283600f, -0.1037554000f, -0.1305607100f,
1125 -0.0326664300f, -0.0337024140f, -0.0064734240f, -0.0461169200f, 0.0144193390f,
1126 -0.0251743230f, 0.0396852000f, 0.0817775060f, 0.0615746800f, 0.1021009500f,
1127 -0.0096581940f, 0.0465117170f, 0.0360390600f, 0.0069369148f, 0.0159600950f,
1128 -0.0650766600f, 0.0955159800f, 0.0535688360f, 0.0640871400f, 0.1283566700f,
1129 -0.0087143290f, -0.2021196600f, -0.1209367400f, 0.0294504720f, 0.2849013000f,
1130 -0.0292279010f, 0.1164364000f, -0.0856026300f, 0.0994178600f, -0.0369995650f,
1131 -0.0288426260f, -0.0033637602f, -0.0170129020f, -0.0972086500f, -0.1119335100f,
1132 -0.0291551170f, -0.0179360340f, -0.0097689360f, -0.0422332400f, -0.0361596350f,
1133 0.0650511200f, -0.0217428920f, -0.0233772120f, -0.0722136400f, -0.0643055200f,
1134 0.0545386500f, 0.0911498140f, 0.0638733100f, 0.0075183930f, 0.0559609530f,
1135 0.0697793440f, 0.0464111680f, 0.1050991100f, 0.0746389400f, 0.0075130584f,
1136 0.0128509820f, 0.0455543100f, 0.0569556880f, 0.0655528500f, 0.0508014560f,
1137 -0.0098626830f, 0.0082677200f, -0.0265556090f, -0.0073611983f, -0.0014897042f
1138 };
1139 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1140 // [num_units, input_size].
1141 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
1142 std::vector<float> inputToOutputWeightsValue
1143 {
1144 -0.0998932000f, -0.0720195600f, -0.0528037730f, -0.1562959300f, -0.1500191800f,
1145 -0.0765075100f, 0.0235985500f, -0.0751553550f, -0.0803770900f, -0.1509353400f,
1146 0.0295175520f, -0.0475139300f, 0.0103505310f, -0.0266485100f, -0.0168397220f,
1147 -0.0231211630f, 0.0077019283f, 0.0128512570f, -0.0504064900f, -0.0129761000f,
1148 -0.0217377470f, -0.0383057930f, -0.0687058600f, -0.0148124700f, -0.0012853940f,
1149 0.1012423600f, 0.0831228350f, 0.0533130060f, -0.0622356460f, -0.0756371540f,
1150 -0.0278339030f, 0.0297749710f, 0.1130802000f, 0.0921890600f, 0.0950613500f,
1151 -0.0866657640f, -0.0371627060f, -0.0388809140f, -0.0358328450f, -0.0144815640f,
1152 -0.0982500300f, -0.1204856900f, -0.0976655860f, -0.0528763300f, -0.0964047000f,
1153 -0.1136642900f, 0.0357775050f, 0.1356881900f, 0.0524513830f, 0.0506493040f,
1154 0.0579895100f, -0.0218523350f, -0.0998488440f, 0.0147404750f, -0.0788979460f,
1155 0.0497469900f, 0.0141604730f, 0.0697393200f, 0.0496494200f, 0.0333646460f,
1156 0.0819012400f, 0.0255353670f, 0.0508931650f, 0.0485142540f, 0.0694581300f,
1157 -0.0789075640f, -0.0670761600f, -0.1184450800f, -0.0998668800f, -0.0750940300f,
1158 0.0626322600f, 0.1492558700f, 0.2018843600f, 0.1209845100f, 0.1463941500f,
1159 0.0015017595f, -0.0142673820f, -0.0341725700f, 0.0127114680f, 0.0028300495f,
1160 -0.0247584820f, -0.0509854800f, -0.0821182000f, 0.0142256720f, 0.0215441580f,
1161 0.0894972500f, 0.0750526800f, -0.0020780868f, 0.0490825800f, 0.0647629500f,
1162 -0.0229070630f, 0.0275624560f, 0.0401857350f, 0.0195675770f, -0.0155987390f,
1163 -0.0490973030f, -0.0171218660f, -0.0833682340f, -0.0233200200f, -0.084095600f
1164 };
1165 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1166 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1167 // “num_units”), or the second dimension of the “projection_weights”, if defined.
1168 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
1169 std::vector<float> recurrentToInputWeightsValue
1170 {
1171 -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f, // 00
1172 -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
1173 -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
1174 -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
1175 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f, // 01
1176 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
1177 -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
1178 0.14283475f, -0.07390571f, -0.06402044f, 0.062524505f,
1179 -0.093129106f, 0.04860203f, -0.08364217f, -0.08119002f, // 02
1180 0.009352075f, 0.22920375f, 0.0016303885f, 0.11583097f,
1181 -0.13732095f, 0.012405723f, -0.07551853f, 0.06343048f,
1182 0.12162708f, -0.031923793f, -0.014335606f, 0.01790974f,
1183 -0.10650317f, -0.0724401f, 0.08554849f, -0.05727212f, // 03
1184 0.06556731f, -0.042729504f, -0.043227166f, 0.011683251f,
1185 -0.013082158f, -0.029302018f, -0.010899579f, -0.062036745f,
1186 -0.022509435f, -0.00964907f, -0.01567329f, 0.04260106f,
1187 -0.07787477f, -0.11576462f, 0.017356863f, 0.048673786f, // 04
1188 -0.017577527f, -0.05527947f, -0.082487635f, -0.040137455f,
1189 -0.10820036f, -0.04666372f, 0.022746278f, -0.07851417f,
1190 0.01068115f, 0.032956902f, 0.022433773f, 0.0026891115f,
1191 0.08944216f, -0.0685835f, 0.010513544f, 0.07228705f, // 05
1192 0.02032331f, -0.059686817f, -0.0005566496f, -0.086984694f,
1193 0.040414046f, -0.1380399f, 0.094208956f, -0.05722982f,
1194 0.012092817f, -0.04989123f, -0.086576f, -0.003399834f,
1195 -0.04696032f, -0.045747425f, 0.10091314f, 0.048676282f, // 06
1196 -0.029037097f, 0.031399418f, -0.0040285117f, 0.047237843f,
1197 0.09504992f, 0.041799378f, -0.049185462f, -0.031518843f,
1198 -0.10516937f, 0.026374253f, 0.10058866f, -0.0033195973f,
1199 -0.041975245f, 0.0073591834f, 0.0033782164f, -0.004325073f, // 07
1200 -0.10167381f, 0.042500053f, -0.01447153f, 0.06464186f,
1201 -0.017142897f, 0.03312627f, 0.009205989f, 0.024138335f,
1202 -0.011337001f, 0.035530265f, -0.010912711f, 0.0706555f,
1203 -0.005894094f, 0.051841937f, -0.1401738f, -0.02351249f, // 08
1204 0.0365468f, 0.07590991f, 0.08838724f, 0.021681072f,
1205 -0.10086113f, 0.019608743f, -0.06195883f, 0.077335775f,
1206 0.023646897f, -0.095322326f, 0.02233014f, 0.09756986f,
1207 -0.048691444f, -0.009579111f, 0.07595467f, 0.11480546f, // 09
1208 -0.09801813f, 0.019894179f, 0.08502348f, 0.004032281f,
1209 0.037211012f, 0.068537936f, -0.048005626f, -0.091520436f,
1210 -0.028379958f, -0.01556313f, 0.06554592f, -0.045599163f,
1211 -0.01672207f, -0.020169014f, -0.011877351f, -0.20212261f, // 10
1212 0.010889619f, 0.0047078193f, 0.038385306f, 0.08540671f,
1213 -0.017140968f, -0.0035865551f, 0.016678626f, 0.005633034f,
1214 0.015963363f, 0.00871737f, 0.060130805f, 0.028611384f,
1215 0.10109069f, -0.015060172f, -0.07894427f, 0.06401885f, // 11
1216 0.011584063f, -0.024466386f, 0.0047652307f, -0.09041358f,
1217 0.030737216f, -0.0046374933f, 0.14215417f, -0.11823516f,
1218 0.019899689f, 0.006106124f, -0.027092824f, 0.0786356f,
1219 0.05052217f, -0.058925f, -0.011402121f, -0.024987547f, // 12
1220 -0.0013661642f, -0.06832946f, -0.015667673f, -0.1083353f,
1221 -0.00096863037f, -0.06988685f, -0.053350925f, -0.027275559f,
1222 -0.033664223f, -0.07978348f, -0.025200296f, -0.017207067f,
1223 -0.058403496f, -0.055697463f, 0.005798788f, 0.12965427f, // 13
1224 -0.062582195f, 0.0013350133f, -0.10482091f, 0.0379771f,
1225 0.072521195f, -0.0029455067f, -0.13797039f, -0.03628521f,
1226 0.013806405f, -0.017858358f, -0.01008298f, -0.07700066f,
1227 -0.017081132f, 0.019358726f, 0.0027079724f, 0.004635139f, // 14
1228 0.062634714f, -0.02338735f, -0.039547626f, -0.02050681f,
1229 0.03385117f, -0.083611414f, 0.002862572f, -0.09421313f,
1230 0.058618143f, -0.08598433f, 0.00972939f, 0.023867095f,
1231 -0.053934585f, -0.023203006f, 0.07452513f, -0.048767887f, // 15
1232 -0.07314807f, -0.056307215f, -0.10433547f, -0.06440842f,
1233 0.04328182f, 0.04389765f, -0.020006588f, -0.09076438f,
1234 -0.11652589f, -0.021705797f, 0.03345259f, -0.010329105f,
1235 -0.025767034f, 0.013057034f, -0.07316461f, -0.10145612f, // 16
1236 0.06358255f, 0.18531723f, 0.07759293f, 0.12006465f,
1237 0.1305557f, 0.058638252f, -0.03393652f, 0.09622831f,
1238 -0.16253184f, -2.4580743e-06f, 0.079869635f, -0.070196845f,
1239 -0.005644518f, 0.06857898f, -0.12598175f, -0.035084512f, // 17
1240 0.03156317f, -0.12794146f, -0.031963028f, 0.04692781f,
1241 0.030070418f, 0.0071660685f, -0.095516115f, -0.004643372f,
1242 0.040170413f, -0.062104587f, -0.0037324072f, 0.0554317f,
1243 0.08184801f, -0.019164372f, 0.06791302f, 0.034257166f, // 18
1244 -0.10307039f, 0.021943003f, 0.046745934f, 0.0790918f,
1245 -0.0265588f, -0.007824208f, 0.042546265f, -0.00977924f,
1246 -0.0002440307f, -0.017384544f, -0.017990116f, 0.12252321f,
1247 -0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f, // 19
1248 0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f,
1249 -0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f,
1250 -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f
1251 };
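    // (The // 00 ... // 19 markers above label the numUnits rows of the [numUnits, outputSize] matrix.)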
1252 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1253 // [num_units, output_size].
1254 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
1255 std::vector<float> recurrentToForgetWeightsValue
1256 {
1257 -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f, // 00
1258 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
1259 -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
1260 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
1261 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f, // 01
1262 -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
1263 -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
1264 0.061878487f, -0.04729229f, 0.034919553f, -0.07585433f,
1265 -0.04421272f, -0.044019096f, 0.085488975f, 0.04058006f, // 02
1266 -0.06890133f, -0.030951202f, -0.024628663f, -0.07672815f,
1267 0.034293607f, 0.08556707f, -0.05293577f, -0.033561368f,
1268 -0.04899627f, 0.0241671f, 0.015736353f, -0.095442444f,
1269 -0.029564252f, 0.016493602f, -0.035026584f, 0.022337519f, // 03
1270 -0.026871363f, 0.004780428f, 0.0077918363f, -0.03601621f,
1271 0.016435321f, -0.03263031f, -0.09543275f, -0.047392778f,
1272 0.013454138f, 0.028934088f, 0.01685226f, -0.086110644f,
1273 -0.046250615f, -0.01847454f, 0.047608484f, 0.07339695f, // 04
1274 0.034546845f, -0.04881143f, 0.009128804f, -0.08802852f,
1275 0.03761666f, 0.008096139f, -0.014454086f, 0.014361001f,
1276 -0.023502491f, -0.0011840804f, -0.07607001f, 0.001856849f,
1277 -0.06509276f, -0.006021153f, -0.08570962f, -0.1451793f, // 05
1278 0.060212336f, 0.055259194f, 0.06974018f, 0.049454916f,
1279 -0.027794661f, -0.08077226f, -0.016179763f, 0.1169753f,
1280 0.17213494f, -0.0056326236f, -0.053934924f, -0.0124349f,
1281 -0.11520337f, 0.05409887f, 0.088759385f, 0.0019655675f, // 06
1282 0.0042065294f, 0.03881498f, 0.019844765f, 0.041858196f,
1283 -0.05695512f, 0.047233116f, 0.038937137f, -0.06542224f,
1284 0.014429736f, -0.09719407f, 0.13908425f, -0.05379757f,
1285 0.012321099f, 0.082840554f, -0.029899208f, 0.044217527f, // 07
1286 0.059855383f, 0.07711018f, -0.045319796f, 0.0948846f,
1287 -0.011724666f, -0.0033288454f, -0.033542685f, -0.04764985f,
1288 -0.13873616f, 0.040668588f, 0.034832682f, -0.015319203f,
1289 -0.018715994f, 0.046002675f, 0.0599172f, -0.043107376f, // 08
1290 0.0294216f, -0.002314414f, -0.022424703f, 0.0030315618f,
1291 0.0014641669f, 0.0029166266f, -0.11878115f, 0.013738511f,
1292 0.12375372f, -0.0006038222f, 0.029104086f, 0.087442465f,
1293 0.052958444f, 0.07558703f, 0.04817258f, 0.044462286f, // 09
1294 -0.015213451f, -0.08783778f, -0.0561384f, -0.003008196f,
1295 0.047060397f, -0.002058388f, 0.03429439f, -0.018839769f,
1296 0.024734668f, 0.024614193f, -0.042046934f, 0.09597743f,
1297 -0.0043254104f, 0.04320769f, 0.0064070094f, -0.0019131786f, // 10
1298 -0.02558259f, -0.022822596f, -0.023273505f, -0.02464396f,
1299 -0.10991725f, -0.006240552f, 0.0074488563f, 0.024044557f,
1300 0.04383914f, -0.046476185f, 0.028658995f, 0.060410924f,
1301 0.050786525f, 0.009452605f, -0.0073054377f, -0.024810238f, // 11
1302 0.0052906186f, 0.0066939713f, -0.0020913032f, 0.014515517f,
1303 0.015898481f, 0.021362653f, -0.030262267f, 0.016587038f,
1304 -0.011442813f, 0.041154444f, -0.007631438f, -0.03423484f,
1305 -0.010977775f, 0.036152758f, 0.0066366293f, 0.11915515f, // 12
1306 0.02318443f, -0.041350313f, 0.021485701f, -0.10906167f,
1307 -0.028218046f, -0.00954771f, 0.020531068f, -0.11995105f,
1308 -0.03672871f, 0.024019798f, 0.014255957f, -0.05221243f,
1309 -0.00661567f, -0.04630967f, 0.033188973f, 0.10107534f, // 13
1310 -0.014027541f, 0.030796422f, -0.10270911f, -0.035999842f,
1311 0.15443139f, 0.07684145f, 0.036571592f, -0.035900835f,
1312 -0.0034699554f, 0.06209149f, 0.015920248f, -0.031122351f,
1313 -0.03858649f, 0.01849943f, 0.13872518f, 0.01503974f, // 14
1314 0.069941424f, -0.06948533f, -0.0088794185f, 0.061282158f,
1315 -0.047401894f, 0.03100163f, -0.041533746f, -0.10430945f,
1316 0.044574402f, -0.01425562f, -0.024290353f, 0.034563623f,
1317 0.05866852f, 0.023947537f, -0.09445152f, 0.035450947f, // 15
1318 0.02247216f, -0.0042998926f, 0.061146557f, -0.10250651f,
1319 0.020881841f, -0.06747029f, 0.10062043f, -0.0023941975f,
1320 0.03532124f, -0.016341697f, 0.09685456f, -0.016764693f,
1321 0.051808182f, 0.05875331f, -0.04536488f, 0.001626336f, // 16
1322 -0.028892258f, -0.01048663f, -0.009793449f, -0.017093895f,
1323 0.010987891f, 0.02357273f, -0.00010856845f, 0.0099760275f,
1324 -0.001845119f, -0.03551521f, 0.0018358806f, 0.05763657f,
1325 -0.01769146f, 0.040995963f, 0.02235177f, -0.060430344f, // 17
1326 0.11475477f, -0.023854522f, 0.10071741f, 0.0686208f,
1327 -0.014250481f, 0.034261297f, 0.047418304f, 0.08562733f,
1328 -0.030519066f, 0.0060542435f, 0.014653856f, -0.038836084f,
1329 0.04096551f, 0.032249358f, -0.08355519f, -0.026823482f, // 18
1330 0.056386515f, -0.010401743f, -0.028396193f, 0.08507674f,
1331 0.014410365f, 0.020995233f, 0.17040324f, 0.11511526f,
1332 0.02459721f, 0.0066619175f, 0.025853224f, -0.023133837f,
1333 -0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f, // 19
1334 -0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f,
1335 0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f,
1336 -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f
1337 };
1338 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1339 // [num_units, output_size].
1340 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
1341 std::vector<float> recurrentToCellWeightsValue
1342 {
1343 -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
1344 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
1345 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
1346 -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
1347 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
1348 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
1349 -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
1350 -0.019443132f, -0.030755889f, -0.0040000007f, 0.04465846f,
1351 -0.021585021f, 0.0031670958f, 0.0053199246f, -0.056117613f,
1352 -0.10893326f, 0.076739706f, -0.08509834f, -0.027997585f,
1353 0.037871376f, 0.01449768f, -0.09002357f, -0.06111149f,
1354 -0.046195522f, 0.0422062f, -0.005683705f, -0.1253618f,
1355 -0.012925729f, -0.04890792f, 0.06985068f, 0.037654128f,
1356 0.03398274f, -0.004781977f, 0.007032333f, -0.031787455f,
1357 0.010868644f, -0.031489216f, 0.09525667f, 0.013939797f,
1358 0.0058680447f, 0.0167067f, 0.02668468f, -0.04797466f,
1359 -0.048885044f, -0.12722108f, 0.035304096f, 0.06554885f,
1360 0.00972396f, -0.039238118f, -0.05159735f, -0.11329045f,
1361 0.1613692f, -0.03750952f, 0.06529313f, -0.071974665f,
1362 -0.11769596f, 0.015524369f, -0.0013754242f, -0.12446318f,
1363 0.02786344f, -0.014179351f, 0.005264273f, 0.14376344f,
1364 0.015983658f, 0.03406988f, -0.06939408f, 0.040699873f,
1365 0.02111075f, 0.09669095f, 0.041345075f, -0.08316494f,
1366 -0.07684199f, -0.045768797f, 0.032298047f, -0.041805092f,
1367 0.0119405f, 0.0061010392f, 0.12652606f, 0.0064572375f,
1368 -0.024950314f, 0.11574242f, 0.04508852f, -0.04335324f,
1369 0.06760663f, -0.027437469f, 0.07216407f, 0.06977076f,
1370 -0.05438599f, 0.034033038f, -0.028602652f, 0.05346137f,
1371 0.043184172f, -0.037189785f, 0.10420091f, 0.00882477f,
1372 -0.054019816f, -0.074273005f, -0.030617684f, -0.0028467078f,
1373 0.024302477f, -0.0038869337f, 0.005332455f, 0.0013399826f,
1374 0.04361412f, -0.007001822f, 0.09631092f, -0.06702025f,
1375 -0.042049985f, -0.035070654f, -0.04103342f, -0.10273396f,
1376 0.0544271f, 0.037184782f, -0.13150354f, -0.0058036847f,
1377 -0.008264958f, 0.042035464f, 0.05891794f, 0.029673764f,
1378 0.0063542654f, 0.044788733f, 0.054816857f, 0.062257513f,
1379 -0.00093483756f, 0.048938446f, -0.004952862f, -0.007730018f,
1380 -0.04043371f, -0.017094059f, 0.07229206f, -0.023670016f,
1381 -0.052195564f, -0.025616996f, -0.01520939f, 0.045104615f,
1382 -0.007376126f, 0.003533447f, 0.006570588f, 0.056037236f,
1383 0.12436656f, 0.051817212f, 0.028532185f, -0.08686856f,
1384 0.11868599f, 0.07663395f, -0.07323171f, 0.03463402f,
1385 -0.050708205f, -0.04458982f, -0.11590894f, 0.021273347f,
1386 0.1251325f, -0.15313013f, -0.12224372f, 0.17228661f,
1387 0.023029093f, 0.086124025f, 0.006445803f, -0.03496501f,
1388 0.028332196f, 0.04449512f, -0.042436164f, -0.026587414f,
1389 -0.006041347f, -0.09292539f, -0.05678812f, 0.03897832f,
1390 0.09465633f, 0.008115513f, -0.02171956f, 0.08304309f,
1391 0.071401566f, 0.019622514f, 0.032163795f, -0.004167056f,
1392 0.02295182f, 0.030739572f, 0.056506045f, 0.004612461f,
1393 0.06524936f, 0.059999723f, 0.046395954f, -0.0045512207f,
1394 -0.1335546f, -0.030136576f, 0.11584653f, -0.014678886f,
1395 0.0020118146f, -0.09688814f, -0.0790206f, 0.039770417f,
1396 -0.0329582f, 0.07922767f, 0.029322514f, 0.026405897f,
1397 0.04207835f, -0.07073373f, 0.063781224f, 0.0859677f,
1398 -0.10925287f, -0.07011058f, 0.048005477f, 0.03438226f,
1399 -0.09606514f, -0.006669445f, -0.043381985f, 0.04240257f,
1400 -0.06955775f, -0.06769346f, 0.043903265f, -0.026784198f,
1401 -0.017840602f, 0.024307009f, -0.040079936f, -0.019946516f,
1402 0.045318738f, -0.12233574f, 0.026170589f, 0.0074471775f,
1403 0.15978073f, 0.10185836f, 0.10298046f, -0.015476589f,
1404 -0.039390966f, -0.072174534f, 0.0739445f, -0.1211869f,
1405 -0.0347889f, -0.07943156f, 0.014809798f, -0.12412325f,
1406 -0.0030663363f, 0.039695457f, 0.0647603f, -0.08291318f,
1407 -0.018529687f, -0.004423833f, 0.0037507233f, 0.084633216f,
1408 -0.01514876f, -0.056505352f, -0.012800942f, -0.06994386f,
1409 0.012962922f, -0.031234352f, 0.07029052f, 0.016418684f,
1410 0.03618972f, 0.055686004f, -0.08663945f, -0.017404709f,
1411 -0.054761406f, 0.029065743f, 0.052404847f, 0.020238016f,
1412 0.0048197987f, -0.0214882f, 0.07078733f, 0.013016777f,
1413 0.06262858f, 0.009184685f, 0.020785125f, -0.043904778f,
1414 -0.0270329f, -0.03299152f, -0.060088247f, -0.015162964f,
1415 -0.001828936f, 0.12642565f, -0.056757294f, 0.013586685f,
1416 0.09232601f, -0.035886683f, 0.06000002f, 0.05229691f,
1417 -0.052580316f, -0.082029596f, -0.010794592f, 0.012947712f,
1418 -0.036429964f, -0.085508935f, -0.13127148f, -0.017744139f,
1419 0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f,
1420 -0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f,
1421 -0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f,
1422 -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f
1423 };
1424 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1425 // [num_units, output_size].
1426 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1427 std::vector<float> recurrentToOutputWeightsValue
1428 {
1429 0.025825322f, -0.05813119f, 0.09495884f, -0.045984812f,
1430 -0.01255415f, -0.0026479573f, -0.08196161f, -0.054914974f,
1431 -0.0046604523f, -0.029587349f, -0.044576716f, -0.07480124f,
1432 -0.082868785f, 0.023254942f, 0.027502948f, -0.0039728214f,
1433 -0.08683098f, -0.08116779f, -0.014675607f, -0.037924774f,
1434 -0.023314456f, -0.007401714f, -0.09255757f, 0.029460307f,
1435 -0.08829125f, -0.005139627f, -0.08989442f, -0.0555066f,
1436 0.13596267f, -0.025062224f, -0.048351806f, -0.03850004f,
1437 0.07266485f, -0.022414139f, 0.05940088f, 0.075114764f,
1438 0.09597592f, -0.010211725f, -0.0049794707f, -0.011523867f,
1439 -0.025980417f, 0.072999895f, 0.11091378f, -0.081685916f,
1440 0.014416728f, 0.043229222f, 0.034178585f, -0.07530371f,
1441 0.035837382f, -0.085607f, -0.007721233f, -0.03287832f,
1442 -0.043848954f, -0.06404588f, -0.06632928f, -0.073643476f,
1443 0.008214239f, -0.045984086f, 0.039764922f, 0.03474462f,
1444 0.060612556f, -0.080590084f, 0.049127717f, 0.04151091f,
1445 -0.030063879f, 0.008801774f, -0.023021035f, -0.019558564f,
1446 0.05158114f, -0.010947698f, -0.011825728f, 0.0075720972f,
1447 0.0699727f, -0.0039981045f, 0.069350146f, 0.08799282f,
1448 0.016156472f, 0.035502106f, 0.11695009f, 0.006217345f,
1449 0.13392477f, -0.037875112f, 0.025745004f, 0.08940699f,
1450 -0.00924166f, 0.0046702605f, -0.036598757f, -0.08811812f,
1451 0.10522024f, -0.032441203f, 0.008176899f, -0.04454919f,
1452 0.07058152f, 0.0067963637f, 0.039206743f, 0.03259838f,
1453 0.03725492f, -0.09515802f, 0.013326398f, -0.052055415f,
1454 -0.025676316f, 0.03198509f, -0.015951829f, -0.058556724f,
1455 0.036879618f, 0.043357447f, 0.028362012f, -0.05908629f,
1456 0.0059240665f, -0.04995891f, -0.019187413f, 0.0276265f,
1457 -0.01628143f, 0.0025863599f, 0.08800015f, 0.035250366f,
1458 -0.022165963f, -0.07328642f, -0.009415526f, -0.07455109f,
1459 0.11690406f, 0.0363299f, 0.07411125f, 0.042103454f,
1460 -0.009660886f, 0.019076364f, 0.018299393f, -0.046004917f,
1461 0.08891175f, 0.0431396f, -0.026327137f, -0.051502608f,
1462 0.08979574f, -0.051670972f, 0.04940282f, -0.07491107f,
1463 -0.021240504f, 0.022596184f, -0.034280192f, 0.060163025f,
1464 -0.058211457f, -0.051837247f, -0.01349775f, -0.04639988f,
1465 -0.035936575f, -0.011681591f, 0.064818054f, 0.0073146066f,
1466 -0.021745546f, -0.043124277f, -0.06471268f, -0.07053354f,
1467 -0.029321948f, -0.05330136f, 0.016933719f, -0.053782392f,
1468 0.13747959f, -0.1361751f, -0.11569455f, 0.0033329215f,
1469 0.05693899f, -0.053219706f, 0.063698f, 0.07977434f,
1470 -0.07924483f, 0.06936997f, 0.0034815092f, -0.007305279f,
1471 -0.037325785f, -0.07251102f, -0.033633437f, -0.08677009f,
1472 0.091591336f, -0.14165086f, 0.021752775f, 0.019683983f,
1473 0.0011612234f, -0.058154266f, 0.049996935f, 0.0288841f,
1474 -0.0024567875f, -0.14345716f, 0.010955264f, -0.10234828f,
1475 0.1183656f, -0.0010731248f, -0.023590032f, -0.072285876f,
1476 -0.0724771f, -0.026382286f, -0.0014920527f, 0.042667855f,
1477 0.0018776858f, 0.02986552f, 0.009814309f, 0.0733756f,
1478 0.12289186f, 0.018043943f, -0.0458958f, 0.049412545f,
1479 0.033632483f, 0.05495232f, 0.036686596f, -0.013781798f,
1480 -0.010036754f, 0.02576849f, -0.08307328f, 0.010112348f,
1481 0.042521734f, -0.05869831f, -0.071689695f, 0.03876447f,
1482 -0.13275425f, -0.0352966f, -0.023077697f, 0.10285965f,
1483 0.084736146f, 0.15568255f, -0.00040734606f, 0.027835453f,
1484 -0.10292561f, -0.032401145f, 0.10053256f, -0.026142767f,
1485 -0.08271222f, -0.0030240538f, -0.016368777f, 0.1070414f,
1486 0.042672627f, 0.013456989f, -0.0437609f, -0.022309763f,
1487 0.11576483f, 0.04108048f, 0.061026827f, -0.0190714f,
1488 -0.0869359f, 0.037901703f, 0.0610107f, 0.07202949f,
1489 0.01675338f, 0.086139716f, -0.08795751f, -0.014898893f,
1490 -0.023771819f, -0.01965048f, 0.007955471f, -0.043740474f,
1491 0.03346837f, -0.10549954f, 0.090567775f, 0.042013682f,
1492 -0.03176985f, 0.12569028f, -0.02421228f, -0.029526481f,
1493 0.023851605f, 0.031539805f, 0.05292009f, -0.02344001f,
1494 -0.07811758f, -0.08834428f, 0.10094801f, 0.16594367f,
1495 -0.06861939f, -0.021256343f, -0.041093912f, -0.06669611f,
1496 0.035498552f, 0.021757556f, -0.09302526f, -0.015403468f,
1497 -0.06614931f, -0.051798206f, -0.013874718f, 0.03630673f,
1498 0.010412845f, -0.08077351f, 0.046185967f, 0.0035662893f,
1499 0.03541868f, -0.094149634f, -0.034814864f, 0.003128424f,
1500 -0.020674974f, -0.03944324f, -0.008110165f, -0.11113267f,
1501 0.08484226f, 0.043586485f, 0.040582247f, 0.0968012f,
1502 -0.065249965f, -0.028036479f, 0.0050708856f, 0.0017462453f,
1503 0.0326779f, 0.041296225f, 0.09164146f, -0.047743853f,
1504 -0.015952192f, -0.034451712f, 0.084197424f, -0.05347844f,
1505 -0.11768019f, 0.085926116f, -0.08251791f, -0.045081906f,
1506 0.0948852f, 0.068401024f, 0.024856757f, 0.06978981f,
1507 -0.057309967f, -0.012775832f, -0.0032452994f, 0.01977615f,
1508 -0.041040014f, -0.024264973f, 0.063464895f, 0.05431621f
1509 };
1510 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1511 hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
1512 std::vector<float> cellToInputWeightsValue
1513 {
1514 0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f,
1515 -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f,
1516 -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f, -0.052169047f,
1517 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f
1518 };
1519 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1520 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1521 std::vector<float> cellToForgetWeightsValue
1522 {
1523 -0.01998659f, -0.15568835f, -0.24248174f, -0.012770197f, 0.041331276f,
1524 -0.072311886f, -0.052123554f, -0.0066330447f, -0.043891653f, 0.036225766f,
1525 -0.047248036f, 0.021479502f, 0.033189066f, 0.11952997f, -0.020432774f,
1526 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f
1527 };
1528 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1529 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1530 std::vector<float> cellToOutputWeightsValue
1531 {
1532 0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
1533 -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
1534 -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f,
1535 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f
1536 };
1537 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1538 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
1539 std::vector<float> inputGateBiasValue
1540 {
1541 0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f,
1542 -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f,
1543 -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f,
1544 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f
1545 };
1546 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1547 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1548 std::vector<float> forgetGateBiasValue
1549 {
1550 0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f, 0.11098921f,
1551 0.15378423f, 0.09263801f, 0.09790885f, 0.09508917f, 0.061199076f,
1552 0.07665568f, -0.015443159f, -0.03499149f, 0.046190713f, 0.08895977f,
1553 0.10899629f, 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f
1554 };
1555 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1556 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1557 std::vector<float> cellBiasValue
1558 {
1559 -0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f, -0.1483596f,
1560 -0.10639995f, -0.091433935f, 0.058573797f, -0.06809782f, -0.07889636f,
1561 -0.043246906f, -0.09829136f, -0.4279842f, 0.034901652f, 0.18797937f,
1562 0.0075234566f, 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f
1563 };
1564 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1565 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1566 std::vector<float> outputGateBiasValue
1567 {
1568 0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
1569 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f,
1570 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f,
1571 -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f
1572 };
1573 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1574 // [output_size, num_units].
1575 hidl_vec<uint32_t> projectionWeightsDimensions{outputSize, numUnits};
1576 std::vector<float> projectionWeightsValue
1577 {
1578 -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
1579 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
1580 -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
1581 -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
1582 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
1583 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f,
1584 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f,
1585 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f,
1586 -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f,
1587 -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f,
1588 -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f,
1589 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f,
1590 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f,
1591 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f,
1592 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f,
1593 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f,
1594 -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f,
1595 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f,
1596 -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f,
1597 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f,
1598 -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f,
1599 -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f,
1600 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f,
1601 -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f,
1602 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f,
1603 -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f,
1604 -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f,
1605 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f,
1606 -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f,
1607 -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f,
1608 -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f,
1609 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f,
1610 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f,
1611 -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f,
1612 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f,
1613 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f,
1614 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f,
1615 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f,
1616 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f,
1617 -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f,
1618 -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f,
1619 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f,
1620 -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f,
1621 -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f,
1622 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f,
1623 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f,
1624 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f,
1625 -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f,
1626 -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f,
1627 -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f,
1628 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f,
1629 -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f,
1630 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f,
1631 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f,
1632 -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f,
1633 -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f,
1634 -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f,
1635 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f,
1636 -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f,
1637 -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f,
1638 -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f,
1639 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f,
1640 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f,
1641 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f
1642 };
1643 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
1644 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
1645 std::vector<float> projectionBiasValue(outputSize, 0.0f);
1646
1647 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1648 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1649 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
1650 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1651 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1652 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
1653
1654 // Constant scalar values (the VTS test adds these as tensors of dim {})
1655 // 20: The activation function: A value indicating the activation function:
1656 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
1657 hidl_vec<uint32_t> activationFunctionDimensions{};
1658 std::vector<int32_t> activationFunctionValue{4};
1659 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1660 // If set to 0.0 then clipping is disabled.
1661 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
1662 std::vector<float> cellClippingThresholdValue{0.0f};
1663 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1664 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1665 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
1666 std::vector<float> projectionClippingThresholdValue{0.0f};
1667
1668 // Normalization:
1669     // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
1670 // Used to rescale normalized inputs to activation at input gate.
1671 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
1672 std::vector<float> inputLayerNormWeightsValue;
1673     // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
1674 // Used to rescale normalized inputs to activation at forget gate.
1675 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
1676 std::vector<float> forgetLayerNormWeightsValue;
1677     // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1678 // Used to rescale normalized inputs to activation at cell gate.
1679 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1680 std::vector<float> cellLayerNormWeightsValue;
1681     // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1682 // Used to rescale normalized inputs to activation at output gate.
1683 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1684 std::vector<float> outputLayerNormWeightsValue;
1685
1686 // Outputs:
1687 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1688 // CIFG, or [batch_size, num_units * 3] without CIFG.
1689     // However, the reference implementation uses the opposite rule: (cifg ? 3 : 4) * numUnits
1690 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1691 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1692 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1693 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
1694 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
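    // CIFG is not used in this test (the input gate operands above are populated), so the
    // (cifg ? 3 : 4) * numUnits rule gives 4 * numUnits columns here.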
1695 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1696 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1697 std::vector<float> outputStateOutValue
1698 {
1699 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835577f, -0.0211779f, 0.0283512f, -0.0114597f,
1700 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415119f, 0.017147f, 0.0134203f,
1701 -0.013869f, 0.0287268f, -0.00334694f, 0.00733397f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
1702 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.0216801f
1703 };
1704 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1705 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1706 std::vector<float> cellStateOutValue
1707 {
1708 -0.0531632f, -0.0118138f, 0.0870833f, 0.0347929f, -0.076144f,
1709 -0.0659219f, -0.0463811f, 0.0141307f, -0.0127706f, -0.03782f,
1710 -0.00402401f, -0.00571876f, -0.187957f, -0.0247127f, 0.0711425f,
1711 0.008244f, 0.0492649f, 0.126972f, 0.0933097f, 0.29848f,
1712 -0.0966178f, -0.114417f, 0.0387229f, 0.0453255f, -0.181286f,
1713 -0.0651251f, -0.0996879f, -0.00276995f, 0.0617558f, -0.0100728f,
1714 0.056304f, -0.077416f, -0.162858f, -0.0541251f, 0.0571202f,
1715 -0.0525331f, 0.0724297f, 0.171029f, 0.141738f, 0.295483f
1716 };
1717 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1718 // effectively the same as the current “output state (out)” value.
1719 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1720 std::vector<float> outputValue
1721 {
1722 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, -0.0211779f, 0.0283512f, -0.0114597f,
1723 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f, 0.0134203f,
1724 -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
1725 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f
1726 };
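    // These match the output state (out) values above, apart from last-digit rounding in the
    // reference data.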
1727
1728 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1729 inputToInputWeightsDimensions, inputToInputWeightsValue,
1730 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1731 inputToCellWeightsDimensions, inputToCellWeightsValue,
1732 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1733 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1734 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1735 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1736 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1737 cellToInputWeightsDimensions, cellToInputWeightsValue,
1738 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1739 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1740 inputGateBiasDimensions, inputGateBiasValue,
1741 forgetGateBiasDimensions, forgetGateBiasValue,
1742 cellBiasDimensions, cellBiasValue,
1743 outputGateBiasDimensions, outputGateBiasValue,
1744 projectionWeightsDimensions, projectionWeightsValue,
1745 projectionBiasDimensions, projectionBiasValue,
1746 outputStateInDimensions, outputStateInValue,
1747 cellStateInDimensions, cellStateInValue,
1748 activationFunctionDimensions, activationFunctionValue,
1749 cellClippingThresholdDimensions, cellClippingThresholdValue,
1750 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1751 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1752 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1753 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1754 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1755 scratchBufferDimensions, scratchBufferValue,
1756 outputStateOutDimensions, outputStateOutValue,
1757 cellStateOutDimensions, cellStateOutValue,
1758 outputDimensions, outputValue,
1759 compute);
1760 }
1761
1762 template <typename HalPolicy>
1763 void LstmCifgPeepholeNoProjectionBatch2(armnn::Compute compute)
1764 {
1765 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
1766 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
1767 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
1768 // The batch size has been increased to 2 (it was 1 in the VTS test) with appropriate input and output values added.
1769
1770 uint32_t batchSize = 2;
1771 uint32_t inputSize = 2;
1772 uint32_t numUnits = 4;
1773 uint32_t outputSize = numUnits;
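    // No projection layer is used in this test, so the output size is simply numUnits.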
1774
1775 // Inputs:
1776 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1777 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1778 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
1779 std::vector<float> inputValue{2.0f, 3.0f, 3.0f, 4.0f};
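    // Presumably batch 0 carries the original VTS input {2.0f, 3.0f} and batch 1 the values
    // added for the second batch, {3.0f, 4.0f} (see the note above about the increased batch size).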
1780
1781 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1782 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
1783 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
1784 std::vector<float> inputToInputWeightsValue;
1785 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1786 // [num_units, input_size].
1787 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
1788 std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
1789 0.13056988f, -0.36333650f,
1790 -0.22755712f, 0.28253698f,
1791 0.24407166f, 0.33826375f};
1792 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
1793 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
1794 std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
1795 -0.09624726f, 0.05100781f,
1796 0.04717243f, 0.48944736f,
1797 -0.38535351f, -0.17212132f};
1798 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1799 // [num_units, input_size].
1800 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
1801 std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
1802 -0.55932593f, -0.09426838f,
1803 -0.44257352f, 0.54939759f,
1804 0.01533556f, 0.42751634f};
1805 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1806 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1807 // “num_units”), or the second dimension of the “projection_weights”, if defined.
1808     hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // {4, 4} in the VTS test; {0} here as CIFG omits the recurrent-to-input weights
1809 std::vector<float> recurrentToInputWeightsValue;
1810 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1811 // [num_units, output_size].
1812 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
1813 std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
1814 -0.14340827f, 0.36986142f, 0.23414481f, 0.55899000f,
1815 0.10798943f, -0.41174671f, 0.17751795f, -0.34484994f,
1816 -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f};
1817 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1818 // [num_units, output_size].
1819 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
1820 std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
1821 0.42957711f, 0.01841056f, -0.32764608f, -0.33027974f,
1822 -0.10826075f, 0.20675004f, 0.19069612f, -0.03026325f,
1823 -0.54532051f, 0.33003211f, 0.44901288f, 0.21193194f};
1824 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1825 // [num_units, output_size].
1826 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1827 std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
1828 0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
1829 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
1830 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
1831 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1832 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
1833 std::vector<float> cellToInputWeightsValue;
1834 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1835 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1836 std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
1837 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1838 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1839 std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
1840 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1841     hidl_vec<uint32_t> inputGateBiasDimensions{0}; // Left empty here (optional operand, CIFG); the reference VTS model uses {4}.
1842 std::vector<float> inputGateBiasValue;
1843 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1844 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1845 std::vector<float> forgetGateBiasValue{1.0f, 1.0f, 1.0f, 1.0f};
1846 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1847 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1848 std::vector<float> cellBiasValue(numUnits, 0.0f);
1849 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1850 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1851 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
1852 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1853 // [output_size, num_units].
1854 hidl_vec<uint32_t> projectionWeightsDimensions{0};
1855 std::vector<float> projectionWeightsValue;
1856 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
1857 hidl_vec<uint32_t> projectionBiasDimensions{0};
1858 std::vector<float> projectionBiasValue;
1859
1860 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1861 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1862 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
1863 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1864 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1865 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
1866
1867 // Constant scalar values (the VTS test adds these as tensors of dim {})
1868 // 20: The activation function: A value indicating the activation function:
1869 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
1870 hidl_vec<uint32_t> activationFunctionDimensions{};
1871 std::vector<int32_t> activationFunctionValue{4};
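    // Purely illustrative sketch, not part of the original test: the integer activation codes listed
    // above map onto named activation functions as follows (assumed from the NNAPI documentation quoted
    // in the comment; the driver's own mapping lives elsewhere).
    auto activationCodeToName = [](int32_t code) -> const char*
    {
        switch (code)
        {
            case 0:  return "None";
            case 1:  return "ReLU";
            case 3:  return "ReLU6";
            case 4:  return "TanH";
            case 6:  return "Sigmoid";
            default: return "Unsupported";
        }
    };
    armnn::IgnoreUnused(activationCodeToName(activationFunctionValue[0])); // "TanH" for this test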
1872 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1873 // If set to 0.0 then clipping is disabled.
1874 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
1875 std::vector<float> cellClippingThresholdValue{0.0f};
1876 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1877 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1878 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
1879 std::vector<float> projectionClippingThresholdValue{0.0f};
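    // Minimal sketch of the clipping rule described above (an assumption for illustration, not the
    // driver's code path): a threshold of 0.0f disables clipping, otherwise values are clamped to
    // [-threshold, threshold].
    auto clip = [](float value, float threshold)
    {
        if (threshold <= 0.0f)
        {
            return value; // clipping disabled
        }
        return value < -threshold ? -threshold : (value > threshold ? threshold : value);
    };
    armnn::IgnoreUnused(clip(1.5f, cellClippingThresholdValue[0])); // returns 1.5f here since clipping is off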
1880
1881 // Normalization:
1882     // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
1883 // Used to rescale normalized inputs to activation at input gate.
1884 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
1885 std::vector<float> inputLayerNormWeightsValue;
1886     // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
1887 // Used to rescale normalized inputs to activation at forget gate.
1888 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
1889 std::vector<float> forgetLayerNormWeightsValue;
1890     // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1891 // Used to rescale normalized inputs to activation at cell gate.
1892 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1893 std::vector<float> cellLayerNormWeightsValue;
1894     // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1895 // Used to rescale normalized inputs to activation at output gate.
1896 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1897 std::vector<float> outputLayerNormWeightsValue;
1898
1899 // Outputs:
1900     // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 3]
1901     //    with CIFG, or [batch_size, num_units * 4] without CIFG; i.e. the reference implementation sizes it as
1902     //    (cifg ? 3 : 4) * numUnits.
1903 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1904 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1905 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1906 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
1907 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
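    // Illustrative sketch of the sizing rule noted above (an assumption drawn from the referenced
    // LSTM.cpp, not from this driver): the scratch buffer holds one row of num_units per gate, so its
    // width is numUnits * 3 when CIFG is used (no input gate) and numUnits * 4 otherwise. This test
    // omits the input-gate weights, hence numUnits * 3.
    auto scratchBufferWidth = [](bool useCifg, uint32_t units)
    {
        return (useCifg ? 3u : 4u) * units;
    };
    armnn::IgnoreUnused(scratchBufferWidth(true, numUnits)); // 12 when numUnits == 4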
1908 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1909 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1910 std::vector<float> outputStateOutValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1911 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
1912 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1913 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1914 std::vector<float> cellStateOutValue{-0.76044439f, -0.01804161f, 0.18226376f, -0.06493707f,
1915 -0.90477051f, -0.04355603f, 0.18475688f, -0.04158677f};
1916 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1917 // effectively the same as the current “output state (out)” value.
1918 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1919 std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1920 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
1921
1922 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1923 inputToInputWeightsDimensions, inputToInputWeightsValue,
1924 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1925 inputToCellWeightsDimensions, inputToCellWeightsValue,
1926 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1927 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1928 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1929 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1930 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1931 cellToInputWeightsDimensions, cellToInputWeightsValue,
1932 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1933 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1934 inputGateBiasDimensions, inputGateBiasValue,
1935 forgetGateBiasDimensions, forgetGateBiasValue,
1936 cellBiasDimensions, cellBiasValue,
1937 outputGateBiasDimensions, outputGateBiasValue,
1938 projectionWeightsDimensions, projectionWeightsValue,
1939 projectionBiasDimensions, projectionBiasValue,
1940 outputStateInDimensions, outputStateInValue,
1941 cellStateInDimensions, cellStateInValue,
1942 activationFunctionDimensions, activationFunctionValue,
1943 cellClippingThresholdDimensions, cellClippingThresholdValue,
1944 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1945 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1946 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1947 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1948 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1949 scratchBufferDimensions, scratchBufferValue,
1950 outputStateOutDimensions, outputStateOutValue,
1951 cellStateOutDimensions, cellStateOutValue,
1952 outputDimensions, outputValue,
1953 compute);
1954 }
1955
1956 template <typename HalPolicy>
1957 void LstmNoCifgPeepholeProjectionNoClippingLayerNorm(armnn::Compute compute)
1958 {
1959 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/layer_norm_lstm.model.cpp
1960 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
1961 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
1962
1963 uint32_t batchSize = 2;
1964 uint32_t inputSize = 5;
1965 uint32_t numUnits = 4;
1966 uint32_t outputSize = 3;
1967
1968 // Inputs:
1969 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1970 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1971 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
1972 std::vector<float> inputValue{ 0.7f, 0.8f, 0.1f, 0.2f, 0.3f, // batch 0
1973 0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; // batch 1
1974
1975 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1976 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
1977 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
1978 std::vector<float> inputToInputWeightsValue{ 0.5, 0.6, 0.7, -0.8, -0.9,
1979 0.1, 0.2, 0.3, -0.4, 0.5,
1980 -0.8, 0.7, -0.6, 0.5, -0.4,
1981 -0.5, -0.4, -0.3, -0.2, -0.1};
1982 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1983 // [num_units, input_size].
1984 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
1985 std::vector<float> inputToForgetWeightsValue{-0.6, -0.1, 0.3, 0.2, 0.9,
1986 -0.5, -0.2, -0.4, 0.3, -0.8,
1987 -0.4, 0.3, -0.5, -0.4, -0.6,
1988 0.3, -0.4, -0.6, -0.5, -0.5};
1989 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
1990 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
1991 std::vector<float> inputToCellWeightsValue{-0.4, -0.3, -0.2, -0.1, -0.5,
1992 0.5, -0.2, -0.3, -0.2, -0.6,
1993 0.6, -0.1, -0.4, -0.3, -0.7,
1994 0.7, -0.9, -0.5, 0.8, 0.6};
1995 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1996 // [num_units, input_size].
1997 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
1998 std::vector<float> inputToOutputWeightsValue{-0.8, -0.4, -0.2, -0.9, -0.1,
1999 -0.7, 0.3, -0.3, -0.8, -0.2,
2000 0.6, -0.2, 0.4, -0.7, -0.3,
2001 -0.5, 0.1, 0.5, -0.6, -0.4};
2002 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2003 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
2004 // “num_units”), or the second dimension of the “projection_weights”, if defined.
2005 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
2006 std::vector<float> recurrentToInputWeightsValue{-0.2, -0.3, 0.4,
2007 0.1, -0.5, 0.9,
2008 -0.2, -0.3, -0.7,
2009 0.05, -0.2, -0.6};
2010 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2011 // [num_units, output_size].
2012 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
2013 std::vector<float> recurrentToForgetWeightsValue{-0.5, -0.3, -0.5,
2014 -0.2, 0.6, 0.4,
2015 0.9, 0.3, -0.1,
2016 0.2, 0.5, 0.2};
2017 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2018 // [num_units, output_size].
2019 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
2020 std::vector<float> recurrentToCellWeightsValue{-0.3, 0.2, 0.1,
2021 -0.3, 0.8,-0.08,
2022 -0.2, 0.3, 0.8,
2023 -0.6, -0.1, 0.2};
2024 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2025 // [num_units, output_size].
2026 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
2027 std::vector<float> recurrentToOutputWeightsValue{ 0.3, -0.1, 0.1,
2028 -0.2, -0.5, -0.7,
2029 -0.2, -0.6, -0.1,
2030 -0.4, -0.7, -0.2};
2031 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2032 hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
2033 std::vector<float> cellToInputWeightsValue{0.05, 0.1, 0.25, 0.15};
2034 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2035 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
2036 std::vector<float> cellToForgetWeightsValue{-0.02, -0.15, -0.25, -0.03};
2037 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2038 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
2039 std::vector<float> cellToOutputWeightsValue{0.1, -0.1, -0.5, 0.05};
2040 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2041 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
2042 std::vector<float> inputGateBiasValue{0.03, 0.15, 0.22, 0.38};
2043 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2044 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
2045 std::vector<float> forgetGateBiasValue{0.1, -0.3, -0.2, 0.1};
2046 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2047 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
2048 std::vector<float> cellBiasValue{-0.05, 0.72, 0.25, 0.08};
2049 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2050 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
2051 std::vector<float> outputGateBiasValue{0.05, -0.01, 0.2, 0.1};
2052 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2053 // [output_size, num_units].
2054 hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
2055 std::vector<float> projectionWeightsValue{-0.1, 0.2, 0.01,
2056 -0.2, 0.1, 0.5,
2057 0.3, 0.08, 0.07,
2058 0.2, -0.4, 0.2};
2059 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
2060 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
2061 std::vector<float> projectionBiasValue(outputSize, 0.0f);
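    // Purely illustrative (an assumption, not the driver's implementation): the projection stage maps
    // the numUnits-wide gated cell output down to outputSize values, outputState = projWeights * cellOutput
    // + projBias, which is why the weights are documented with shape [output_size, num_units].
    auto projectRow = [&](const std::vector<float>& cellOutput, uint32_t row)
    {
        float accumulator = projectionBiasValue[row];
        for (uint32_t col = 0; col < numUnits; ++col)
        {
            accumulator += projectionWeightsValue[row * numUnits + col] * cellOutput[col];
        }
        return accumulator;
    };
    armnn::IgnoreUnused(projectRow(std::vector<float>(numUnits, 0.0f), 0u)); // equals projectionBiasValue[0]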
2062 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2063 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
2064 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
2065 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2066 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
2067 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
2068
2069 // Constant scalar values (the VTS test adds these as tensors of dim {})
2070 // 20: The activation function: A value indicating the activation function:
2071 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
2072 hidl_vec<uint32_t> activationFunctionDimensions{};
2073 std::vector<int32_t> activationFunctionValue{4};
2074 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
2075 // If set to 0.0 then clipping is disabled.
2076 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
2077 std::vector<float> cellClippingThresholdValue{0.0f};
2078 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
2079 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2080 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
2081 std::vector<float> projectionClippingThresholdValue{0.0f};
2082
2083 // Normalization:
2084 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
2085 // Used to rescale normalized inputs to activation at input gate.
2086 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
2087 std::vector<float> inputLayerNormWeightsValue{0.1, 0.2, 0.3, 0.5};
2088 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
2089 // Used to rescale normalized inputs to activation at forget gate.
2090 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
2091 std::vector<float> forgetLayerNormWeightsValue{0.2, 0.2, 0.4, 0.3};
2092 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
2093 // Used to rescale normalized inputs to activation at cell gate.
2094 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
2095 std::vector<float> cellLayerNormWeightsValue{0.7, 0.2, 0.3, 0.8};
2096 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
2097 // Used to rescale normalized inputs to activation at output gate.
2098 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
2099 std::vector<float> outputLayerNormWeightsValue{0.6, 0.2, 0.2, 0.5};
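    // Rough sketch of how the layer normalization weights above are used (an assumption based on the
    // operand descriptions, not the backend implementation): each gate's pre-activations are normalised
    // across the num_units dimension and then rescaled element-wise by these weights before the gate
    // bias is added.
    auto layerNormRescale = [](const std::vector<float>& preActivations, const std::vector<float>& normWeights)
    {
        float mean = 0.0f;
        for (float v : preActivations) { mean += v; }
        mean /= static_cast<float>(preActivations.size());

        float variance = 0.0f;
        for (float v : preActivations) { variance += (v - mean) * (v - mean); }
        variance /= static_cast<float>(preActivations.size());

        const float x = variance + 1e-8f; // small epsilon for numerical stability (assumed value)
        float stdDev = 1.0f;              // a few Newton-Raphson steps approximate sqrt(x) well enough
        for (int i = 0; i < 10; ++i)      // here, avoiding a dependency on <cmath> in this sketch
        {
            stdDev = 0.5f * (stdDev + x / stdDev);
        }

        std::vector<float> rescaled(preActivations.size());
        for (size_t i = 0; i < preActivations.size(); ++i)
        {
            rescaled[i] = (preActivations[i] - mean) / stdDev * normWeights[i];
        }
        return rescaled;
    };
    armnn::IgnoreUnused(layerNormRescale({0.1f, -0.2f, 0.3f, -0.4f}, outputLayerNormWeightsValue));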
2100
2101 // Outputs:
2102     // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 3]
2103     //    with CIFG, or [batch_size, num_units * 4] without CIFG; i.e. the reference implementation sizes it as
2104     //    (cifg ? 3 : 4) * numUnits.
2105 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
2106 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
2107 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
2108 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
2109 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
2110 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2111 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
2112 std::vector<float> outputStateOutValue { 0.02440767f, 0.12802738f, -0.00170918f,
2113 -0.00692428f, 0.08487406f, 0.06344498f};
2114 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2115 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
2116 std::vector<float> cellStateOutValue {-0.45177122f, 0.37691566f, 0.22542511f, 0.23240635f,
2117 -0.25258583f, 0.33042118f, 0.01730525f, 0.36660123f};
2118 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
2119 // effectively the same as the current “output state (out)” value.
2120 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
2121 std::vector<float> outputValue{ 0.02440767f, 0.12802738f, -0.00170918f,
2122 -0.00692428f, 0.08487406f, 0.06344498f};
2123
2124 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
2125 inputToInputWeightsDimensions, inputToInputWeightsValue,
2126 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
2127 inputToCellWeightsDimensions, inputToCellWeightsValue,
2128 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
2129 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
2130 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
2131 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
2132 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
2133 cellToInputWeightsDimensions, cellToInputWeightsValue,
2134 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
2135 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
2136 inputGateBiasDimensions, inputGateBiasValue,
2137 forgetGateBiasDimensions, forgetGateBiasValue,
2138 cellBiasDimensions, cellBiasValue,
2139 outputGateBiasDimensions, outputGateBiasValue,
2140 projectionWeightsDimensions, projectionWeightsValue,
2141 projectionBiasDimensions, projectionBiasValue,
2142 outputStateInDimensions, outputStateInValue,
2143 cellStateInDimensions, cellStateInValue,
2144 activationFunctionDimensions, activationFunctionValue,
2145 cellClippingThresholdDimensions, cellClippingThresholdValue,
2146 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
2147 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
2148 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
2149 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
2150 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
2151 scratchBufferDimensions, scratchBufferValue,
2152 outputStateOutDimensions, outputStateOutValue,
2153 cellStateOutDimensions, cellStateOutValue,
2154 outputDimensions, outputValue,
2155 compute);
2156 }
2157
2158 template <typename HalPolicy>
2159 void LstmCifgPeepholeProjectionNoClippingLayerNorm(armnn::Compute compute)
2160 {
2161 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/layer_norm_lstm.model.cpp
2162 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
2163 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
2164
2165 uint32_t batchSize = 2;
2166 uint32_t inputSize = 5;
2167 uint32_t numUnits = 4;
2168 uint32_t outputSize = 3;
2169
2170 // Inputs:
2171 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
2172 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
2173 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
2174 std::vector<float> inputValue{ 0.7f, 0.8f, 0.1f, 0.2f, 0.3f, // batch 0
2175 0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; // batch 1
2176
2177 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2178 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
2179 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
2180 std::vector<float> inputToInputWeightsValue;
2181 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2182 // [num_units, input_size].
2183 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
2184 std::vector<float> inputToForgetWeightsValue{-0.6, -0.1, 0.3, 0.2, 0.9,
2185 -0.5, -0.2, -0.4, 0.3, -0.8,
2186 -0.4, 0.3, -0.5, -0.4, -0.6,
2187 0.3, -0.4, -0.6, -0.5, -0.5};
2188 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
2189 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
2190 std::vector<float> inputToCellWeightsValue{-0.4, -0.3, -0.2, -0.1, -0.5,
2191 0.5, -0.2, -0.3, -0.2, -0.6,
2192 0.6, -0.1, -0.4, -0.3, -0.7,
2193 0.7, -0.9, -0.5, 0.8, 0.6};
2194 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2195 // [num_units, input_size].
2196 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
2197 std::vector<float> inputToOutputWeightsValue{-0.8, -0.4, -0.2, -0.9, -0.1,
2198 -0.7, 0.3, -0.3, -0.8, -0.2,
2199 0.6, -0.2, 0.4, -0.7, -0.3,
2200 -0.5, 0.1, 0.5, -0.6, -0.4};
2201 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2202 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
2203 // “num_units”), or the second dimension of the “projection_weights”, if defined.
2204 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0};
2205 std::vector<float> recurrentToInputWeightsValue;
2206 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2207 // [num_units, output_size].
2208 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
2209 std::vector<float> recurrentToForgetWeightsValue{-0.5, -0.3, -0.5,
2210 -0.2, 0.6, 0.4,
2211 0.9, 0.3, -0.1,
2212 0.2, 0.5, 0.2};
2213 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2214 // [num_units, output_size].
2215 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
2216 std::vector<float> recurrentToCellWeightsValue{-0.3, 0.2, 0.1,
2217 -0.3, 0.8,-0.08,
2218 -0.2, 0.3, 0.8,
2219 -0.6, -0.1, 0.2};
2220 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2221 // [num_units, output_size].
2222 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
2223 std::vector<float> recurrentToOutputWeightsValue{ 0.3, -0.1, 0.1,
2224 -0.2, -0.5, -0.7,
2225 -0.2, -0.6, -0.1,
2226 -0.4, -0.7, -0.2};
2227 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2228 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
2229 std::vector<float> cellToInputWeightsValue;
2230 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2231 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
2232 std::vector<float> cellToForgetWeightsValue{-0.02, -0.15, -0.25, -0.03};
2233 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2234 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
2235 std::vector<float> cellToOutputWeightsValue{0.1, -0.1, -0.5, 0.05};
2236 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2237 hidl_vec<uint32_t> inputGateBiasDimensions{0};
2238 std::vector<float> inputGateBiasValue;
2239 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2240 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
2241 std::vector<float> forgetGateBiasValue{0.1, -0.3, -0.2, 0.1};
2242 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2243 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
2244 std::vector<float> cellBiasValue{-0.05, 0.72, 0.25, 0.08};
2245 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2246 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
2247 std::vector<float> outputGateBiasValue{0.05, -0.01, 0.2, 0.1};
2248 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2249 // [output_size, num_units].
2250 hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
2251 std::vector<float> projectionWeightsValue{-0.1, 0.2, 0.01,
2252 -0.2, 0.1, 0.5,
2253 0.3, 0.08, 0.07,
2254 0.2, -0.4, 0.2};
2255 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
2256 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
2257 std::vector<float> projectionBiasValue(outputSize, 0.0f);
2258 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2259 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
2260 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
2261 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2262 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
2263 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
2264
2265 // Constant scalar values (the VTS test adds these as tensors of dim {})
2266 // 20: The activation function: A value indicating the activation function:
2267 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
2268 hidl_vec<uint32_t> activationFunctionDimensions{};
2269 std::vector<int32_t> activationFunctionValue{4};
2270 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
2271 // If set to 0.0 then clipping is disabled.
2272 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
2273 std::vector<float> cellClippingThresholdValue{0.0f};
2274 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
2275 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2276 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
2277 std::vector<float> projectionClippingThresholdValue{0.0f};
2278
2279 // Normalization:
2280 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
2281 // Used to rescale normalized inputs to activation at input gate.
2282 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
2283 std::vector<float> inputLayerNormWeightsValue{0.1, 0.2, 0.3, 0.5};
2284 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
2285 // Used to rescale normalized inputs to activation at forget gate.
2286 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
2287 std::vector<float> forgetLayerNormWeightsValue{0.2, 0.2, 0.4, 0.3};
2288 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
2289 // Used to rescale normalized inputs to activation at cell gate.
2290 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
2291 std::vector<float> cellLayerNormWeightsValue{0.7, 0.2, 0.3, 0.8};
2292 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
2293 // Used to rescale normalized inputs to activation at output gate.
2294 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
2295 std::vector<float> outputLayerNormWeightsValue{0.6, 0.2, 0.2, 0.5};
2296
2297 // Outputs:
2298     // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 3]
2299     //    with CIFG, or [batch_size, num_units * 4] without CIFG; i.e. the reference implementation sizes it as
2300     //    (cifg ? 3 : 4) * numUnits.
2301 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
2302 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
2303 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
2304 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
2305 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
2306 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2307 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
2308 std::vector<float> outputStateOutValue { 0.02129706f, 0.14081624f, 0.01127331f,
2309 -0.02263505f, 0.09169482f, 0.07691758f};
2310 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2311 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
2312 std::vector<float> cellStateOutValue{-0.35102980f, 0.42610350f, 0.21463650f, 0.27716520f,
2313 -0.18855170f, 0.32522000f, 0.02036650f, 0.48967660f};
2314 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
2315 // effectively the same as the current “output state (out)” value.
2316 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
2317 std::vector<float> outputValue{ 0.02129706f, 0.14081624f, 0.01127331f,
2318 -0.02263505f, 0.09169482f, 0.07691758f};
2319
2320 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
2321 inputToInputWeightsDimensions, inputToInputWeightsValue,
2322 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
2323 inputToCellWeightsDimensions, inputToCellWeightsValue,
2324 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
2325 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
2326 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
2327 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
2328 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
2329 cellToInputWeightsDimensions, cellToInputWeightsValue,
2330 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
2331 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
2332 inputGateBiasDimensions, inputGateBiasValue,
2333 forgetGateBiasDimensions, forgetGateBiasValue,
2334 cellBiasDimensions, cellBiasValue,
2335 outputGateBiasDimensions, outputGateBiasValue,
2336 projectionWeightsDimensions, projectionWeightsValue,
2337 projectionBiasDimensions, projectionBiasValue,
2338 outputStateInDimensions, outputStateInValue,
2339 cellStateInDimensions, cellStateInValue,
2340 activationFunctionDimensions, activationFunctionValue,
2341 cellClippingThresholdDimensions, cellClippingThresholdValue,
2342 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
2343 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
2344 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
2345 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
2346 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
2347 scratchBufferDimensions, scratchBufferValue,
2348 outputStateOutDimensions, outputStateOutValue,
2349 cellStateOutDimensions, cellStateOutValue,
2350 outputDimensions, outputValue,
2351 compute);
2352 }
2353
2354 template <typename HalPolicy>
2355 void QuantizedLstm(armnn::Compute compute)
2356 {
2357 armnn::IgnoreUnused(compute);
2358 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/quantized_lstm.model.cpp
2359 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/quantized_lstm.example.cpp
2360 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
2361
2362 uint32_t batchSize = 2;
2363 uint32_t inputSize = 2;
2364 uint32_t outputSize = 4;
2365
2366 // Inputs:
2367 // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
2368 // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
2369 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
2370 std::vector<uint8_t> inputValue{166, 179, 50, 150};
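    // Illustrative decoding of the fixed quantization range quoted above (an assumption for clarity,
    // not driver code): [-1, 127/128] over 256 levels gives scale = 1/128 and zeroPoint = 128, so
    // real = (raw - 128) / 128. For example, 166 -> 0.296875 and 50 -> -0.609375.
    auto dequantizeAsymmU8 = [](uint8_t raw)
    {
        return (static_cast<float>(raw) - 128.0f) / 128.0f;
    };
    armnn::IgnoreUnused(dequantizeAsymmU8(inputValue[0]));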
2371
2372 // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2373 // [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
2374 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
2375 hidl_vec<uint32_t> inputToInputWeightsDimensions{outputSize, inputSize};
2376 std::vector<uint8_t> inputToInputWeightsValue{146, 250, 235, 171, 10, 218, 171, 108};
2377 // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2378 // [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
2379 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
2380 hidl_vec<uint32_t> inputToForgetWeightsDimensions{outputSize, inputSize};
2381 std::vector<uint8_t> inputToForgetWeightsValue{24, 50, 132, 179, 158, 110, 3, 169};
2382 // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2383 // [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
2384 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
2385 hidl_vec<uint32_t> inputToCellWeightsDimensions{outputSize, inputSize};
2386 std::vector<uint8_t> inputToCellWeightsValue{133, 34, 29, 49, 206, 109, 54, 183};
2387 // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2388 // [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
2389 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
2390 hidl_vec<uint32_t> inputToOutputWeightsDimensions{outputSize, inputSize};
2391 std::vector<uint8_t> inputToOutputWeightsValue{195, 187, 11, 99, 109, 10, 218, 48};
2392 // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2393 // [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
2394 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
2395 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{outputSize, outputSize};
2396 std::vector<uint8_t> recurrentToInputWeightsValue{254, 206, 77, 168, 71, 20, 215, 6,
2397 223, 7, 118, 225, 59, 130, 174, 26};
2398 // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2399 // [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
2400 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
2401 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{outputSize, outputSize};
2402 std::vector<uint8_t> recurrentToForgetWeightsValue{137, 240, 103, 52, 68, 51, 237, 112,
2403 0, 220, 89, 23, 69, 4, 207, 253};
2404 // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2405 // [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
2406 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
2407 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{outputSize, outputSize};
2408 std::vector<uint8_t> recurrentToCellWeightsValue{172, 60, 205, 65, 14, 0, 140, 168,
2409 240, 223, 133, 56, 142, 64, 246, 216};
2410 // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2411 // [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
2412 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
2413 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{outputSize, outputSize};
2414 std::vector<uint8_t> recurrentToOutputWeightsValue{106, 214, 67, 23, 59, 158, 45, 3,
2415 119, 132, 49, 205, 129, 218, 11, 98};
2416 // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
2417 // bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
2418 // of input and weights scales and zeroPoint equal to 0.
2419 hidl_vec<uint32_t> inputGateBiasDimensions{outputSize};
2420 std::vector<int32_t> inputGateBiasValue{-7876, 13488, -726, 32839};
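    // Sketch of the bias quantization convention described above (an assumption; the scales used here
    // are placeholders rather than values taken from this test): the 32-bit bias uses
    // scale = inputScale * weightScale and zeroPoint = 0, so real = raw * inputScale * weightScale.
    auto dequantizeBias = [](int32_t raw, float inputScale, float weightScale)
    {
        return static_cast<float>(raw) * inputScale * weightScale;
    };
    armnn::IgnoreUnused(dequantizeBias(inputGateBiasValue[0], 1.0f / 128.0f, 0.004f)); // placeholder weight scale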
2421 // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
2422 // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
2423 // of input and weights scales and zeroPoint equal to 0.
2424 hidl_vec<uint32_t> forgetGateBiasDimensions{outputSize};
2425 std::vector<int32_t> forgetGateBiasValue{9206, -46884, -11693, -38724};
2426     // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
2427 // for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
2428 // and weights scales and zeroPoint equal to 0.
2429 hidl_vec<uint32_t> cellBiasDimensions{outputSize};
2430 std::vector<int32_t> cellBiasValue{39481, 48624, 48976, -21419};
2431     // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
2432 // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
2433 // of input and weights scales and zeroPoint equal to 0.
2434 hidl_vec<uint32_t> outputGateBiasDimensions{outputSize};
2435 std::vector<int32_t> outputGateBiasValue{-58999, -17050, -41852, -40538};
2436
2437     // 13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
2438 // [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
2439 // It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
2440 hidl_vec<uint32_t> previousCellStateInDimensions{batchSize, outputSize};
2441 std::vector<int16_t> previousCellStateInValue{876, 1034, 955, -909, 761, 1029, 796, -1036};
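    // Illustrative decoding of the QUANT16_SYMM range quoted above (an assumption for clarity): a
    // symmetric range of [-2^4, 2^4 * 32767/32768] gives scale = 16 / 32768 = 1/2048 and zeroPoint = 0,
    // so real = raw / 2048. For example, 876 -> ~0.4277.
    auto dequantizeSymmQ16 = [](int16_t raw)
    {
        return static_cast<float>(raw) / 2048.0f;
    };
    armnn::IgnoreUnused(dequantizeSymmQ16(previousCellStateInValue[0]));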
2442 // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2443     //     [numBatches, outputSize] specifying the output of the LSTM cell from previous time-step. Tensor
2444 // is quantized with a fixed quantization range of -1, 127/128.
2445 hidl_vec<uint32_t> previousOutputInDimensions{batchSize, outputSize};
2446 std::vector<uint8_t> previousOutputInValue{136, 150, 140, 115, 135, 152, 138, 112};
2447
2448 // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
2449 // which contains a cell state from the current time step. Tensor is quantized using a quantization range
2450 // of -2^4, 2^4 * 32767/32768.
2451 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, outputSize};
2452 std::vector<int16_t> cellStateOutValue {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235};
2453     // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
2454 // contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
2455 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
2456 std::vector<uint8_t> outputValue {140, 151, 146, 112, 136, 156, 142, 112};
2457
2458
2459 QuantizedLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
2460 inputToInputWeightsDimensions, inputToInputWeightsValue,
2461 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
2462 inputToCellWeightsDimensions, inputToCellWeightsValue,
2463 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
2464 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
2465 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
2466 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
2467 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
2468 inputGateBiasDimensions, inputGateBiasValue,
2469 forgetGateBiasDimensions, forgetGateBiasValue,
2470 cellBiasDimensions, cellBiasValue,
2471 outputGateBiasDimensions, outputGateBiasValue,
2472 previousOutputInDimensions, previousOutputInValue,
2473 previousCellStateInDimensions, previousCellStateInValue,
2474 cellStateOutDimensions, cellStateOutValue,
2475 outputDimensions, outputValue);
2476 }
2477