
Searched refs:n_input (Results 1 – 25 of 37) sorted by relevance

/external/tensorflow/tensorflow/lite/kernels/
bidirectional_sequence_lstm_test.cc
32 BidirectionalLSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, in BidirectionalLSTMOpModel() argument
41 n_input_(n_input), in BidirectionalLSTMOpModel()
431 const int n_input = 2; in TEST_P() local
441 n_batch, n_input, n_cell, n_output, sequence_length, /*use_cifg=*/false, in TEST_P()
447 {sequence_length, n_batch, n_input}, // input tensor in TEST_P()
450 {n_cell, n_input}, // input_to_input_weight tensor in TEST_P()
451 {n_cell, n_input}, // input_to_forget_weight tensor in TEST_P()
452 {n_cell, n_input}, // input_to_cell_weight tensor in TEST_P()
453 {n_cell, n_input}, // input_to_output_weight tensor in TEST_P()
473 {n_cell, n_input}, // input_to_input_weight tensor in TEST_P()
[all …]
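Across these test hits, n_input only ever feeds tensor shapes: the sequence input is {sequence_length, n_batch, n_input} and each input-to-gate weight is {n_cell, n_input}. A minimal standalone sketch of those shape relationships; only n_input = 2 comes from the hits above, the other dimension values are made up for illustration:

```cpp
#include <cstdio>

int main() {
  // Only n_input = 2 is taken from the hits above; the rest are illustrative.
  const int sequence_length = 3;
  const int n_batch = 1;
  const int n_input = 2;
  const int n_cell = 4;
  const int n_output = 4;

  // Input tensor: {sequence_length, n_batch, n_input}.
  const int input_elements = sequence_length * n_batch * n_input;
  // Each input-to-gate weight (input/forget/cell/output): {n_cell, n_input}.
  const int input_weight_elements = n_cell * n_input;
  // Each recurrent-to-gate weight: {n_cell, n_output}.
  const int recurrent_weight_elements = n_cell * n_output;

  std::printf("input: %d, per-gate input weights: %d, per-gate recurrent weights: %d\n",
              input_elements, input_weight_elements, recurrent_weight_elements);
  return 0;
}
```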
unidirectional_sequence_lstm_test.cc
32 UnidirectionalLSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, in UnidirectionalLSTMOpModel() argument
42 n_input_(n_input), in UnidirectionalLSTMOpModel()
288 int n_batch, int n_input, int n_cell, int n_output, int sequence_length, in HybridUnidirectionalLSTMOpModel() argument
294 n_batch, n_input, n_cell, n_output, sequence_length, time_major, in HybridUnidirectionalLSTMOpModel()
501 const int n_input = 2; in TEST_F() local
508 n_batch, n_input, n_cell, n_output, sequence_length, in TEST_F()
514 {sequence_length, n_batch, n_input}, // input tensor in TEST_F()
516 {n_cell, n_input}, // input_to_input_weight tensor in TEST_F()
517 {n_cell, n_input}, // input_to_forget_weight tensor in TEST_F()
518 {n_cell, n_input}, // input_to_cell_weight tensor in TEST_F()
[all …]
optional_tensor_test.cc
31 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel() argument
36 n_input_(n_input), in LSTMOpModel()
230 const int n_input = 2; in TEST() local
235 LSTMOpModel lstm(n_batch, n_input, n_cell, n_output, in TEST()
241 {n_batch, n_input}, // input tensor in TEST()
244 {n_cell, n_input}, // input_to_forget_weight tensor in TEST()
245 {n_cell, n_input}, // input_to_cell_weight tensor in TEST()
246 {n_cell, n_input}, // input_to_output_weight tensor in TEST()
lstm_eval.cc
49 int n_input, int n_aux_input, int n_output, in ComputeRowSums() argument
67 input_to_input_row_sums, n_cell, n_input); in ComputeRowSums()
70 input_to_forget_row_sums, n_cell, n_input); in ComputeRowSums()
72 input_to_cell_row_sums, n_cell, n_input); in ComputeRowSums()
74 input_to_output_row_sums, n_cell, n_input); in ComputeRowSums()
156 const int n_batch, const int n_input, const int n_aux_input, in CalculateLstmGateFloat() argument
174 input_to_gate_weights, n_cell, n_input, input, n_batch, gate); in CalculateLstmGateFloat()
321 const int n_batch, const int n_input, const int n_aux_input, in CalculateLstmGateHybrid() argument
354 input_to_gate_weights, input_to_gate_weights_ledger, n_cell, n_input, in CalculateLstmGateHybrid()
359 input_to_gate_weights, n_cell, n_input, input, in CalculateLstmGateHybrid()
[all …]
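CalculateLstmGateFloat uses n_input as the contraction dimension between the {n_cell, n_input} input-to-gate weights and the {n_batch, n_input} input. A simplified float-only sketch of that accumulation, assuming a sigmoid gate and dropping the recurrent, aux-input, peephole, and layer-norm terms the real function also handles:

```cpp
#include <cmath>
#include <vector>

// Simplified gate computation: gate[b][c] = sigmoid(bias[c] + sum_i W[c][i] * input[b][i]).
// Only the n_input-driven input term is shown; the real CalculateLstmGateFloat adds
// recurrent, aux-input, peephole, and layer-norm contributions on top.
std::vector<float> CalculateGateInputTerm(const std::vector<float>& input,          // n_batch * n_input
                                          const std::vector<float>& input_to_gate,  // n_cell * n_input
                                          const std::vector<float>& gate_bias,      // n_cell
                                          int n_batch, int n_input, int n_cell) {
  std::vector<float> gate(n_batch * n_cell);
  for (int b = 0; b < n_batch; ++b) {
    for (int c = 0; c < n_cell; ++c) {
      float acc = gate_bias[c];
      for (int i = 0; i < n_input; ++i) {
        acc += input_to_gate[c * n_input + i] * input[b * n_input + i];
      }
      gate[b * n_cell + c] = 1.0f / (1.0f + std::exp(-acc));  // sigmoid activation
    }
  }
  return gate;
}
```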
lstm_test.cc
39 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel() argument
44 : n_input_(n_input), in LSTMOpModel()
48 input_ = AddInput({TensorType_FLOAT32, {n_batch, n_input}}); in LSTMOpModel()
53 input_to_input_weights_ = AddInput({weight_type, {n_cell, n_input}}); in LSTMOpModel()
55 input_to_forget_weights_ = AddInput({weight_type, {n_cell, n_input}}); in LSTMOpModel()
56 input_to_cell_weights_ = AddInput({weight_type, {n_cell, n_input}}); in LSTMOpModel()
57 input_to_output_weights_ = AddInput({weight_type, {n_cell, n_input}}); in LSTMOpModel()
414 const int n_input = 2; in TEST_P() local
475 LSTMOpModel lstm(n_batch, n_input, n_cell, n_output, in TEST_P()
491 const int n_input = 2; in TEST_P() local
[all …]
/external/tensorflow/tensorflow/lite/kernels/internal/reference/
portable_tensor_utils.h
126 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate() argument
129 input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input, in MatrixBatchVectorMultiplyAccumulate()
136 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate() argument
139 input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input, in MatrixBatchVectorMultiplyAccumulate()
153 int32_t n_batch, int32_t n_input, int32_t n_cell, in MatrixBatchVectorMultiply() argument
158 n_input, n_cell, gate_output, gate_output_zp); in MatrixBatchVectorMultiply()
177 int n_batch, int n_input, int16_t* output) { in ApplyLayerNorm() argument
179 layer_norm_scale_b, variance_limit, n_batch, n_input, in ApplyLayerNorm()
186 const int32_t* bias, int n_batch, int n_input, in ApplyLayerNormFloat() argument
189 layer_norm_scale_b, bias, n_batch, n_input, in ApplyLayerNormFloat()
[all …]
portable_tensor_utils.cc
296 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in PortableMatrixBatchVectorMultiplyAccumulateImpl() argument
303 for (int col = 0; col < n_input; ++col) { in PortableMatrixBatchVectorMultiplyAccumulateImpl()
304 int8_t input_val = input[batch * n_input + col]; in PortableMatrixBatchVectorMultiplyAccumulateImpl()
305 int8_t weights_val = input_to_gate_weights[row * n_input + col]; in PortableMatrixBatchVectorMultiplyAccumulateImpl()
325 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in PortableMatrixBatchVectorMultiplyAccumulate() argument
328 input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input, in PortableMatrixBatchVectorMultiplyAccumulate()
335 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in PortableMatrixBatchVectorMultiplyAccumulate() argument
338 input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input, in PortableMatrixBatchVectorMultiplyAccumulate()
347 int32_t n_batch, int32_t n_input, in PortableMatrixBatchVectorMultiply() argument
355 for (int col = 0; col < n_input; ++col) { in PortableMatrixBatchVectorMultiply()
[all …]
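The portable reference kernel is a plain triple loop in which n_input is the inner (column) dimension, exactly as the indexing in the hits above suggests. A standalone int8 sketch of that pattern; the multiplier/shift rescaling and zero-point handling of the real kernel are omitted:

```cpp
#include <cstdint>

// Reference-style multiply-accumulate: for every batch and output row,
// accumulate over the n_input columns, mirroring the loop shown above.
void MatrixBatchVectorMultiplyAccumulateRef(const int8_t* input,                  // n_batch x n_input
                                            const int8_t* input_to_gate_weights,  // n_output x n_input
                                            const int32_t* bias,                  // n_output, may be null
                                            int n_batch, int n_input, int n_output,
                                            int32_t* output) {                    // n_batch x n_output
  for (int batch = 0; batch < n_batch; ++batch) {
    for (int row = 0; row < n_output; ++row) {
      int32_t acc = bias ? bias[row] : 0;
      for (int col = 0; col < n_input; ++col) {
        const int8_t input_val = input[batch * n_input + col];
        const int8_t weights_val = input_to_gate_weights[row * n_input + col];
        acc += static_cast<int32_t>(input_val) * static_cast<int32_t>(weights_val);
      }
      // The real portable kernel rescales by multiplier/shift, adds output_zp,
      // and clamps to the output type here; this sketch just accumulates.
      output[batch * n_output + row] += acc;
    }
  }
}
```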
portable_tensor_utils_impl.h
111 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
117 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
125 int32_t n_batch, int32_t n_input,
143 int n_batch, int n_input, int16_t* output);
149 const int32_t* bias, int n_batch, int n_input,
153 int32_t n_input, int16_t* output);
156 int32_t n_input, int16_t* output);
159 int32_t n_batch, int32_t n_input, int16_t* output);
162 int32_t n_input, int32_t integer_bits,
166 int n_batch, int n_input, int shift, int16_t* output);
[all …]
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
sse_tensor_utils.h
102 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate() argument
106 shift, n_batch, n_input, n_output, output_zp, scratch, output, context); in MatrixBatchVectorMultiplyAccumulate()
112 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate() argument
116 shift, n_batch, n_input, n_output, output_zp, scratch, output, context); in MatrixBatchVectorMultiplyAccumulate()
123 int32_t n_batch, int32_t n_input, int32_t n_cell, in MatrixBatchVectorMultiply() argument
128 n_input, n_cell, gate_output, gate_output_zp); in MatrixBatchVectorMultiply()
153 int n_batch, int n_input, int16_t* output) { in ApplyLayerNorm() argument
155 layer_norm_scale_b, variance_limit, n_batch, n_input, in ApplyLayerNorm()
162 const int32_t* bias, int n_batch, int n_input, in ApplyLayerNormFloat() argument
165 layer_norm_scale_b, bias, n_batch, n_input, in ApplyLayerNormFloat()
[all …]
neon_tensor_utils.h
93 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate() argument
96 input_to_gate_weights, multiplier, shift, n_batch, n_input, in MatrixBatchVectorMultiplyAccumulate()
103 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate() argument
106 input_to_gate_weights, multiplier, shift, n_batch, n_input, in MatrixBatchVectorMultiplyAccumulate()
114 int32_t n_batch, int32_t n_input, int32_t n_cell, in MatrixBatchVectorMultiply() argument
119 n_input, n_cell, gate_output, gate_output_zp); in MatrixBatchVectorMultiply()
145 int n_batch, int n_input, int16_t* output) { in ApplyLayerNorm() argument
148 n_batch, n_input, output); in ApplyLayerNorm()
154 const int32_t* bias, int n_batch, int n_input, in ApplyLayerNormFloat() argument
157 layer_norm_scale_b, bias, n_batch, n_input, in ApplyLayerNormFloat()
[all …]
neon_tensor_utils_impl.h
65 int n_batch, int n_input, int16_t* output);
67 void NeonApplySigmoid(const int16_t* input, int32_t n_batch, int32_t n_input,
71 int32_t n_input, int16_t* output);
74 int n_input, int shift, int16_t* output);
77 int32_t multiplier, int shift, int n_batch, int n_input,
81 int n_input, int16_t* output);
93 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
99 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
neon_tensor_utils.cc
743 int32_t n_batch, int32_t n_input, in NeonMatrixBatchVectorMultiplyImpl() argument
757 if ((n_input & (kNeonVectorAlignment - 1)) != 0) { in NeonMatrixBatchVectorMultiplyImpl()
760 (int8_t*)aligned_alloc(kNeonVectorAlignment, n_input, // NOLINT in NeonMatrixBatchVectorMultiplyImpl()
765 (int8_t*)aligned_alloc(kNeonVectorAlignment, n_input, // NOLINT in NeonMatrixBatchVectorMultiplyImpl()
774 RoundDownVectors<kInt8ValuesPerNeonVector>(n_input); in NeonMatrixBatchVectorMultiplyImpl()
776 RoundDownVectors<(kInt8ValuesPerNeonVector / 2)>(n_input); in NeonMatrixBatchVectorMultiplyImpl()
780 memcpy(aligned_vec, input + batch * n_input, sizeof(int8_t) * n_input); in NeonMatrixBatchVectorMultiplyImpl()
785 (int8_t*)input_to_gate_weights + row * n_input; // NOLINT in NeonMatrixBatchVectorMultiplyImpl()
787 memcpy(aligned_row, row_ptr, sizeof(int8_t) * n_input); in NeonMatrixBatchVectorMultiplyImpl()
837 for (; TFLITE_UNLIKELY(col < n_input); ++col) { in NeonMatrixBatchVectorMultiplyImpl()
[all …]
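The NEON implementation rounds n_input down to a whole number of vector lanes, handles that bulk with SIMD, and finishes the leftover columns with a scalar postamble (the `col < n_input` tail loop above). A portable sketch of that split; the vectorized body is stood in for by a plain inner loop and the lane count is illustrative:

```cpp
#include <cstdint>

// Illustrative tail-handling pattern: process full 16-lane int8 chunks,
// then fall back to scalars for the leftover columns of n_input.
int32_t DotProductWithTail(const int8_t* row, const int8_t* vec, int n_input) {
  constexpr int kValuesPerVector = 16;  // int8 lanes in a 128-bit register
  const int postamble_start = n_input - (n_input % kValuesPerVector);

  int32_t acc = 0;
  int col = 0;
  for (; col < postamble_start; col += kValuesPerVector) {
    // In the real kernel this block is a NEON multiply-accumulate over 16 lanes.
    for (int lane = 0; lane < kValuesPerVector; ++lane) {
      acc += static_cast<int32_t>(row[col + lane]) * static_cast<int32_t>(vec[col + lane]);
    }
  }
  // Scalar postamble for the columns that did not fill a whole vector.
  for (; col < n_input; ++col) {
    acc += static_cast<int32_t>(row[col]) * static_cast<int32_t>(vec[col]);
  }
  return acc;
}
```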
/external/tensorflow/tensorflow/lite/delegates/gpu/cl/kernels/
lstm_full_test.cc
35 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel() argument
61 : n_input_(n_input), in LSTMOpModel()
65 input_ = AddInput({TensorType_FLOAT32, {n_batch, n_input}}); in LSTMOpModel()
70 AddConstInput({weight_type, {n_cell, n_input}}, input_to_input_weights); in LSTMOpModel()
72 AddConstInput({weight_type, {n_cell, n_input}}, input_to_forget_weights); in LSTMOpModel()
73 AddConstInput({weight_type, {n_cell, n_input}}, input_to_cell_weights); in LSTMOpModel()
74 AddConstInput({weight_type, {n_cell, n_input}}, input_to_output_weights); in LSTMOpModel()
266 const int n_input = 2; in TEST_P() local
319 n_batch, n_input, n_cell, n_output, in TEST_P()
340 const int n_input = 2; in TEST_P() local
[all …]
/external/tensorflow/tensorflow/lite/experimental/kernels/
unidirectional_sequence_gru_test.cc
34 explicit GRUOpModel(int n_batch, int n_input, int n_output, in GRUOpModel() argument
37 : n_batch_(n_batch), n_input_(n_input), n_output_(n_output) { in GRUOpModel()
102 const int n_input = 2; in TEST() local
105 GRUOpModel m(n_batch, n_input, n_output, in TEST()
106 {{n_time, n_batch, n_input}, in TEST()
108 {2 * n_output, n_input + n_output}, in TEST()
110 {n_output, n_input + n_output}, in TEST()
unidirectional_sequence_gru.cc
39 const int n_input = input->dims->data[2]; in GruImpl() local
41 const int n_batch_input = n_batch * n_input; in GruImpl()
43 const RuntimeShape input_shape({n_batch, n_input}); in GruImpl()
135 const int n_input = input->dims->data[2]; in Prepare() local
151 TF_LITE_ENSURE_EQ(context, gate_weight->dims->data[1], n_input + n_output); in Prepare()
167 n_input + n_output); in Prepare()
218 concat_size->data[1] = n_input + n_output; in Prepare()
gru_cell.cc
46 const int n_input = input_shape.Dims(1); in GruCell() local
76 auto hr = xh.block(n_input, 0, n_output, n_batch); in GruCell()
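In the GRU hits, n_input shows up next to n_output because the cell concatenates the input x (n_input values per batch) with the previous hidden state h (n_output values), which is why the combined gate weights are {2 * n_output, n_input + n_output}. A minimal sketch of that concatenation; the real gru_cell.cc works on Eigen block views rather than making a copy:

```cpp
#include <algorithm>
#include <vector>

// Build the concatenated [x, h] vector a GRU gate matmul consumes.
// In gru_cell.cc this is an Eigen block; here it is a plain copy per batch.
std::vector<float> ConcatInputAndState(const std::vector<float>& input,         // n_batch * n_input
                                       const std::vector<float>& hidden_state,  // n_batch * n_output
                                       int n_batch, int n_input, int n_output) {
  std::vector<float> xh(n_batch * (n_input + n_output));
  for (int b = 0; b < n_batch; ++b) {
    float* dst = xh.data() + b * (n_input + n_output);
    const float* x = input.data() + b * n_input;
    const float* h = hidden_state.data() + b * n_output;
    std::copy(x, x + n_input, dst);
    std::copy(h, h + n_output, dst + n_input);
  }
  return xh;  // Feed to the {2 * n_output, n_input + n_output} gate weights.
}
```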
/external/tensorflow/tensorflow/lite/kernels/internal/
tensor_utils_common.h
163 int32_t n_batch, int32_t n_input, int32_t n_cell,
206 int n_batch, int n_input, int16_t* output);
212 const int32_t* bias, int n_batch, int n_input,
222 void ApplySigmoid(const int16_t* input, int32_t n_batch, int32_t n_input,
226 void ApplySigmoidFloat(const int16_t* input, int32_t n_batch, int32_t n_input,
239 int32_t n_input, int16_t* output);
244 void ApplyTanhFloat(const int16_t* input, int32_t n_batch, int32_t n_input,
257 int n_input, int shift, int16_t* output);
269 int n_input, int shift, int8_t* output);
286 int32_t n_input, int32_t output_zp, int8_t* output);
[all …]
/external/tensorflow/tensorflow/lite/experimental/microfrontend/
audio_microfrontend_test.cc
38 MicroFrontendOpModel(int n_input, int n_frame, int n_frequency_per_frame, in MicroFrontendOpModel() argument
42 : n_input_(n_input), in MicroFrontendOpModel()
179 const int n_input = 36; in TEST_F() local
183 MicroFrontendOpModel micro_frontend(n_input, n_frame, n_frequency_per_frame, in TEST_F()
186 {n_input}, in TEST_F()
/external/tensorflow/tensorflow/lite/tools/optimize/calibration/builtin_logging_ops/
lstm.cc
46 const int n_batch, const int n_input, const int n_aux_input, in CalculateLstmGateFloat() argument
66 input_to_gate_weights, n_cell, n_input, input, n_batch, gate); in CalculateLstmGateFloat()
180 const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input, in LstmStepCalibration() argument
199 tensor_utils::IsZeroVector(input_ptr, n_batch * n_input); in LstmStepCalibration()
210 input_gate_bias_ptr, n_batch, n_input, n_aux_input, n_output, n_cell, in LstmStepCalibration()
221 forget_gate_bias_ptr, n_batch, n_input, n_aux_input, n_output, n_cell, in LstmStepCalibration()
231 n_batch, n_input, n_aux_input, n_output, n_cell, in LstmStepCalibration()
245 output_gate_bias_ptr, n_batch, n_input, n_aux_input, n_output, n_cell, in LstmStepCalibration()
301 const int n_input = input->dims->data[input->dims->size - 1]; in EvalCalibration() local
334 const int input_step = n_batch * n_input; in EvalCalibration()
[all …]
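In the calibration path, n_input is read from the innermost input dimension and, for time-major input, the per-step pointer advance is input_step = n_batch * n_input. A small sketch of that stepping with the per-step work reduced to a caller-supplied callback (the callback stands in for LstmStepCalibration):

```cpp
#include <functional>

// Walk a time-major input buffer of shape {max_time, n_batch, n_input},
// handing each step a pointer to its {n_batch, n_input} slice.
void ForEachTimeStep(const float* input, int max_time, int n_batch, int n_input,
                     const std::function<void(const float*)>& lstm_step) {
  const int input_step = n_batch * n_input;  // elements per time step
  for (int t = 0; t < max_time; ++t) {
    const float* input_ptr = input + t * input_step;
    lstm_step(input_ptr);  // the real code runs the LSTM step on this slice
  }
}
```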
/external/tensorflow/tensorflow/lite/tools/optimize/calibration/custom_logging_ops/
lstm.cc
63 const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input, in LstmStepWithAuxInput() argument
101 input_to_input_weights_ptr, n_cell, n_input, input_ptr, n_batch, in LstmStepWithAuxInput()
106 input_to_forget_weights_ptr, n_cell, n_input, input_ptr, n_batch, in LstmStepWithAuxInput()
109 n_cell, n_input, input_ptr, in LstmStepWithAuxInput()
112 input_to_output_weights_ptr, n_cell, n_input, input_ptr, n_batch, in LstmStepWithAuxInput()
370 const int n_input = input->dims->data[input->dims->size - 1]; in EvalFloat() local
403 const int input_step = n_batch * n_input; in EvalFloat()
443 n_input, aux_input_size, n_output, output_batch_leading_dim, in EvalFloat()
451 const int input_step = n_input; in EvalFloat()
505 n_cell, n_input, aux_input_size, n_output, output_batch_leading_dim, in EvalFloat()
/external/tensorflow/tensorflow/lite/delegates/nnapi/
nnapi_delegate_test.cc
3092 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel() argument
3098 n_input_(n_input), in LSTMOpModel()
3456 const int n_input = 2; in TEST_F() local
3461 LSTMOpModel lstm(n_batch, n_input, n_cell, n_output, in TEST_F()
3467 {n_batch, n_input}, // input tensor in TEST_F()
3469 {n_cell, n_input}, // input_to_input_weight tensor in TEST_F()
3470 {n_cell, n_input}, // input_to_forget_weight tensor in TEST_F()
3471 {n_cell, n_input}, // input_to_cell_weight tensor in TEST_F()
3472 {n_cell, n_input}, // input_to_output_weight tensor in TEST_F()
3520 const int n_input = 2; in TEST_F() local
[all …]
/external/tensorflow/tensorflow/lite/micro/kernels/xtensa/
svdf.cc
64 const int n_input = input_tensor->dims->data[1]; in EvalIntegerSvdfHifimini() local
112 const int8_t* input_batch_ptr = input + b * n_input; in EvalIntegerSvdfHifimini()
115 int num_iters = n_input / 2; in EvalIntegerSvdfHifimini()
259 const int n_input = input_tensor->dims->data[1]; in EvalIntegerSvdfHifi4() local
287 weight_feature, &input[b * n_input], NULL, n_filter, in EvalIntegerSvdfHifi4()
288 n_input, n_input, n_memory, -data.input_zero_point, in EvalIntegerSvdfHifi4()
/external/tensorflow/tensorflow/lite/micro/kernels/
svdf_common.cc
43 const int n_input = input_tensor->dims->data[1]; in EvalIntegerSvdfReference() local
86 const int8_t* vector_in_batch = input + b * n_input; in EvalIntegerSvdfReference()
87 for (int c = 0; c < n_input; c++) { in EvalIntegerSvdfReference()
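The SVDF reference kernel treats n_input as the feature depth of one batch row: it offsets into the input by b * n_input and runs each filter row across those n_input values. A standalone sketch of that inner step, with the quantized rescaling and activation-state shifting of the real kernel omitted:

```cpp
#include <cstdint>
#include <vector>

// For each batch, compute one dot product per filter over the n_input features,
// mirroring the b * n_input offset and the c < n_input loop shown above.
std::vector<int32_t> SvdfFeatureDotProducts(const int8_t* input,           // n_batch x n_input
                                            const int8_t* weight_feature,  // n_filter x n_input
                                            int n_batch, int n_input, int n_filter) {
  std::vector<int32_t> result(n_batch * n_filter, 0);
  for (int b = 0; b < n_batch; ++b) {
    const int8_t* vector_in_batch = input + b * n_input;
    for (int f = 0; f < n_filter; ++f) {
      const int8_t* filter_row = weight_feature + f * n_input;
      int32_t dot = 0;
      for (int c = 0; c < n_input; ++c) {
        dot += static_cast<int32_t>(filter_row[c]) * static_cast<int32_t>(vector_in_batch[c]);
      }
      result[b * n_filter + f] = dot;  // the real kernel rescales this and updates state
    }
  }
  return result;
}
```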
/external/tensorflow/tensorflow/lite/experimental/examples/lstm/
bidirectional_sequence_rnn_test.py
46 self.n_input = 28
105 "float", [batch_size, self.time_steps, self.n_input],
175 self.n_input))
237 sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
unidirectional_sequence_lstm_test.py
48 self.n_input = 28
95 "float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")
144 self.n_input))
194 sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
