/external/tensorflow/tensorflow/lite/kernels/ |
D | basic_rnn.cc |
    183  TfLiteTensor* row_sums;  in Prepare() local
    185  GetTemporarySafe(context, node, /*index=*/5, &row_sums));  in Prepare()
    186  row_sums->type = kTfLiteInt32;  in Prepare()
    187  row_sums->allocation_type = kTfLiteArenaRwPersistent;  in Prepare()
    189  if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) {  in Prepare()
    194  context, context->ResizeTensor(context, row_sums, row_sums_size));  in Prepare()
    237  TfLiteTensor* row_sums, bool* compute_row_sums) {  in EvalHybrid() argument
    267  row_sums_ptr = GetTensorData<int32_t>(row_sums);  in EvalHybrid()
    324  TfLiteTensor* row_sums;  in Eval() local
    325  TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 5, &row_sums));  in Eval()
    [all …]
|
D | unidirectional_sequence_rnn.cc |
    190  TfLiteTensor* row_sums;  in Prepare() local
    192  GetTemporarySafe(context, node, /*index=*/5, &row_sums));  in Prepare()
    193  row_sums->type = kTfLiteInt32;  in Prepare()
    194  row_sums->allocation_type = kTfLiteArenaRwPersistent;  in Prepare()
    196  if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) {  in Prepare()
    201  context, context->ResizeTensor(context, row_sums, row_sums_size));  in Prepare()
    274  TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,  in EvalHybrid() argument
    306  row_sums_ptr = GetTensorData<int32_t>(row_sums);  in EvalHybrid()
    400  TfLiteTensor* row_sums;  in Eval() local
    401  TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 5, &row_sums));  in Eval()
    [all …]
|
D | svdf.cc |
    230  TfLiteTensor* row_sums;  in Prepare() local
    232  GetTemporarySafe(context, node, /*index=*/5, &row_sums));  in Prepare()
    233  row_sums->type = kTfLiteFloat32;  in Prepare()
    234  row_sums->allocation_type = kTfLiteArenaRwPersistent;  in Prepare()
    236  if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) {  in Prepare()
    240  context, context->ResizeTensor(context, row_sums, row_sums_size));  in Prepare()
    333  TfLiteTensor* row_sums;  in Eval() local
    335  context, GetTemporarySafe(context, node, /*index=*/5, &row_sums));  in Eval()
    356  if (params->asymmetric_quantize_inputs && row_sums != nullptr) {  in Eval()
    358  row_sums_ptr = GetTensorData<int32_t>(row_sums);  in Eval()
|
D | fully_connected.cc |
    326  TfLiteTensor* row_sums;  in PrepareImpl() local
    328  GetTemporarySafe(context, node, /*index=*/4, &row_sums));  in PrepareImpl()
    329  row_sums->type = kTfLiteInt32;  in PrepareImpl()
    330  row_sums->allocation_type = kTfLiteArenaRwPersistent;  in PrepareImpl()
    332  if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) {  in PrepareImpl()
    336  context, context->ResizeTensor(context, row_sums, row_sums_size));  in PrepareImpl()
    440  TfLiteTensor* row_sums, TfLiteTensor* input_offsets,  in EvalHybridImpl() argument
    488  row_sums_ptr = GetTensorData<int32_t>(row_sums);  in EvalHybridImpl()
    533  TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,  in HybridFullyConnectedTask()
    547  row_sums(row_sums),  in HybridFullyConnectedTask()
    [all …]
|
D | batch_matmul.cc |
    274  TfLiteTensor* row_sums;  in InitializeTemporaries() local
    276  GetTemporarySafe(context, node, /*index=*/6, &row_sums));  in InitializeTemporaries()
    277  row_sums->type = kTfLiteInt32;  in InitializeTemporaries()
    278  row_sums->allocation_type = kTfLiteArenaRwPersistent;  in InitializeTemporaries()
    280  if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) {  in InitializeTemporaries()
    284  context, context->ResizeTensor(context, row_sums, row_sums_size));  in InitializeTemporaries()
    448  TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,  in EvalHybrid() argument
    468  row_sums_ptr = GetTensorData<int32_t>(row_sums);  in EvalHybrid()
    588  TfLiteTensor* row_sums;  in EvalQuantized() local
    590  GetTemporarySafe(context, node, /*index=*/6, &row_sums));  in EvalQuantized()
    [all …]
|
D | conv.cc |
    579  TfLiteTensor* row_sums;  in Prepare() local
    582  GetTemporarySafe(context, node, data->row_sums_index, &row_sums));  in Prepare()
    583  row_sums->type = kTfLiteInt32;  in Prepare()
    584  row_sums->allocation_type = kTfLiteArenaRwPersistent;  in Prepare()
    587  if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) {  in Prepare()
    591  context, context->ResizeTensor(context, row_sums, row_sums_size));  in Prepare()
    891  TfLiteTensor* row_sums;  in EvalHybridPerChannel() local
    894  GetTemporarySafe(context, node, data->row_sums_index, &row_sums));  in EvalHybridPerChannel()
    906  GetTensorData<int32>(scratch), GetTensorData<int32_t>(row_sums),  in EvalHybridPerChannel()
|
D | unidirectional_sequence_lstm.cc |
    1138  TfLiteTensor* row_sums;  in Prepare() local
    1140  GetTemporarySafe(context, node, kRowSums, &row_sums));  in Prepare()
    1141  row_sums->type = kTfLiteInt32;  in Prepare()
    1142  row_sums->allocation_type = kTfLiteArenaRwPersistent;  in Prepare()
    1150  if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) {  in Prepare()
    1155  context, context->ResizeTensor(context, row_sums, row_sums_size));  in Prepare()
    1350  TfLiteTensor* row_sums;  in Eval() local
    1352  GetTemporarySafe(context, node, kRowSums, &row_sums));  in Eval()
    1353  const int row_sums_size = row_sums->dims->data[0];  in Eval()
    1394  GetTemporary(context, node, kOutputStateZeroPoints), row_sums,  in Eval()
|
D | lstm.cc |
    1608  TfLiteTensor* row_sums;  in Prepare() local
    1610  GetTemporarySafe(context, node, kRowSums, &row_sums));  in Prepare()
    1611  row_sums->type = kTfLiteInt32;  in Prepare()
    1612  row_sums->allocation_type = kTfLiteArenaRwPersistent;  in Prepare()
    1614  if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) {  in Prepare()
    1619  context, context->ResizeTensor(context, row_sums, row_sums_size));  in Prepare()
    1936  TfLiteTensor* row_sums;  in Eval() local
    1938  GetTemporarySafe(context, node, kRowSums, &row_sums));  in Eval()
    1939  const int row_sums_size = row_sums->dims->data[0];  in Eval()
    2028  GetTemporary(context, node, kOutputStateZeroPoints), row_sums,  in Eval()
    [all …]
|
D | lstm_eval.h |
    169  TfLiteTensor* output_state_zp, TfLiteTensor* row_sums, int row_sums_size,
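Every hybrid kernel in this directory sets up its row_sums temporary the same way in Prepare()/PrepareImpl(): int32 storage, kTfLiteArenaRwPersistent allocation so the cached sums survive across invocations, and a resize guarded by a dims check (svdf.cc is the one outlier above, declaring the temporary as kTfLiteFloat32 even though Eval() reads it back as int32 data). A minimal sketch of that recurring setup follows; kRowSumsIndex, the rank, and num_weight_rows are illustrative placeholders, since each kernel uses its own temporary index (4, 5, 6, or a data->row_sums_index field) and shape.

    // Sketch of the shared Prepare()-time setup for the row_sums temporary.
    // kRowSumsIndex and num_weight_rows are placeholders, not names from the source.
    TfLiteTensor* row_sums;
    TF_LITE_ENSURE_OK(context,
                      GetTemporarySafe(context, node, kRowSumsIndex, &row_sums));
    row_sums->type = kTfLiteInt32;
    // Persistent arena allocation: the sums are computed once for the constant
    // weight matrices and reused by later invocations.
    row_sums->allocation_type = kTfLiteArenaRwPersistent;
    const int row_sums_dims[1] = {num_weight_rows};
    if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) {
      TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1);
      row_sums_size->data[0] = row_sums_dims[0];
      TF_LITE_ENSURE_OK(
          context, context->ResizeTensor(context, row_sums, row_sums_size));
    }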
|
/external/tensorflow/tensorflow/lite/kernels/internal/ |
D | kernel_utils.h |
    75  int32_t* accum_scratch, int32_t* row_sums, bool* compute_row_sums);
    89  int32_t* accum_scratch, int32_t* row_sums, bool* compute_row_sums);
|
D | kernel_utils.cc |
    128  int32_t* accum_scratch, int32_t* row_sums, bool* compute_row_sums) {  in RnnBatchStep() argument
    139  asymmetric_quantize_inputs, zero_points, accum_scratch, row_sums,  in RnnBatchStep()
    155  int32_t* accum_scratch, int32_t* row_sums, bool* compute_row_sums) {  in RnnBatchStep() argument
    163  input_row_sums = row_sums;  in RnnBatchStep()
    164  aux_input_row_sums = row_sums;  in RnnBatchStep()
|
D | tensor_utils.h |
    54  const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
    65  const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,  in MatrixBatchVectorMultiplyAccumulate() argument
    75  row_sums, compute_row_sums, context);  in MatrixBatchVectorMultiplyAccumulate()
|
D | tensor_utils_test.cc |
    457  int32_t* row_sums = scratch.data() + 8 * 4;  in TEST() local
    462  input_offsets.data(), scratch.data(), row_sums, &compute_row_sums,  in TEST()
    478  input_offsets.data(), scratch.data(), row_sums, &compute_row_sums,  in TEST()
    505  input_offsets_big_batch.data(), scratch_big_batch.data(), row_sums,  in TEST()
    1166  std::vector<int32_t> row_sums(rows);  in TestPerChannelDotprodMatrixBatchVectorMultiply() local
    1173  row_sums.data(), &compute_row_sums, &context);  in TestPerChannelDotprodMatrixBatchVectorMultiply()
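The row sums exist to support asymmetric input quantization on the hybrid path. When the float input is quantized with a per-batch zero point (input_offset), each accumulation of int8 weights against the offset-shifted input satisfies sum_j w[i][j] * (q[j] - offset) = sum_j w[i][j] * q[j] - offset * sum_j w[i][j], so the correction only needs the per-row weight sums. Because the weight rows are constant, those sums are computed once (guarded by compute_row_sums) and cached in the persistent temporary. Below is a minimal scalar sketch of the idea, assuming row-major int8 weights; the function and variable names are illustrative, not the library API.

    #include <cstdint>

    // Scalar illustration of the row-sum correction used by the hybrid
    // matrix*vector kernels: out[r] += scale * (dot(q_input) - offset * row_sum[r]).
    void MatVecWithRowSumCorrection(const int8_t* matrix, int rows, int cols,
                                    const int8_t* quantized_input,
                                    int32_t input_offset, float scale,
                                    int32_t* row_sums, bool* compute_row_sums,
                                    float* output) {
      if (*compute_row_sums) {
        // The weight matrix is constant, so compute the per-row sums once and
        // cache them for later calls.
        for (int r = 0; r < rows; ++r) {
          int32_t sum = 0;
          for (int c = 0; c < cols; ++c) sum += matrix[r * cols + c];
          row_sums[r] = sum;
        }
        *compute_row_sums = false;
      }
      for (int r = 0; r < rows; ++r) {
        int32_t dotprod = 0;
        for (int c = 0; c < cols; ++c) {
          dotprod += matrix[r * cols + c] * quantized_input[c];
        }
        // Undo the input zero point's contribution in a single multiply,
        // mirroring "dotprod -= row_sums[row] * batch_offset" in the portable
        // reference listed further below.
        dotprod -= row_sums[r] * input_offset;
        output[r] += scale * dotprod;
      }
    }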
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | sse_tensor_utils.cc |
    101  const int32_t* input_offset, const int32_t* row_sums) {  in SseMatrixBatchVectorMultiplyAccumulateImpl() argument
    113  row_sums && batch_offset ? batch_offset * row_sums[row] : 0;  in SseMatrixBatchVectorMultiplyAccumulateImpl()
    274  const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,  in SseMatrixBatchVectorMultiplyAccumulate() argument
    277  SseReductionSumVector(matrix, row_sums, m_rows, m_cols);  in SseMatrixBatchVectorMultiplyAccumulate()
    284  per_channel_scale, input_offset, row_sums);  in SseMatrixBatchVectorMultiplyAccumulate()
|
D | sse_tensor_utils_impl.h |
    52  const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
|
D | neon_tensor_utils.cc |
    436  const int32_t* input_offset, int32_t* row_sums) {  in DotprodMatrixBatchFourVectorMultiplyAccumulate() argument
    443  int32_t* row_sums_ptr = row_sums ? row_sums + row : nullptr;  in DotprodMatrixBatchFourVectorMultiplyAccumulate()
    598  const int32_t* input_offset, int32_t* row_sums) {  in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate() argument
    650  row_sums);  in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate()
    1247  const int32_t* input_offset, int32_t* row_sums) {  in NeonMatrixBatchVectorMultiplyAccumulateImpl() argument
    1254  per_channel_scale, input_offset, row_sums);  in NeonMatrixBatchVectorMultiplyAccumulateImpl()
    1259  per_channel_scale, input_offset, row_sums);  in NeonMatrixBatchVectorMultiplyAccumulateImpl()
    1284  int32_t* row_sums_ptr = row_sums;  in NeonMatrixBatchVectorMultiplyAccumulateImpl()
    1285  if (row_sums == nullptr) {  in NeonMatrixBatchVectorMultiplyAccumulateImpl()
    1362  if (row_sums == nullptr) {  in NeonMatrixBatchVectorMultiplyAccumulateImpl()
    [all …]
|
D | batch_matmul.h |
    118  const int32_t* input_offset, int32_t* row_sums,  in BatchMatMul() argument
    184  lhs_data, row_sums, num_weights_matrices * lhs_rows, accum_depth);  in BatchMatMul()
    210  const int32_t* woff_ptr0 = row_sums + (b0 * woff_ext0);  in BatchMatMul()
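In the hybrid BatchMatMul paths the constant LHS may hold several stacked weight matrices that are broadcast over the batch dimensions, so row_sums is sized num_weights_matrices * lhs_rows (the reduction call at line 184) and each broadcast batch indexes its own slice (the woff_ptr0 arithmetic at line 210). A small illustrative sketch of that layout follows; the function name is hypothetical, and the per-batch stride being lhs_rows for a varying LHS (or 0 for a shared one) is an assumption, not lifted from the source.

    #include <cstdint>
    #include <vector>

    // Hypothetical helper showing the row_sums layout implied by the hits above:
    // one int32 sum per LHS row, for every stacked weight matrix, filled in a
    // single pass over lhs_data.
    std::vector<int32_t> ComputeStackedRowSums(const int8_t* lhs_data,
                                               int num_weights_matrices,
                                               int lhs_rows, int accum_depth) {
      std::vector<int32_t> row_sums(num_weights_matrices * lhs_rows, 0);
      for (int r = 0; r < num_weights_matrices * lhs_rows; ++r) {
        for (int d = 0; d < accum_depth; ++d) {
          row_sums[r] += lhs_data[r * accum_depth + d];
        }
      }
      return row_sums;
    }
    // A broadcast batch b then reads row_sums.data() + b * stride, where the
    // stride is assumed to be lhs_rows when the LHS varies over that batch
    // dimension and 0 when a single weight matrix is shared.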
|
D | sse_tensor_utils.h |
    57  const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,  in MatrixBatchVectorMultiplyAccumulate() argument
    61  input_offset, scratch, row_sums, compute_row_sums, context);  in MatrixBatchVectorMultiplyAccumulate()
|
D | neon_tensor_utils.h |
    59  const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,  in MatrixBatchVectorMultiplyAccumulate() argument
    63  input_offset, scratch, row_sums, compute_row_sums, context);  in MatrixBatchVectorMultiplyAccumulate()
|
D | neon_tensor_utils_impl.h |
    59  const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
|
/external/tensorflow/tensorflow/lite/kernels/internal/reference/ |
D | batch_matmul.h |
    111  const int32_t* input_offset, int32_t* row_sums,  in BatchMatMul() argument
    151  lhs_data, row_sums, num_weights_matrices * lhs_rows, accum_depth);  in BatchMatMul()
    162  const int32_t* woff_ptr0 = row_sums + (b0 * woff_ext0);  in BatchMatMul()
|
D | svdf.h |
    199  int32_t* row_sums, bool* compute_row_sums) {  in EvalHybridSVDF() argument
    229  reinterpret_cast<int32_t*>(scratch), row_sums, compute_row_sums,  in EvalHybridSVDF()
|
D | portable_tensor_utils.h |
    79  const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,  in MatrixBatchVectorMultiplyAccumulate() argument
    83  per_channel_scale, input_offset, scratch, row_sums, compute_row_sums,  in MatrixBatchVectorMultiplyAccumulate()
|
D | portable_tensor_utils.cc |
    167  const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,  in PortableMatrixBatchVectorMultiplyAccumulate() argument
    175  PortableReductionSumVector(matrix, row_sums, m_rows, m_cols);  in PortableMatrixBatchVectorMultiplyAccumulate()
    199  dotprod -= row_sums[row] * batch_offset;  in PortableMatrixBatchVectorMultiplyAccumulate()
|
D | portable_tensor_utils_impl.h |
    71  const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
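Tying the layers together, a hybrid kernel's Eval path typically fetches the persistent temporary, keeps a compute_row_sums flag in its op data (set back to true in Prepare() so the sums are refreshed after a re-prepare), and threads both into tensor_utils::MatrixBatchVectorMultiplyAccumulate. The sketch below is a hedged illustration only: the trailing arguments follow the asymmetric overload visible in tensor_utils.h above, while the leading arguments, the local names, and the op-data layout are assumptions that differ from kernel to kernel.

    // Hypothetical call-site wiring; see the per-kernel Eval()/EvalHybrid()
    // hits above for the real argument lists and temporaries.
    int32_t* row_sums_ptr = GetTensorData<int32_t>(row_sums);  // persistent temp
    bool* compute_row_sums = &op_data->compute_row_sums;       // assumed op-data flag
    tensor_utils::MatrixBatchVectorMultiplyAccumulate(
        GetTensorData<int8_t>(weights_quantized), /*m_rows=*/num_units,
        /*m_cols=*/input_size, quantized_input_ptr, scaling_factors_ptr,
        batch_size, output_ptr,
        /*per_channel_scale=*/nullptr, input_zero_points_ptr,
        GetTensorData<int32_t>(accum_scratch), row_sums_ptr, compute_row_sums,
        cpu_backend_context);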
|