Occurrences of row_sums under external/tensorflow/tensorflow/lite/, grouped by
directory; each hit lists the source line, the matched text, and the enclosing
function.

/external/tensorflow/tensorflow/lite/kernels/

basic_rnn.cc
  183: TfLiteTensor* row_sums;  (in Prepare(), local)
  237: TfLiteTensor* row_sums, bool* compute_row_sums) {  (in EvalHybrid())
  324: TfLiteTensor* row_sums;  (in Eval(), local)

unidirectional_sequence_rnn.cc
  190: TfLiteTensor* row_sums;  (in Prepare(), local)
  274: TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,  (in EvalHybrid())
  400: TfLiteTensor* row_sums;  (in Eval(), local)

fully_connected.cc
  326: TfLiteTensor* row_sums;  (in PrepareImpl(), local)
  440: TfLiteTensor* row_sums, TfLiteTensor* input_offsets,  (in EvalHybridImpl())
  570: TfLiteTensor* row_sums;  (member)
  585: TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,  (in EvalHybrid())
  707: TfLiteTensor* row_sums;  (in EvalQuantized(), local)

svdf.cc
  230: TfLiteTensor* row_sums;  (in Prepare(), local)
  333: TfLiteTensor* row_sums;  (in Eval(), local)

batch_matmul.cc
  274: TfLiteTensor* row_sums;  (in InitializeTemporaries(), local)
  448: TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,  (in EvalHybrid())
  588: TfLiteTensor* row_sums;  (in EvalQuantized(), local)

unidirectional_sequence_lstm.cc
  1138: TfLiteTensor* row_sums;  (in Prepare(), local)
  1350: TfLiteTensor* row_sums;  (in Eval(), local)

conv.cc
  579: TfLiteTensor* row_sums;  (in Prepare(), local)
  891: TfLiteTensor* row_sums;  (in EvalHybridPerChannel(), local)

lstm.cc
  1608: TfLiteTensor* row_sums;  (in Prepare(), local)
  1936: TfLiteTensor* row_sums;  (in Eval(), local)

lstm_eval.cc
  48:   int32_t* projection_weights_row_sums, int32_t* row_sums, int n_cell,  (in ComputeRowSums())
  977:  inline void LstmStepHybrid(  (in LstmStepHybrid())
  1868: TfLiteStatus EvalHybrid(  (in EvalHybrid())
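The Prepare()/Eval() pairs above reflect a caching pattern: row_sums is
allocated as a temporary tensor in Prepare(), filled once on the first hybrid
Eval(), and reused until the weights change, guarded by the compute_row_sums
flag that appears throughout the hits. A minimal sketch of that pattern,
assuming a row-major int8 weight matrix (illustrative names and bodies, not
the actual lstm_eval.cc code):

#include <cstdint>

// Sum each row of a row-major int8 weight matrix once; hybrid kernels reuse
// the result on every subsequent invocation.
void ComputeRowSums(const int8_t* weights, int rows, int cols,
                    int32_t* row_sums) {
  for (int r = 0; r < rows; ++r) {
    int32_t sum = 0;
    for (int c = 0; c < cols; ++c) sum += weights[r * cols + c];
    row_sums[r] = sum;
  }
}

// Eval()-side guard: recompute only on the first run or after the weight
// tensor has been rewritten.
void MaybeComputeRowSums(const int8_t* weights, int rows, int cols,
                         int32_t* row_sums, bool* compute_row_sums) {
  if (*compute_row_sums) {
    ComputeRowSums(weights, rows, cols, row_sums);
    *compute_row_sums = false;
  }
}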
/external/tensorflow/tensorflow/lite/kernels/internal/

kernel_utils.cc
  128: int32_t* accum_scratch, int32_t* row_sums, bool* compute_row_sums) {  (in RnnBatchStep())
  155: int32_t* accum_scratch, int32_t* row_sums, bool* compute_row_sums) {  (in RnnBatchStep())

tensor_utils.h
  65: const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,  (in MatrixBatchVectorMultiplyAccumulate())

tensor_utils_test.cc
  457:  int32_t* row_sums = scratch.data() + 8 * 4;  (in TEST(), local)
  1166: std::vector<int32_t> row_sums(rows);  (in TestPerChannelDotprodMatrixBatchVectorMultiply(), local)
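The input_offset/row_sums pairing in the MatrixBatchVectorMultiplyAccumulate
signature comes from asymmetric input quantization: with
x_j ~= scale * (q_j - input_offset), the hybrid dot product factors as
sum_j w_ij * x_j ~= scale * (sum_j w_ij * q_j - input_offset * sum_j w_ij),
so the precomputed row sum turns the zero-point correction into a single
multiply per row. A scalar, single-batch sketch of that correction (a
hypothetical helper, not the TFLite signature):

#include <cstdint>

// Hypothetical single-batch helper: accumulates matrix * vector into result,
// correcting for the input zero point via the precomputed row sums.
void HybridMatVecWithRowSums(const int8_t* matrix, int m_rows, int m_cols,
                             const int8_t* vector, float scaling_factor,
                             int32_t input_offset, const int32_t* row_sums,
                             float* result) {
  for (int row = 0; row < m_rows; ++row) {
    int32_t acc = 0;
    for (int col = 0; col < m_cols; ++col) {
      acc += matrix[row * m_cols + col] * vector[col];
    }
    acc -= input_offset * row_sums[row];  // zero-point correction
    result[row] += scaling_factor * acc;  // rescale to float and accumulate
  }
}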
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/

sse_tensor_utils.cc
  101: const int32_t* input_offset, const int32_t* row_sums) {  (in SseMatrixBatchVectorMultiplyAccumulateImpl())
  274: const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,  (in SseMatrixBatchVectorMultiplyAccumulate())

batch_matmul.h
  118: const int32_t* input_offset, int32_t* row_sums,  (in BatchMatMul())

neon_tensor_utils.cc
  436:  const int32_t* input_offset, int32_t* row_sums) {  (in DotprodMatrixBatchFourVectorMultiplyAccumulate())
  598:  const int32_t* input_offset, int32_t* row_sums) {  (in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate())
  1247: const int32_t* input_offset, int32_t* row_sums) {  (in NeonMatrixBatchVectorMultiplyAccumulateImpl())
  1375: const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,  (in NeonMatrixBatchVectorMultiplyAccumulate())

sse_tensor_utils.h
  57: const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,  (in MatrixBatchVectorMultiplyAccumulate())

neon_tensor_utils.h
  59: const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,  (in MatrixBatchVectorMultiplyAccumulate())

optimized_ops.h
  1433: const RuntimeShape& scratch_shape, int32_t* scratch, int32_t* row_sums,  (in HybridConvPerChannel())
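Note the const split visible in the SSE and NEON hits: the inner *Impl loops
take const int32_t* row_sums (the sums are read-only by the time the SIMD
code runs), while the public entry points take a mutable row_sums plus a
scratch buffer, since they may still have to fill the sums on the first
call. Abridged, illustrative prototypes of that layering (parameter lists
are assumptions, not the exact TFLite signatures):

#include <cstdint>

// Inner SIMD hot loop: row_sums is already computed and read-only.
void MatrixBatchVectorMultiplyAccumulateImpl(
    const int8_t* matrix, int m_rows, int m_cols, const int8_t* vectors,
    const float* scaling_factors, int n_batch, float* result,
    const int32_t* input_offset, const int32_t* row_sums);

// Public entry point: owns the scratch buffer and may lazily fill row_sums
// before forwarding to the Impl above.
void MatrixBatchVectorMultiplyAccumulate(
    const int8_t* matrix, int m_rows, int m_cols, const int8_t* vectors,
    const float* scaling_factors, int n_batch, float* result,
    const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
    bool* compute_row_sums);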
/external/tensorflow/tensorflow/lite/kernels/internal/reference/

svdf.h
  199: int32_t* row_sums, bool* compute_row_sums) {  (in EvalHybridSVDF())

batch_matmul.h
  111: const int32_t* input_offset, int32_t* row_sums,  (in BatchMatMul())

portable_tensor_utils.h
  79: const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,  (in MatrixBatchVectorMultiplyAccumulate())

portable_tensor_utils.cc
  167: const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,  (in PortableMatrixBatchVectorMultiplyAccumulate())
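Putting the two sketches above together gives a rough picture of what a
portable reference path like PortableMatrixBatchVectorMultiplyAccumulate has
to do per batch: each float input vector is quantized independently (yielding
a per-batch scaling factor and input offset), the row sums are computed
lazily once, and the corrected matrix-vector product runs per batch. A
hypothetical driver, reusing MaybeComputeRowSums and HybridMatVecWithRowSums
from the earlier sketches (the name and layout here are assumptions):

// Hypothetical batched driver; per-batch scaling_factors/input_offsets come
// from quantizing each float input vector independently.
void PortableHybridMultiplyAccumulateSketch(
    const int8_t* matrix, int m_rows, int m_cols, const int8_t* vectors,
    const float* scaling_factors, const int32_t* input_offsets, int n_batch,
    int32_t* row_sums, bool* compute_row_sums, float* results) {
  MaybeComputeRowSums(matrix, m_rows, m_cols, row_sums, compute_row_sums);
  for (int b = 0; b < n_batch; ++b) {
    HybridMatVecWithRowSums(matrix, m_rows, m_cols, vectors + b * m_cols,
                            scaling_factors[b], input_offsets[b], row_sums,
                            results + b * m_rows);
  }
}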