| /external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
| D | sse_tensor_utils.cc |
|    99 const float* __restrict__ scaling_factors, int n_batch, in SseMatrixBatchVectorMultiplyAccumulateImpl()
|   211 const float* __restrict__ scaling_factors, int n_batch, in SseMatrixBatchVectorMultiplyAccumulate()
|   222 const float* __restrict__ scaling_factors, int n_batch, int32_t* scratch, in SseMatrixBatchVectorMultiplyAccumulate()
|   272 const float* __restrict__ scaling_factors, int n_batch, in SseMatrixBatchVectorMultiplyAccumulate()
|   394 const float* __restrict__ scaling_factors, int n_batch, in SseSparseMatrixBatchVectorMultiplyAccumulate()
|
| D | sse_tensor_utils.h |
|    47 const float* __restrict__ scaling_factors, int n_batch, in MatrixBatchVectorMultiplyAccumulate()
|    55 const int8_t* __restrict__ vectors, const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate()
|    67 const float* __restrict__ scaling_factors, int n_batch, in MatrixBatchVectorMultiplyAccumulate()
|    93 const float* __restrict__ scaling_factors, int n_batch, in SparseMatrixBatchVectorMultiplyAccumulate()
|
| D | neon_tensor_utils.h |
|    37 const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate()
|    47 const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate()
|    57 const int8_t* __restrict__ vectors, const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate()
|    85 const float* scaling_factors, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate()
|
| D | neon_tensor_utils.cc |
|   306 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate()
|   434 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate()
|   563 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate()
|   596 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate()
|   668 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate()
|   679 const float* scaling_factors, int n_batch, float* __restrict__ result) { in DotprodSparseMatrixBatchVectorMultiplyAccumulate()
|  1046 const float* scaling_factors, in NeonMatrixBatchVectorMultiplyAccumulate()
|  1179 const float* scaling_factors, in NeonMatrixBatchVectorMultiplyAccumulate()
|  1245 const int8_t* __restrict__ vectors, const float* scaling_factors, in NeonMatrixBatchVectorMultiplyAccumulateImpl()
|  1373 const int8_t* __restrict__ vectors, const float* scaling_factors, in NeonMatrixBatchVectorMultiplyAccumulate()
|   [all …]
|
| D | batch_matmul.h | 117 const float* scaling_factors, in BatchMatMul()
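
Nearly all of the optimized/ hits above belong to hybrid kernels: int8 weights and on-the-fly quantized int8 activations are multiplied with integer arithmetic, and `scaling_factors` (one float per batch) converts the int32 accumulators back into the float result. Below is a minimal sketch of that shared pattern, assuming a row-major matrix and batch-major vectors; the parameter names mirror the snippets above, but this is an illustration, not the exact TFLite signature.

```cpp
#include <cstdint>

// Illustrative sketch (not the TFLite signature): hybrid matrix x batch-of-
// vectors multiply-accumulate. `matrix` is int8, m_rows x m_cols, row major;
// `vectors` holds n_batch quantized input vectors of length m_cols;
// `scaling_factors` carries one float scale per batch.
void HybridMatrixBatchVectorMultiplyAccumulate(
    const int8_t* matrix, int m_rows, int m_cols, const int8_t* vectors,
    const float* scaling_factors, int n_batch, float* result) {
  for (int b = 0; b < n_batch; ++b) {
    const int8_t* vec = vectors + b * m_cols;
    const float scale = scaling_factors[b];
    for (int r = 0; r < m_rows; ++r) {
      const int8_t* row = matrix + r * m_cols;
      int32_t acc = 0;  // int8 x int8 products accumulate exactly in int32
      for (int c = 0; c < m_cols; ++c) {
        acc += static_cast<int32_t>(row[c]) * static_cast<int32_t>(vec[c]);
      }
      // The per-batch scale undoes the activation quantization (and, in the
      // real kernels, typically folds in the weight scale as well).
      result[b * m_rows + r] += scale * static_cast<float>(acc);
    }
  }
}
```

The SSE and NEON variants listed above vectorize the inner dot product, and the sparse variants skip runs of zero weights, but the per-batch rescale step is the same idea.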
|
| /external/tensorflow/tensorflow/lite/kernels/ |
| D | basic_rnn.cc |
|   142 TfLiteTensor* scaling_factors; in Prepare() local
|   234 TfLiteTensor* scaling_factors, in EvalHybrid()
|   315 TfLiteTensor* scaling_factors; in Eval() local
|
| D | unidirectional_sequence_rnn.cc |
|   149 TfLiteTensor* scaling_factors; in Prepare() local
|   272 TfLiteTensor* hidden_state_scratch, TfLiteTensor* scaling_factors, in EvalHybrid()
|   391 TfLiteTensor* scaling_factors; in Eval() local
|
| D | fully_connected.cc |
|   283 TfLiteTensor* scaling_factors; in PrepareImpl() local
|   439 TfLiteTensor* scaling_factors, TfLiteTensor* accum_scratch, in EvalHybridImpl()
|   568 TfLiteTensor* scaling_factors; member
|   584 TfLiteTensor* scaling_factors, in EvalHybrid()
|   698 TfLiteTensor* scaling_factors; in EvalQuantized() local
|
| D | svdf.cc |
|   185 TfLiteTensor* scaling_factors; in Prepare() local
|   324 TfLiteTensor* scaling_factors; in Eval() local
|
| D | batch_matmul.cc |
|   231 TfLiteTensor* scaling_factors; in InitializeTemporaries() local
|   447 TfLiteTensor* scaling_factors, in EvalHybrid()
|   579 TfLiteTensor* scaling_factors; in EvalQuantized() local
|
| D | bidirectional_sequence_rnn.cc |
|   250 TfLiteTensor* scaling_factors; in Prepare() local
|   523 TfLiteTensor* scaling_factors, TfLiteTensor* input_quantized, in EvalHybrid()
|   802 TfLiteTensor* scaling_factors; in Eval() local
|
| D | depthwise_conv.cc | 245 TfLiteTensor* scaling_factors; in Prepare() local
|
| D | conv.cc | 518 TfLiteTensor* scaling_factors; in Prepare() local
|
| /external/tensorflow/tensorflow/lite/micro/ |
| D | micro_utils.cc | 39 float* scaling_factors) { in SignedSymmetricPerChannelQuantize()
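
In the micro hit, `scaling_factors` is an output rather than an input: symmetric per-channel quantization derives one scale per channel from that channel's largest magnitude. A rough sketch of the idea follows, assuming a channel-major layout and using illustrative names rather than the actual micro_utils signature.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Illustrative per-channel symmetric quantization (assumed channel-major
// layout, i.e. values[ch * channel_size + i]): each channel gets its own
// scale so that its largest magnitude maps to +/-127.
void SymmetricPerChannelQuantize(const float* values, int num_channels,
                                 int channel_size, int8_t* quantized,
                                 float* scaling_factors) {
  for (int ch = 0; ch < num_channels; ++ch) {
    const float* src = values + ch * channel_size;
    float max_abs = 0.0f;
    for (int i = 0; i < channel_size; ++i) {
      max_abs = std::max(max_abs, std::fabs(src[i]));
    }
    const float scale = max_abs / 127.0f;
    scaling_factors[ch] = scale;
    const float inv_scale = (scale == 0.0f) ? 0.0f : 1.0f / scale;
    for (int i = 0; i < channel_size; ++i) {
      const int32_t q = static_cast<int32_t>(std::round(src[i] * inv_scale));
      // Clamp to a symmetric int8 range.
      quantized[ch * channel_size + i] = static_cast<int8_t>(
          std::min<int32_t>(127, std::max<int32_t>(-127, q)));
    }
  }
}
```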
|
| /external/tensorflow/tensorflow/lite/kernels/internal/ |
| D | kernel_utils.cc |
|   125 int8_t* quantized_hidden_state_ptr_batch, float* scaling_factors, in RnnBatchStep()
|   152 int8_t* quantized_hidden_state_ptr_batch, float* scaling_factors, in RnnBatchStep()
|
| D | tensor_utils_common.h | 65 float* scaling_factors, int32_t* zero_points, in BatchQuantizeFloats()
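
The tensor_utils_common.h hit is the producer side used by the hybrid kernels above: each batch row of a float activation tensor is quantized on the fly, yielding one scaling factor per row (plus a zero point in the asymmetric case) that the matmul later multiplies back in. One plausible shape of such a routine is sketched below; the names and the exact min/max mapping are assumptions.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Illustrative asymmetric per-batch-row quantization: every row of `input`
// (n_batch rows of n_elems floats) gets its own scaling factor and zero
// point derived from that row's min/max, so a hybrid matmul can rescale the
// integer accumulators afterwards.
void QuantizeFloatsPerBatch(const float* input, int n_batch, int n_elems,
                            int8_t* quantized, float* scaling_factors,
                            int32_t* zero_points) {
  for (int b = 0; b < n_batch; ++b) {
    const float* row = input + b * n_elems;
    float mn = row[0], mx = row[0];
    for (int i = 1; i < n_elems; ++i) {
      mn = std::min(mn, row[i]);
      mx = std::max(mx, row[i]);
    }
    const float scale = std::max(mx - mn, 1e-8f) / 255.0f;  // 256 int8 levels
    const int32_t zero_point =
        static_cast<int32_t>(std::round(-128.0f - mn / scale));
    scaling_factors[b] = scale;
    zero_points[b] = zero_point;
    for (int i = 0; i < n_elems; ++i) {
      const int32_t q =
          static_cast<int32_t>(std::round(row[i] / scale)) + zero_point;
      quantized[b * n_elems + i] = static_cast<int8_t>(
          std::min<int32_t>(127, std::max<int32_t>(-128, q)));
    }
  }
}
```

A symmetric variant would drop the zero point and use the row's maximum magnitude divided by 127 as the scale.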
|
| D | tensor_utils_test.cc | 427 const std::vector<float> scaling_factors = { in TEST() local
|
| /external/tensorflow/tensorflow/lite/kernels/internal/reference/ |
| D | portable_tensor_utils.h |
|    68 const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate()
|    77 const int8_t* __restrict__ vectors, const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate()
|    90 const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate()
|   117 const float* scaling_factors, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate()
|
| D | svdf.h | 197 float* scaling_factors, int8_t* quantized_input, float* state, in EvalHybridSVDF()
|
| D | portable_tensor_utils.cc |
|   140 const int8_t* __restrict__ vectors, const float* scaling_factors, in PortableMatrixBatchVectorMultiplyAccumulate()
|   165 const int8_t* __restrict__ vectors, const float* scaling_factors, in PortableMatrixBatchVectorMultiplyAccumulate()
|   262 const float* scaling_factors, int n_batch, float* __restrict__ result) { in PortableSparseMatrixBatchVectorMultiplyAccumulate()
|
| D | batch_matmul.h | 110 const float* scaling_factors, in BatchMatMul()
|
| /external/webrtc/modules/audio_processing/agc2/ |
| D | limiter.cc | 45 void ComputePerSampleSubframeFactors( in ComputePerSampleSubframeFactors()
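
The WebRTC hit is unrelated to quantization: the AGC2 limiter computes gain factors at subframe granularity and then needs a factor for every sample. The sketch below only illustrates one way to interpolate subframe factors into per-sample factors (a linear ramp between consecutive subframes, so the applied gain has no audible steps); the names and layout are assumptions, not the limiter's actual API.

```cpp
#include <cstddef>
#include <vector>

// Illustration: expand per-subframe gain factors into per-sample factors by
// linearly ramping from the previous subframe's factor to the current one.
std::vector<float> InterpolatePerSampleFactors(
    const std::vector<float>& subframe_factors, float previous_factor,
    std::size_t samples_per_subframe) {
  std::vector<float> per_sample;
  per_sample.reserve(subframe_factors.size() * samples_per_subframe);
  float last = previous_factor;
  for (float target : subframe_factors) {
    for (std::size_t i = 1; i <= samples_per_subframe; ++i) {
      const float t = static_cast<float>(i) / samples_per_subframe;
      per_sample.push_back(last + t * (target - last));  // linear ramp
    }
    last = target;  // next subframe ramps from this factor
  }
  return per_sample;
}
```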
|