Searched refs:scaling_factors (Results 1 – 25 of 27) sorted by relevance

/external/tensorflow/tensorflow/lite/kernels/internal/
kernel_utils.cc
125 int8_t* quantized_hidden_state_ptr_batch, float* scaling_factors, in RnnBatchStep() argument
137 quantized_hidden_state_ptr_batch, scaling_factors, in RnnBatchStep()
152 int8_t* quantized_hidden_state_ptr_batch, float* scaling_factors, in RnnBatchStep() argument
194 scaling_factors, zero_points, asymmetric_quantize_inputs); in RnnBatchStep()
196 scaling_factors[b] *= input_weights_scale; in RnnBatchStep()
201 scaling_factors, batch_size, output_ptr_batch, in RnnBatchStep()
211 aux_quantized_input_ptr_batch, scaling_factors, zero_points, in RnnBatchStep()
214 scaling_factors[b] *= aux_input_weights_scale; in RnnBatchStep()
220 aux_quantized_input_ptr_batch, scaling_factors, batch_size, in RnnBatchStep()
232 quantized_hidden_state_ptr_batch, scaling_factors, zero_points, in RnnBatchStep()
[all …]
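
The RnnBatchStep hits above trace TFLite's hybrid (float activations, int8 weights) pattern: each batch row is quantized symmetrically, the per-batch scale is folded together with the weight scale (line 196), and the int32 accumulator is dequantized back to float. Below is a minimal self-contained sketch of that flow; the function and parameter names are illustrative, not the TFLite API.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Sketch of the hybrid path: symmetric per-batch quantization, weight-scale
// folding, int8 matrix-vector accumulation, float dequantization.
void HybridMatmulSketch(const float* input, int n_batch, int n_input,
                        const int8_t* weights, int n_output,
                        float input_weights_scale, float* output) {
  std::vector<int8_t> quantized(n_batch * n_input);
  std::vector<float> scaling_factors(n_batch);
  for (int b = 0; b < n_batch; ++b) {
    // 1. One symmetric scale per batch row: max_abs / 127.
    const float* row = input + b * n_input;
    float max_abs = 0.f;
    for (int i = 0; i < n_input; ++i) {
      max_abs = std::max(max_abs, std::fabs(row[i]));
    }
    const float scale = max_abs / 127.f;
    for (int i = 0; i < n_input; ++i) {
      quantized[b * n_input + i] = static_cast<int8_t>(
          scale == 0.f ? 0 : std::lrintf(row[i] / scale));
    }
    // 2. Fold the weight scale in, as in scaling_factors[b] *= input_weights_scale.
    scaling_factors[b] = scale * input_weights_scale;
  }
  // 3. Accumulate in int32 and dequantize with the per-batch factor; the +=
  //    mirrors the "accumulate" in MatrixBatchVectorMultiplyAccumulate.
  for (int b = 0; b < n_batch; ++b) {
    for (int r = 0; r < n_output; ++r) {
      int32_t acc = 0;
      for (int i = 0; i < n_input; ++i) {
        acc += weights[r * n_input + i] * quantized[b * n_input + i];
      }
      output[b * n_output + r] += acc * scaling_factors[b];
    }
  }
}
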
tensor_utils_common.h
65 float* scaling_factors, int32_t* zero_points, in BatchQuantizeFloats() argument
72 &scaling_factors[b], &zero_points[b]); in BatchQuantizeFloats()
77 &unused_min, &unused_max, &scaling_factors[b]); in BatchQuantizeFloats()
126 const float* __restrict__ scaling_factors, int n_batch,
135 const float* __restrict__ scaling_factors, int n_batch,
152 const float* __restrict__ scaling_factors, int n_batch,
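
BatchQuantizeFloats chooses per batch row between symmetric quantization (scaling factor only) and asymmetric quantization (scaling factor plus zero point), depending on asymmetric_quantize_inputs. A hedged sketch of the asymmetric branch, assuming the usual int8 affine mapping with scale = range / 255 and a clamped zero point; the helper name is hypothetical and the exact TFLite rounding details may differ.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Hypothetical asymmetric per-row quantizer: derive scale and zero point from
// the row's min/max (widened to include 0), then round and clamp to int8.
void AsymmetricQuantizeRowSketch(const float* values, int size,
                                 int8_t* quantized, float* scaling_factor,
                                 int32_t* zero_point) {
  const auto mm = std::minmax_element(values, values + size);
  const float rmin = std::min(0.f, *mm.first);
  const float rmax = std::max(0.f, *mm.second);
  const float scale = (rmax - rmin) / 255.f;  // int8 covers 256 steps
  int32_t zp = 0;
  if (scale != 0.f) {
    // rmin must map to -128: zp = -128 - rmin / scale.
    zp = static_cast<int32_t>(std::lrintf(-128.f - rmin / scale));
    zp = std::min(127, std::max(-128, zp));
  }
  *scaling_factor = scale;
  *zero_point = zp;
  for (int i = 0; i < size; ++i) {
    int32_t q = zp;
    if (scale != 0.f) q += static_cast<int32_t>(std::lrintf(values[i] / scale));
    quantized[i] = static_cast<int8_t>(std::min(127, std::max(-128, q)));
  }
}
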
kernel_utils.h
72 int8_t* quantized_hidden_state_ptr_batch, float* scaling_factors,
86 int8_t* quantized_hidden_state_ptr_batch, float* scaling_factors,
tensor_utils.h
45 const float* __restrict__ scaling_factors, int n_batch,
52 const int8_t* __restrict__ vectors, const float* scaling_factors,
/external/webrtc/modules/audio_processing/agc2/
limiter.cc
46 const std::array<float, kSubFramesInFrame + 1>& scaling_factors, in ComputePerSampleSubframeFactors()
49 const size_t num_subframes = scaling_factors.size() - 1; in ComputePerSampleSubframeFactors()
54 const bool is_attack = scaling_factors[0] > scaling_factors[1]; in ComputePerSampleSubframeFactors()
57 scaling_factors[0], scaling_factors[1], in ComputePerSampleSubframeFactors()
64 const float scaling_start = scaling_factors[i]; in ComputePerSampleSubframeFactors()
65 const float scaling_end = scaling_factors[i + 1]; in ComputePerSampleSubframeFactors()
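
In the AGC2 limiter, scaling_factors holds one gain per subframe boundary (kSubFramesInFrame + 1 values), and ComputePerSampleSubframeFactors expands them into one gain per sample by interpolating linearly inside each subframe. A minimal sketch of that expansion, assuming equally sized subframes; the real code additionally flags an attack when scaling_factors[0] > scaling_factors[1] (line 54) and handles the first subframe specially, which this sketch omits.

#include <vector>

// Expand per-subframe-boundary gains into per-sample gains: each subframe
// ramps linearly from its start factor to its end factor.
std::vector<float> PerSampleFactorsSketch(
    const std::vector<float>& scaling_factors, int samples_per_subframe) {
  const int num_subframes = static_cast<int>(scaling_factors.size()) - 1;
  std::vector<float> per_sample(num_subframes * samples_per_subframe);
  for (int i = 0; i < num_subframes; ++i) {
    const float scaling_start = scaling_factors[i];
    const float scaling_end = scaling_factors[i + 1];
    const float step = (scaling_end - scaling_start) / samples_per_subframe;
    for (int s = 0; s < samples_per_subframe; ++s) {
      per_sample[i * samples_per_subframe + s] = scaling_start + step * s;
    }
  }
  return per_sample;
}
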
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
sse_tensor_utils_impl.h
35 const float* __restrict__ scaling_factors, int n_batch,
43 const float* __restrict__ scaling_factors, int n_batch, int32_t* scratch,
50 const float* __restrict__ scaling_factors, int n_batch,
60 const float* __restrict__ scaling_factors, int n_batch,
sse_tensor_utils.cc
99 const float* __restrict__ scaling_factors, int n_batch, in SseMatrixBatchVectorMultiplyAccumulateImpl() argument
103 const float batch_scaling_factor = scaling_factors[batch]; in SseMatrixBatchVectorMultiplyAccumulateImpl()
211 const float* __restrict__ scaling_factors, int n_batch, in SseMatrixBatchVectorMultiplyAccumulate() argument
214 matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result, in SseMatrixBatchVectorMultiplyAccumulate()
222 const float* __restrict__ scaling_factors, int n_batch, int32_t* scratch, in SseMatrixBatchVectorMultiplyAccumulate() argument
235 const float batch_scaling_factor0 = scaling_factors[i / m_rows]; in SseMatrixBatchVectorMultiplyAccumulate()
236 const float batch_scaling_factor1 = scaling_factors[(i + 4) / m_rows]; in SseMatrixBatchVectorMultiplyAccumulate()
254 const float batch_scaling_factor = scaling_factors[i / m_rows]; in SseMatrixBatchVectorMultiplyAccumulate()
264 matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result, in SseMatrixBatchVectorMultiplyAccumulate()
272 const float* __restrict__ scaling_factors, int n_batch, in SseMatrixBatchVectorMultiplyAccumulate() argument
[all …]
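
A detail worth noting in the SSE kernel: the accumulators for all batches are laid out as one flat, batch-major array of n_batch * m_rows values, so dividing a flat index by m_rows recovers the owning batch. That is what scaling_factors[i / m_rows] and scaling_factors[(i + 4) / m_rows] at lines 235–236 do. A scalar illustration of the same mapping, with illustrative names:

#include <cstdint>

// Dequantize a flat, batch-major accumulator array: flat index i belongs to
// batch i / m_rows, mirroring scaling_factors[i / m_rows] in the SSE code.
void DequantizeFlatSketch(const int32_t* acc, int n_batch, int m_rows,
                          const float* scaling_factors, float* result) {
  for (int i = 0; i < n_batch * m_rows; ++i) {
    result[i] += acc[i] * scaling_factors[i / m_rows];
  }
}
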
sse_tensor_utils.h
47 const float* __restrict__ scaling_factors, int n_batch, in MatrixBatchVectorMultiplyAccumulate() argument
50 vectors, scaling_factors, n_batch, result); in MatrixBatchVectorMultiplyAccumulate()
55 const int8_t* __restrict__ vectors, const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate() argument
60 vectors, scaling_factors, n_batch, result, per_channel_scale, in MatrixBatchVectorMultiplyAccumulate()
67 const float* __restrict__ scaling_factors, int n_batch, in MatrixBatchVectorMultiplyAccumulate() argument
71 vectors, scaling_factors, n_batch, scratch, result, context); in MatrixBatchVectorMultiplyAccumulate()
93 const float* __restrict__ scaling_factors, int n_batch, in SparseMatrixBatchVectorMultiplyAccumulate() argument
96 m_rows, m_cols, vectors, scaling_factors, n_batch, result); in SparseMatrixBatchVectorMultiplyAccumulate()
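
The overloads above that also take a per_channel_scale cover the per-channel hybrid case: the effective dequantization factor for an output is the per-batch input scale multiplied by the per-row (per-output-channel) weight scale. A sketch of that combination; the function name is illustrative.

#include <cstdint>

// Per-channel hybrid dequantization sketch: scale each accumulator by its
// batch's input factor times its output row's channel scale.
void PerChannelDequantSketch(const int32_t* acc, int n_batch, int m_rows,
                             const float* scaling_factors,
                             const float* per_channel_scale, float* result) {
  for (int b = 0; b < n_batch; ++b) {
    for (int r = 0; r < m_rows; ++r) {
      result[b * m_rows + r] +=
          acc[b * m_rows + r] * scaling_factors[b] * per_channel_scale[r];
    }
  }
}
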
neon_tensor_utils.h
37 const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate() argument
41 vectors, scaling_factors, n_batch, result); in MatrixBatchVectorMultiplyAccumulate()
47 const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate() argument
52 vectors, scaling_factors, n_batch, scratch, result, context); in MatrixBatchVectorMultiplyAccumulate()
57 const int8_t* __restrict__ vectors, const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate() argument
62 vectors, scaling_factors, n_batch, result, per_channel_scale, in MatrixBatchVectorMultiplyAccumulate()
85 const float* scaling_factors, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate() argument
87 m_rows, m_cols, vectors, scaling_factors, n_batch, result); in SparseMatrixBatchVectorMultiplyAccumulate()
neon_tensor_utils_impl.h
40 const float* scaling_factors,
49 const float* scaling_factors,
57 const int8_t* __restrict__ vectors, const float* scaling_factors,
123 const float* scaling_factors, int n_batch, float* __restrict__ result);
neon_tensor_utils.cc
306 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate() argument
320 const float* scaling_factors_ptr = scaling_factors + batch; in DotprodMatrixBatchFourVectorMultiplyAccumulate()
434 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate() argument
450 const float* scaling_factors_ptr = scaling_factors + batch; in DotprodMatrixBatchFourVectorMultiplyAccumulate()
563 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate() argument
567 matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result, in DotprodMatrixBatchFourVectorMultiplyAccumulate()
596 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate() argument
633 memcpy(padded_scaling_factors, scaling_factors, n_batch * sizeof(float)); in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate()
668 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate() argument
671 matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result, in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate()
[all …]
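
DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate exists because the four-vector dotprod kernel needs the batch count to be a multiple of four: it copies the inputs and scaling_factors into padded buffers (the memcpy at line 633) so the fast path can run unconditionally. A sketch of that padding step, assuming the tail is zero-filled so the phantom batches contribute nothing; names are illustrative.

#include <cstring>
#include <vector>

// Round n_batch up to a multiple of 4 and copy the per-batch scaling factors
// into the padded buffer; the zero-initialized tail is harmless padding.
std::vector<float> PadScalingFactorsSketch(const float* scaling_factors,
                                           int n_batch) {
  const int padded_batch = (n_batch + 3) & ~3;   // next multiple of 4
  std::vector<float> padded(padded_batch, 0.f);  // tail stays zero
  std::memcpy(padded.data(), scaling_factors, n_batch * sizeof(float));
  return padded;
}
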
/external/tensorflow/tensorflow/lite/kernels/
basic_rnn.cc
142 TfLiteTensor* scaling_factors; in Prepare() local
144 &scaling_factors)); in Prepare()
145 scaling_factors->type = kTfLiteFloat32; in Prepare()
146 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
148 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
151 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
234 TfLiteTensor* scaling_factors, in EvalHybrid() argument
261 float* scaling_factors_ptr = GetTensorData<float>(scaling_factors); in EvalHybrid()
315 TfLiteTensor* scaling_factors; in Eval() local
317 GetTemporarySafe(context, node, 2, &scaling_factors)); in Eval()
[all …]
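
The basic_rnn.cc hits show the standard TFLite Prepare() idiom for this temporary: scaling_factors is a float32, arena-allocated tensor resized to one factor per batch. Reconstructed from the elided lines above (the temporary index and variable names follow basic_rnn.cc; treat it as a reading aid, not verbatim source):

// Inside Prepare(): back scaling_factors with a float32 arena temporary
// sized [batch_size], resizing only when the current shape differs.
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(
    context, GetTemporarySafe(context, node, /*index=*/2, &scaling_factors));
scaling_factors->type = kTfLiteFloat32;
scaling_factors->allocation_type = kTfLiteArenaRw;
const int scaling_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
  TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
  scaling_factors_size->data[0] = batch_size;
  TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
                                                   scaling_factors_size));
}

The same Prepare() sequence recurs nearly verbatim in unidirectional_sequence_rnn.cc, svdf.cc, fully_connected.cc, batch_matmul.cc, bidirectional_sequence_rnn.cc, depthwise_conv.cc, and conv.cc below, differing mainly in the temporary's index and the batch dimension used.
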
unidirectional_sequence_rnn.cc
149 TfLiteTensor* scaling_factors; in Prepare() local
151 &scaling_factors)); in Prepare()
152 scaling_factors->type = kTfLiteFloat32; in Prepare()
153 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
155 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
158 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
272 TfLiteTensor* hidden_state_scratch, TfLiteTensor* scaling_factors, in EvalHybrid() argument
299 float* scaling_factors_ptr = GetTensorData<float>(scaling_factors); in EvalHybrid()
391 TfLiteTensor* scaling_factors; in Eval() local
393 GetTemporarySafe(context, node, 2, &scaling_factors)); in Eval()
[all …]
svdf.cc
185 TfLiteTensor* scaling_factors; in Prepare() local
187 &scaling_factors)); in Prepare()
188 scaling_factors->type = kTfLiteFloat32; in Prepare()
189 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
191 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
194 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
324 TfLiteTensor* scaling_factors; in Eval() local
326 &scaling_factors)); in Eval()
368 GetTensorData<float>(scaling_factors), in Eval()
fully_connected.cc
283 TfLiteTensor* scaling_factors; in PrepareImpl() local
285 &scaling_factors)); in PrepareImpl()
286 scaling_factors->type = kTfLiteFloat32; in PrepareImpl()
287 scaling_factors->allocation_type = kTfLiteArenaRw; in PrepareImpl()
290 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in PrepareImpl()
293 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in PrepareImpl()
439 TfLiteTensor* scaling_factors, TfLiteTensor* accum_scratch, in EvalHybridImpl() argument
483 GetTensorData<float>(scaling_factors) + thread_start; in EvalHybridImpl()
532 TfLiteTensor* scaling_factors, in HybridFullyConnectedTask()
545 scaling_factors(scaling_factors), in HybridFullyConnectedTask()
[all …]
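
fully_connected.cc additionally threads the hybrid evaluation by batch: each HybridFullyConnectedTask receives the shared temporaries plus a thread_start offset, so it reads GetTensorData<float>(scaling_factors) + thread_start (line 483) and touches only its own batches. A generic sketch of that partitioning, independent of the TFLite types; all names are illustrative, and eval_range stands in for the per-range hybrid kernel.

#include <algorithm>
#include <thread>
#include <vector>

// Split n_batch across workers; each worker gets its batch range and the
// matching slice of the per-batch scaling factors.
template <typename Fn>
void ParallelOverBatchesSketch(int n_batch, int n_threads,
                               const float* scaling_factors, Fn eval_range) {
  std::vector<std::thread> workers;
  const int per_thread = (n_batch + n_threads - 1) / n_threads;
  for (int t = 0; t < n_threads; ++t) {
    const int thread_start = t * per_thread;
    const int thread_end = std::min(n_batch, thread_start + per_thread);
    if (thread_start >= thread_end) break;
    workers.emplace_back([=] {
      // As in the TFLite task: the slice starts at scaling_factors + thread_start.
      eval_range(thread_start, thread_end, scaling_factors + thread_start);
    });
  }
  for (auto& w : workers) w.join();
}
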
batch_matmul.cc
231 TfLiteTensor* scaling_factors; in InitializeTemporaries() local
233 &scaling_factors)); in InitializeTemporaries()
234 scaling_factors->type = kTfLiteFloat32; in InitializeTemporaries()
235 scaling_factors->allocation_type = kTfLiteArenaRw; in InitializeTemporaries()
238 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in InitializeTemporaries()
241 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in InitializeTemporaries()
447 TfLiteTensor* scaling_factors, in EvalHybrid() argument
464 float* scaling_factors_ptr = GetTensorData<float>(scaling_factors); in EvalHybrid()
579 TfLiteTensor* scaling_factors; in EvalQuantized() local
581 &scaling_factors)); in EvalQuantized()
[all …]
bidirectional_sequence_rnn.cc
250 TfLiteTensor* scaling_factors; in Prepare() local
252 &scaling_factors)); in Prepare()
253 scaling_factors->type = kTfLiteFloat32; in Prepare()
254 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
256 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
259 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
523 TfLiteTensor* scaling_factors, TfLiteTensor* input_quantized, in EvalHybrid() argument
575 float* scaling_factors_ptr = GetTensorData<float>(scaling_factors); in EvalHybrid()
802 TfLiteTensor* scaling_factors; in Eval() local
805 GetTemporarySafe(context, node, kScalingFactors, &scaling_factors)); in Eval()
[all …]
depthwise_conv.cc
245 TfLiteTensor* scaling_factors; in Prepare() local
248 &scaling_factors)); in Prepare()
249 scaling_factors->type = kTfLiteFloat32; in Prepare()
250 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
253 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
256 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
conv.cc
518 TfLiteTensor* scaling_factors; in Prepare() local
521 &scaling_factors)); in Prepare()
522 scaling_factors->type = kTfLiteFloat32; in Prepare()
523 scaling_factors->allocation_type = kTfLiteArenaRw; in Prepare()
529 if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { in Prepare()
532 TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, in Prepare()
/external/tensorflow/tensorflow/lite/micro/
micro_utils.cc
39 float* scaling_factors) { in SignedSymmetricPerChannelQuantize() argument
66 scaling_factors[channel] = in SignedSymmetricPerChannelQuantize()
71 static_cast<int32_t>(roundf(values[idx] / scaling_factors[channel])); in SignedSymmetricPerChannelQuantize()
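
SignedSymmetricPerChannelQuantize in the micro runtime computes one symmetric scale per channel and rounds each value with roundf, per lines 66–71. A sketch of that per-channel pass, assuming a channel-major value layout (the real function takes the quantized dimension from the tensor shape); names and layout are illustrative.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Per-channel symmetric quantization: scale = max_abs / 127 per channel,
// then round each value by its channel's scale.
void PerChannelSymmetricQuantizeSketch(const float* values, int n_channels,
                                       int per_channel, int8_t* quantized,
                                       float* scaling_factors) {
  for (int c = 0; c < n_channels; ++c) {
    const float* chan = values + c * per_channel;
    float max_abs = 0.f;
    for (int i = 0; i < per_channel; ++i) {
      max_abs = std::max(max_abs, std::fabs(chan[i]));
    }
    const float scale = max_abs / 127.f;
    scaling_factors[c] = scale;
    for (int i = 0; i < per_channel; ++i) {
      quantized[c * per_channel + i] = static_cast<int8_t>(
          scale == 0.f ? 0.f : std::roundf(chan[i] / scale));
    }
  }
}
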
/external/tensorflow/tensorflow/lite/kernels/internal/reference/
portable_tensor_utils.h
68 const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate() argument
72 scaling_factors, n_batch, result); in MatrixBatchVectorMultiplyAccumulate()
77 const int8_t* __restrict__ vectors, const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate() argument
82 matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result, in MatrixBatchVectorMultiplyAccumulate()
90 const float* scaling_factors, in MatrixBatchVectorMultiplyAccumulate() argument
95 scaling_factors, n_batch, result); in MatrixBatchVectorMultiplyAccumulate()
117 const float* scaling_factors, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate() argument
119 matrix, ledger, m_rows, m_cols, vectors, scaling_factors, n_batch, in SparseMatrixBatchVectorMultiplyAccumulate()
svdf.h
197 float* scaling_factors, int8_t* quantized_input, float* state, in EvalHybridSVDF() argument
218 input_data, batch_size, input_size, quantized_input, scaling_factors, in EvalHybridSVDF()
221 scaling_factors[b] *= weights_feature_scale; in EvalHybridSVDF()
227 scaling_factors, batch_size, scratch, in EvalHybridSVDF()
portable_tensor_utils_impl.h
64 const int8_t* __restrict__ vectors, const float* scaling_factors,
69 const int8_t* __restrict__ vectors, const float* scaling_factors,
76 const int8_t* __restrict__ vector, const float* scaling_factors,
93 const float* scaling_factors, int n_batch, float* __restrict__ result);
portable_tensor_utils.cc
140 const int8_t* __restrict__ vectors, const float* scaling_factors, in PortableMatrixBatchVectorMultiplyAccumulate() argument
143 const float batch_scaling_factor = scaling_factors[batch]; in PortableMatrixBatchVectorMultiplyAccumulate()
165 const int8_t* __restrict__ vectors, const float* scaling_factors, in PortableMatrixBatchVectorMultiplyAccumulate() argument
171 matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result); in PortableMatrixBatchVectorMultiplyAccumulate()
182 const float batch_scaling_factor = scaling_factors[batch]; in PortableMatrixBatchVectorMultiplyAccumulate()
262 const float* scaling_factors, int n_batch, float* __restrict__ result) { in PortableSparseMatrixBatchVectorMultiplyAccumulate() argument
267 const float batch_scaling_factor = scaling_factors[batch]; in PortableSparseMatrixBatchVectorMultiplyAccumulate()
batch_matmul.h
110 const float* scaling_factors, in BatchMatMul() argument
161 const float* scale_ptr0 = scaling_factors + (b0 * ioff_ext0); in BatchMatMul()
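
In the hybrid BatchMatMul reference, the scaling factors of the quantized operand must follow that operand's broadcast batch dimensions: the offset extent (ioff_ext0 at line 161) is zero for a broadcast dimension, so every index along it reuses the same factors. A tiny illustration of the stride-0 idea; this helper is hypothetical.

// Stride-0 broadcasting: a dimension of size 1 advances by 0, so all indices
// along the broadcast dimension map to the same scaling-factor slot.
inline const float* ScaleForBatchSketch(const float* scaling_factors, int b0,
                                        int dim_size, int stride) {
  const int ext = (dim_size == 1) ? 0 : stride;  // broadcast => extent 0
  return scaling_factors + b0 * ext;             // cf. b0 * ioff_ext0 above
}
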
