
Searched refs:scale_value (Results 1 – 25 of 32) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
quantized_batch_norm_op.cc
  131  float scale_value;  [in FixedPointBatchNorm(), local]
  133  scale_value = (1.0f / sqrtf(var_value + variance_epsilon)) * gamma_value;  [in FixedPointBatchNorm()]
  135  scale_value = (1.0f / sqrtf(var_value + variance_epsilon));  [in FixedPointBatchNorm()]
  137  const float offset_value = (-mean_value * scale_value) + beta_value;  [in FixedPointBatchNorm()]
  139  FloatToQuantized<T2>(scale_value, *output_min, *output_max);  [in FixedPointBatchNorm()]
  152  const T2 scale_value = scale_flat(channel);  [in FixedPointBatchNorm(), local]
  155  ((input_value * scale_value) / one_in_output_space) + offset_value;  [in FixedPointBatchNorm()]
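
The hits above show the standard batch-norm folding done in FixedPointBatchNorm: per-channel variance, gamma, mean, and beta collapse into a single scale and offset, which are then quantized and applied as one multiply-add per element. Below is a minimal float-only sketch of that arithmetic; the variable names mirror the snippet, but the ScaleOffset/FoldBatchNorm/ApplyFolded helpers and the scale_after_normalization flag are illustrative, and the FloatToQuantized step is omitted.

    // Sketch only: the float math behind the quantized kernel's scale/offset.
    #include <cmath>

    struct ScaleOffset {
      float scale;
      float offset;
    };

    // Fold mean/variance/beta/gamma into one scale and one offset per channel.
    ScaleOffset FoldBatchNorm(float mean_value, float var_value, float beta_value,
                              float gamma_value, float variance_epsilon,
                              bool scale_after_normalization) {
      const float scale_value =
          scale_after_normalization
              ? (1.0f / std::sqrt(var_value + variance_epsilon)) * gamma_value
              : (1.0f / std::sqrt(var_value + variance_epsilon));
      const float offset_value = (-mean_value * scale_value) + beta_value;
      return {scale_value, offset_value};
    }

    // In float terms each element then becomes: output = input * scale + offset.
    float ApplyFolded(float input_value, const ScaleOffset& so) {
      return input_value * so.scale + so.offset;
    }
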
/external/tensorflow/tensorflow/python/tools/
optimize_for_inference_lib.py
  349  scale_value = (
  353  scale_value = (
  355  offset_value = (-mean_value * scale_value) + beta_value
  361  current_scale = scale_value[it.multi_index[3]]
  367  current_scale = scale_value[it.multi_index[2] * channel_multiplier +
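
optimize_for_inference_lib.py computes the same folded scale_value/offset_value and then bakes the scale directly into the convolution weights, indexing scale_value by output channel: multi_index[3] for a Conv2D kernel in HWIO layout, and multi_index[2] * channel_multiplier + multi_index[3] for a depthwise kernel. A rough sketch of that per-output-channel weight scaling, assuming a flat row-major HWIO buffer (the function name and layout assumption are mine, not the library's API):

    #include <cstddef>
    #include <vector>

    // Scale an HWIO-layout Conv2D weight tensor in place: every weight feeding
    // output channel c is multiplied by scale_value[c].
    void ScaleConvWeights(std::vector<float>& weights,            // size H*W*I*O
                          const std::vector<float>& scale_value,  // size O
                          std::size_t out_channels) {
      for (std::size_t i = 0; i < weights.size(); ++i) {
        const std::size_t c = i % out_channels;  // last axis is the output channel
        weights[i] *= scale_value[c];
      }
    }
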
/external/XNNPACK/src/f32-vscaleextexp/gen/
avx512f-p5-scalef-x16.c
  23  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x16(), argument]
  39  const __m512 vscalev = _mm512_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x16()]
avx2-p5-x8.c
  24  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x8(), argument]
  45  const __m256 vscalev = _mm256_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x8()]
avx512f-p5-scalef-x32.c
  23  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x32(), argument]
  39  const __m512 vscalev = _mm512_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x32()]
avx512f-p5-scalef-x48.c
  23  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x48(), argument]
  39  const __m512 vscalev = _mm512_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x48()]
avx2-p5-x16.c
  24  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x16(), argument]
  45  const __m256 vscalev = _mm256_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x16()]
avx512f-p5-scalef-x64.c
  23  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x64(), argument]
  39  const __m512 vscalev = _mm512_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x64()]
avx2-p5-x24.c
  24  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x24(), argument]
  45  const __m256 vscalev = _mm256_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x24()]
avx512f-p5-scalef-x80.c
  23  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x80(), argument]
  39  const __m512 vscalev = _mm512_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x80()]
avx2-p5-x32.c
  24  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x32(), argument]
  45  const __m256 vscalev = _mm256_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x32()]
avx512f-p5-scalef-x96.c
  23  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x96(), argument]
  39  const __m512 vscalev = _mm512_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x96()]
avx2-p5-x40.c
  24  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x40(), argument]
  45  const __m256 vscalev = _mm256_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x40()]
avx512f-p5-scalef-x112.c
  23  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x112(), argument]
  39  const __m512 vscalev = _mm512_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x112()]
avx512f-p5-scalef-x128.c
  23  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x128(), argument]
  39  const __m512 vscalev = _mm512_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x128()]
avx2-p5-x48.c
  24  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x48(), argument]
  45  const __m256 vscalev = _mm256_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x48()]
avx512f-p5-scalef-x144.c
  23  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x144(), argument]
  39  const __m512 vscalev = _mm512_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x144()]
avx2-p5-x56.c
  24  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x56(), argument]
  45  const __m256 vscalev = _mm256_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x56()]
avx512f-p5-scalef-x160.c
  23  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x160(), argument]
  39  const __m512 vscalev = _mm512_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x160()]
avx2-p5-x64.c
  24  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x64(), argument]
  45  const __m256 vscalev = _mm256_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx2_p5_x64()]
avx512f-p5-scalef-x176.c
  23  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x176(), argument]
  39  const __m512 vscalev = _mm512_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x176()]
avx512f-p5-scalef-x192.c
  23  float scale_value,  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x192(), argument]
  39  const __m512 vscalev = _mm512_set1_ps(scale_value);  [in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x192()]
/external/XNNPACK/src/f32-vscaleextexp/
avx512f-p5-scalef.c.in
  23  float scale_value,
  39  const __m512 vscalev = _mm512_set1_ps(scale_value);
avx2-p5.c.in
  24  float scale_value,
  45  const __m256 vscalev = _mm256_set1_ps(scale_value);
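
All of these kernels (and the two templates they are generated from) treat scale_value the same way: the scalar argument is broadcast once into a vector register with _mm512_set1_ps or _mm256_set1_ps and then reused for the whole batch. A reduced AVX2 sketch of that broadcast-and-scale pattern follows; it shows only the scaling step, not the extended-exponent exp evaluation the real vscaleextexp kernels perform, and the function name is made up for illustration.

    #include <immintrin.h>
    #include <stddef.h>

    // Multiply n floats (n assumed to be a multiple of 8) by one scale_value,
    // broadcasting the scalar into a vector register up front.
    void scale_f32_avx2(size_t n, const float* x, float* y, float scale_value) {
      const __m256 vscalev = _mm256_set1_ps(scale_value);
      for (size_t i = 0; i < n; i += 8) {
        const __m256 vx = _mm256_loadu_ps(x + i);
        _mm256_storeu_ps(y + i, _mm256_mul_ps(vx, vscalev));
      }
    }
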
/external/libgav1/libgav1/src/dsp/
intrapred.cc
  271  const uint16_t scale_value = (1 << kSmoothWeightScale);  [in Smooth(), local]
  277  assert(scale_value >= weights_y[y] && scale_value >= weights_x[x]);  [in Smooth()]
  280  pred += static_cast<uint8_t>(scale_value - weights_y[y]) * bottom_left;  [in Smooth()]
  281  pred += static_cast<uint8_t>(scale_value - weights_x[x]) * top_right;  [in Smooth()]
  302  const uint16_t scale_value = (1 << kSmoothWeightScale);  [in SmoothVertical(), local]
  308  assert(scale_value >= weights_y[y]);  [in SmoothVertical()]
  310  pred += static_cast<uint8_t>(scale_value - weights_y[y]) * bottom_left;  [in SmoothVertical()]
  329  const uint16_t scale_value = (1 << kSmoothWeightScale);  [in SmoothHorizontal(), local]
  335  assert(scale_value >= weights_x[x]);  [in SmoothHorizontal()]
  337  pred += static_cast<uint8_t>(scale_value - weights_x[x]) * top_right;  [in SmoothHorizontal()]
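
In libgav1's smooth intra predictors, scale_value = 1 << kSmoothWeightScale is the fixed-point weight range: a weight w and its complement scale_value - w split each pixel's contribution between two reference pixels (top vs. bottom-left, left vs. top-right). The sketch below shows that blend for one pixel of the combined Smooth predictor, assuming kSmoothWeightScale == 8 and a simple round-then-shift; the helper name and the exact rounding are illustrative, not copied from libgav1.

    #include <stdint.h>

    constexpr int kSmoothWeightScale = 8;  // assumed: weights scaled to 256

    // Blend one pixel: w_y/w_x weight the top/left references, and their
    // complements (scale_value - w) weight bottom_left/top_right.
    inline uint8_t SmoothPixel(uint8_t top, uint8_t left, uint8_t bottom_left,
                               uint8_t top_right, uint8_t w_y, uint8_t w_x) {
      const uint16_t scale_value = 1 << kSmoothWeightScale;
      uint32_t pred = w_y * top + w_x * left;
      pred += static_cast<uint16_t>(scale_value - w_y) * bottom_left;
      pred += static_cast<uint16_t>(scale_value - w_x) * top_right;
      // Two weighted pairs were summed, so divide by 2 * scale_value with rounding.
      return static_cast<uint8_t>((pred + scale_value) >> (kSmoothWeightScale + 1));
    }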
