/external/libopus/silk/fixed/ |
D | burg_modified_FIX.c |
      59  const opus_int16 *x_ptr;  in silk_burg_modified_c() local
      87  x_ptr = x + s * subfr_length;  in silk_burg_modified_c()
      90  … silk_inner_prod16_aligned_64( x_ptr, x_ptr + n, subfr_length - n, arch ), rshifts );  in silk_burg_modified_c()
      97  x_ptr = x + s * subfr_length;  in silk_burg_modified_c()
      98  celt_pitch_xcorr(x_ptr, x_ptr + 1, xcorr, subfr_length - D, D, arch );  in silk_burg_modified_c()
     101  d = MAC16_16( d, x_ptr[ i ], x_ptr[ i - n ] );  in silk_burg_modified_c()
     123  x_ptr = x + s * subfr_length;  in silk_burg_modified_c()
     124  …x1 = -silk_LSHIFT32( (opus_int32)x_ptr[ n ], 16 - rshifts );  /* Q(16-rs…  in silk_burg_modified_c()
     125  …x2 = -silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 16 - rshifts );  /* Q(16-rs…  in silk_burg_modified_c()
     126  …tmp1 = silk_LSHIFT32( (opus_int32)x_ptr[ n ], QA - 16 );  /* Q(QA-16…  in silk_burg_modified_c()
    [all …]
|
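Note: the burg_modified_FIX.c hits all follow one access pattern: x_ptr is rebased to the start of subframe s (x + s * subfr_length) and then used for lag-n products over that subframe. The sketch below shows just that pattern in plain floating-point C; num_subfr, subfr_length, order, and the double accumulator are illustrative choices, and the inner loop stands in for silk_inner_prod16_aligned_64 / celt_pitch_xcorr without the fixed-point rshifts handling.

#include <stddef.h>

/* Sketch only: accumulate lag-n autocorrelations per subframe, the way the
 * SILK Burg code walks x_ptr = x + s * subfr_length in the hits above.
 * Fixed-point scaling (rshifts, Q-domains) is omitted. */
void subframe_autocorr_sketch(const float *x, size_t num_subfr,
                              size_t subfr_length, size_t order, double *C)
{
    for (size_t n = 1; n <= order; n++) {
        C[n - 1] = 0.0;
    }
    for (size_t s = 0; s < num_subfr; s++) {
        const float *x_ptr = x + s * subfr_length;   /* rebase per subframe */
        for (size_t n = 1; n <= order; n++) {
            double acc = 0.0;
            /* inner product of the subframe with itself delayed by n samples */
            for (size_t i = n; i < subfr_length; i++) {
                acc += (double)x_ptr[i] * (double)x_ptr[i - n];
            }
            C[n - 1] += acc;
        }
    }
}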
D | LTP_analysis_filter_FIX.c |
      45  const opus_int16 *x_ptr, *x_lag_ptr;  in silk_LTP_analysis_filter_FIX() local
      51  x_ptr = x;  in silk_LTP_analysis_filter_FIX()
      55  x_lag_ptr = x_ptr - pitchL[ k ];  in silk_LTP_analysis_filter_FIX()
      65  LTP_res_ptr[ i ] = x_ptr[ i ];  in silk_LTP_analysis_filter_FIX()
      77  LTP_res_ptr[ i ] = (opus_int16)silk_SAT16( (opus_int32)x_ptr[ i ] - LTP_est );  in silk_LTP_analysis_filter_FIX()
      87  x_ptr += subfr_length;  in silk_LTP_analysis_filter_FIX()
|
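Note: in the LTP analysis filter, x_lag_ptr = x_ptr - pitchL[ k ] points one pitch period back into the same signal, and the residual is the input minus a short FIR estimate taken around that lagged position (saturated to 16 bits in the fixed-point version). A hedged float sketch of that residual computation follows; the 5-tap order and the tap indexing are illustrative assumptions, not the exact SILK filter layout.

#include <stddef.h>

#define SKETCH_LTP_ORDER 5   /* assumption: a small, odd number of LTP taps */

/* Sketch only: per-subframe long-term prediction residual.  x_lag_ptr points
 * pitch_lag[k] samples back into the same signal, mirroring the
 * "x_lag_ptr = x_ptr - pitchL[ k ]" hit above.  The caller must provide
 * enough history before x for the largest lag plus half the tap count. */
void ltp_residual_sketch(const float *x, float *res,
                         int num_subfr, int subfr_length,
                         const int *pitch_lag,
                         const float b[][SKETCH_LTP_ORDER])
{
    const float *x_ptr = x;
    float *res_ptr = res;

    for (int k = 0; k < num_subfr; k++) {
        const float *x_lag_ptr = x_ptr - pitch_lag[k];       /* lagged input */
        for (int i = 0; i < subfr_length; i++) {
            float est = 0.0f;
            for (int j = 0; j < SKETCH_LTP_ORDER; j++) {
                est += b[k][j] * x_lag_ptr[i + j - SKETCH_LTP_ORDER / 2];
            }
            res_ptr[i] = x_ptr[i] - est;                     /* residual */
        }
        x_ptr   += subfr_length;
        res_ptr += subfr_length;
    }
}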
D | find_pitch_lags_FIX.c |
      47  const opus_int16 *x_ptr;  in silk_find_pitch_lags_FIX() local
      73  x_ptr = x + buf_len - psEnc->sCmn.pitch_LPC_win_length;  in silk_find_pitch_lags_FIX()
      75  silk_apply_sine_window( Wsig_ptr, x_ptr, 1, psEnc->sCmn.la_pitch );  in silk_find_pitch_lags_FIX()
      79  x_ptr += psEnc->sCmn.la_pitch;  in silk_find_pitch_lags_FIX()
      80  …silk_memcpy( Wsig_ptr, x_ptr, ( psEnc->sCmn.pitch_LPC_win_length - silk_LSHIFT( psEnc->sCmn.la_pit…  in silk_find_pitch_lags_FIX()
      84  x_ptr += psEnc->sCmn.pitch_LPC_win_length - silk_LSHIFT( psEnc->sCmn.la_pitch, 1 );  in silk_find_pitch_lags_FIX()
      85  silk_apply_sine_window( Wsig_ptr, x_ptr, 2, psEnc->sCmn.la_pitch );  in silk_find_pitch_lags_FIX()
|
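Note: the pitch-analysis buffer is windowed in three parts: a ramp over the first la_pitch samples, an unmodified middle copied with silk_memcpy, and a ramp over the last la_pitch samples. The 1 and 2 window-type arguments to silk_apply_sine_window appear to select the rising and falling halves of a sine window; the same pattern shows up again in noise_shape_analysis_FLP.c below. A small sketch of that three-part windowing, with a plain sin() standing in for SILK's fixed-point window, follows.

#include <math.h>
#include <stddef.h>
#include <string.h>

/* Sketch only: three-part analysis windowing.  Rising half-sine over the
 * first `ramp` samples, untouched middle, falling half-sine over the last
 * `ramp` samples. */
void window_three_part_sketch(float *out, const float *x_ptr,
                              size_t win_length, size_t ramp)
{
    const double pi = 3.14159265358979323846;
    const size_t flat = win_length - 2 * ramp;

    for (size_t i = 0; i < ramp; i++) {                      /* rising ramp */
        out[i] = x_ptr[i] * (float)sin(0.5 * pi * (double)(i + 1) / (double)ramp);
    }
    memcpy(out + ramp, x_ptr + ramp, flat * sizeof(float));  /* flat middle */
    for (size_t i = 0; i < ramp; i++) {                      /* falling ramp */
        const size_t j = win_length - ramp + i;
        out[j] = x_ptr[j] * (float)sin(0.5 * pi * (double)(ramp - i) / (double)ramp);
    }
}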
D | residual_energy_FIX.c |
      52  const opus_int16 *x_ptr;  in silk_residual_energy_FIX() local
      56  x_ptr = x;  in silk_residual_energy_FIX()
      64  …silk_LPC_analysis_filter( LPC_res, x_ptr, a_Q12[ i ], ( MAX_NB_SUBFR >> 1 ) * offset, LPC_order, a…  in silk_residual_energy_FIX()
      79  x_ptr += ( MAX_NB_SUBFR >> 1 ) * offset;  in silk_residual_energy_FIX()
|
D | find_pred_coefs_FIX.c |
      46  const opus_int16 *x_ptr;  in silk_find_pred_coefs_FIX() local
     108  x_ptr = x - psEnc->sCmn.predictLPCOrder;  in silk_find_pred_coefs_FIX()
     111  silk_scale_copy_vector16( x_pre_ptr, x_ptr, invGains_Q16[ i ],  in silk_find_pred_coefs_FIX()
     114  x_ptr += psEnc->sCmn.subfr_length;  in silk_find_pred_coefs_FIX()
|
/external/compiler-rt/test/msan/ |
D | dtor-base-access.cc |
      12  int *x_ptr;  member in Base
      15  x_ptr = y_ptr;  in Base()
      31  assert(__msan_test_shadow(&this->x_ptr, sizeof(this->x_ptr)) == -1);  in ~Base()
      33  assert(__msan_test_shadow(this->x_ptr, sizeof(*this->x_ptr)) != -1);  in ~Base()
      40  assert(__msan_test_shadow(&this->x_ptr, sizeof(this->x_ptr)) == -1);  in ~Derived()
      45  assert(__msan_test_shadow(&d->x_ptr, sizeof(d->x_ptr)) == -1);  in main()
      47  assert(__msan_test_shadow(&d->x_ptr, sizeof(d->x_ptr)) != -1);  in main()
|
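Note: these asserts rely on __msan_test_shadow(), which returns -1 when every byte in the range is initialized and otherwise the offset of the first poisoned byte; the test uses that to check which memory MSan's use-after-dtor instrumentation has already poisoned at each point in the destructor chain. A minimal C sketch of the return-value convention (not the dtor-poisoning test itself, which needs C++) follows; it must be built with -fsanitize=memory.

/* Sketch only: __msan_test_shadow() semantics used by the asserts above.
 * Build with: clang -fsanitize=memory msan_shadow_sketch.c */
#include <assert.h>
#include <sanitizer/msan_interface.h>

int main(void) {
  int uninit;        /* shadow is poisoned: never written */
  int init = 42;     /* shadow is clean: fully initialized */

  /* -1 means "no poisoned bytes anywhere in the range". */
  assert(__msan_test_shadow(&init, sizeof(init)) == -1);

  /* A non-negative result is the offset of the first poisoned byte. */
  assert(__msan_test_shadow(&uninit, sizeof(uninit)) == 0);

  /* Explicit unpoisoning follows the same convention. */
  __msan_unpoison(&uninit, sizeof(uninit));
  assert(__msan_test_shadow(&uninit, sizeof(uninit)) == -1);
  return 0;
}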
/external/libopus/silk/fixed/x86/ |
D | burg_modified_FIX_sse4_1.c |
      64  const opus_int16 *x_ptr;  in silk_burg_modified_sse4_1() local
      99  x_ptr = x + s * subfr_length;  in silk_burg_modified_sse4_1()
     102  … silk_inner_prod16_aligned_64( x_ptr, x_ptr + n, subfr_length - n, arch ), rshifts );  in silk_burg_modified_sse4_1()
     109  x_ptr = x + s * subfr_length;  in silk_burg_modified_sse4_1()
     110  celt_pitch_xcorr(x_ptr, x_ptr + 1, xcorr, subfr_length - D, D, arch );  in silk_burg_modified_sse4_1()
     113  d = MAC16_16( d, x_ptr[ i ], x_ptr[ i - n ] );  in silk_burg_modified_sse4_1()
     135  x_ptr = x + s * subfr_length;  in silk_burg_modified_sse4_1()
     136  …x1 = -silk_LSHIFT32( (opus_int32)x_ptr[ n ], 16 - rshifts );  /* Q(16-rs…  in silk_burg_modified_sse4_1()
     137  …x2 = -silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 16 - rshifts );  /* Q(16-rs…  in silk_burg_modified_sse4_1()
     138  …tmp1 = silk_LSHIFT32( (opus_int32)x_ptr[ n ], QA - 16 );  /* Q(QA-16…  in silk_burg_modified_sse4_1()
    [all …]
|
/external/libopus/silk/float/ |
D | burg_modified_FLP.c |
      50  const silk_float *x_ptr;  in silk_burg_modified_FLP() local
      61  x_ptr = x + s * subfr_length;  in silk_burg_modified_FLP()
      63  C_first_row[ n - 1 ] += silk_inner_product_FLP( x_ptr, x_ptr + n, subfr_length - n );  in silk_burg_modified_FLP()
      78  x_ptr = x + s * subfr_length;  in silk_burg_modified_FLP()
      79  tmp1 = x_ptr[ n ];  in silk_burg_modified_FLP()
      80  tmp2 = x_ptr[ subfr_length - n - 1 ];  in silk_burg_modified_FLP()
      82  C_first_row[ k ] -= x_ptr[ n ] * x_ptr[ n - k - 1 ];  in silk_burg_modified_FLP()
      83  C_last_row[ k ] -= x_ptr[ subfr_length - n - 1 ] * x_ptr[ subfr_length - n + k ];  in silk_burg_modified_FLP()
      85  tmp1 += x_ptr[ n - k - 1 ] * Atmp;  in silk_burg_modified_FLP()
      86  tmp2 += x_ptr[ subfr_length - n + k ] * Atmp;  in silk_burg_modified_FLP()
    [all …]
|
D | LTP_analysis_filter_FLP.c |
      45  const silk_float *x_ptr, *x_lag_ptr;  in silk_LTP_analysis_filter_FLP() local
      51  x_ptr = x;  in silk_LTP_analysis_filter_FLP()
      54  x_lag_ptr = x_ptr - pitchL[ k ];  in silk_LTP_analysis_filter_FLP()
      62  LTP_res_ptr[ i ] = x_ptr[ i ];  in silk_LTP_analysis_filter_FLP()
      73  x_ptr += subfr_length;  in silk_LTP_analysis_filter_FLP()
|
D | find_pred_coefs_FLP.c |
      48  const silk_float *x_ptr;  in silk_find_pred_coefs_FLP() local
      82  x_ptr = x - psEnc->sCmn.predictLPCOrder;  in silk_find_pred_coefs_FLP()
      85  silk_scale_copy_vector_FLP( x_pre_ptr, x_ptr, invGains[ i ],  in silk_find_pred_coefs_FLP()
      88  x_ptr += psEnc->sCmn.subfr_length;  in silk_find_pred_coefs_FLP()
|
D | noise_shape_analysis_FLP.c |
     162  const silk_float *x_ptr, *pitch_res_ptr;  in silk_noise_shape_analysis_FLP() local
     165  x_ptr = x - psEnc->sCmn.la_shape;  in silk_noise_shape_analysis_FLP()
     243  silk_apply_sine_window_FLP( x_windowed, x_ptr, 1, slope_part );  in silk_noise_shape_analysis_FLP()
     245  silk_memcpy( x_windowed + shift, x_ptr + shift, flat_part * sizeof(silk_float) );  in silk_noise_shape_analysis_FLP()
     247  silk_apply_sine_window_FLP( x_windowed + shift, x_ptr + shift, 2, slope_part );  in silk_noise_shape_analysis_FLP()
     250  x_ptr += psEnc->sCmn.subfr_length;  in silk_noise_shape_analysis_FLP()
|
/external/pigweed/pw_assert/ |
D | assert_backend_compile_test_c.c |
     116  void* x_ptr = (void*)(50);  in AssertBackendCompileTestsInC() local
     119  PW_CHECK_PTR_EQ(x_ptr, y_ptr);  in AssertBackendCompileTestsInC()
     120  PW_CHECK_PTR_LE(x_ptr, y_ptr, "PTR: " FAIL_IF_DISPLAYED);  in AssertBackendCompileTestsInC()
     121  PW_CHECK_PTR_LE(x_ptr, y_ptr, "PTR: " FAIL_IF_DISPLAYED_ARGS, z);  in AssertBackendCompileTestsInC()
     123  PW_CHECK_PTR_GE(x_ptr, y_ptr);  in AssertBackendCompileTestsInC()
     124  PW_CHECK_PTR_GE(x_ptr, y_ptr, "PTR: " FAIL_IF_HIDDEN);  in AssertBackendCompileTestsInC()
     125  PW_CHECK_PTR_GE(x_ptr, y_ptr, "PTR: " FAIL_IF_HIDDEN_ARGS, z);  in AssertBackendCompileTestsInC()
|
D | assert_backend_compile_test.cc |
     116  void* x_ptr = reinterpret_cast<void*>(50);  in TEST() local
     119  PW_CHECK_PTR_EQ(x_ptr, y_ptr);  in TEST()
     120  PW_CHECK_PTR_LE(x_ptr, y_ptr, "PTR: " FAIL_IF_DISPLAYED);  in TEST()
     121  PW_CHECK_PTR_LE(x_ptr, y_ptr, "PTR: " FAIL_IF_DISPLAYED_ARGS, z);  in TEST()
     123  PW_CHECK_PTR_GE(x_ptr, y_ptr);  in TEST()
     124  PW_CHECK_PTR_GE(x_ptr, y_ptr, "PTR: " FAIL_IF_HIDDEN);  in TEST()
     125  PW_CHECK_PTR_GE(x_ptr, y_ptr, "PTR: " FAIL_IF_HIDDEN_ARGS, z);  in TEST()
|
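Note: both compile tests exercise the pointer-comparison flavors of Pigweed's PW_CHECK macros, which take the two pointers either bare or followed by an optional message and printf-style arguments, as the hits show. A short hedged usage sketch is below; the "pw_assert/check.h" include path and the presence of a linked assert backend are assumptions about the build setup, not something shown in the listing.

/* Sketch only: PW_CHECK_PTR_* usage, assuming the facade header is
 * "pw_assert/check.h" and an assert backend is linked into the build. */
#include "pw_assert/check.h"

void check_buffer_window(const void* begin, const void* end,
                         const void* cursor) {
  /* Bare form: just the comparison. */
  PW_CHECK_PTR_LE(begin, end);

  /* With a message, and with printf-style arguments, as in the tests above. */
  PW_CHECK_PTR_GE(cursor, begin, "cursor before window start");
  PW_CHECK_PTR_LE(cursor, end, "cursor %p past window end", cursor);
}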
/external/XNNPACK/src/f32-vrelu/gen/ |
D | vrelu-scalar-x1.c |
      18  const float* x_ptr,  in xnn_f32_vrelu_ukernel__scalar_x1() argument
      24  assert(x_ptr != NULL);  in xnn_f32_vrelu_ukernel__scalar_x1()
      27  const uint32_t* x = (const uint32_t*)x_ptr;  in xnn_f32_vrelu_ukernel__scalar_x1()
|
D | vrelu-scalar-x2.c |
      18  const float* x_ptr,  in xnn_f32_vrelu_ukernel__scalar_x2() argument
      24  assert(x_ptr != NULL);  in xnn_f32_vrelu_ukernel__scalar_x2()
      27  const uint32_t* x = (const uint32_t*)x_ptr;  in xnn_f32_vrelu_ukernel__scalar_x2()
|
D | vrelu-scalar-x4.c |
      18  const float* x_ptr,  in xnn_f32_vrelu_ukernel__scalar_x4() argument
      24  assert(x_ptr != NULL);  in xnn_f32_vrelu_ukernel__scalar_x4()
      27  const uint32_t* x = (const uint32_t*)x_ptr;  in xnn_f32_vrelu_ukernel__scalar_x4()
|
D | vrelu-scalar-x8.c |
      18  const float* x_ptr,  in xnn_f32_vrelu_ukernel__scalar_x8() argument
      24  assert(x_ptr != NULL);  in xnn_f32_vrelu_ukernel__scalar_x8()
      27  const uint32_t* x = (const uint32_t*)x_ptr;  in xnn_f32_vrelu_ukernel__scalar_x8()
|
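Note: all four generated variants take the input as const float* x_ptr but immediately reinterpret it as uint32_t. ReLU only needs the IEEE-754 sign bit, so the work can stay in integer registers. The sketch below shows that sign-mask idea in scalar C; it is not the generated XNNPACK kernel body, and the byte-count convention for the batch size is an assumption about the microkernel interface.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch only: scalar ReLU on the bit pattern.  A float with the sign bit
 * set maps to +0.0f; anything else passes through unchanged.  This mirrors
 * why the generated kernels cast x_ptr to const uint32_t*. */
void relu_bits_sketch(size_t batch_bytes, const float* x_ptr, float* y_ptr) {
  assert(x_ptr != NULL);
  assert(y_ptr != NULL);
  assert(batch_bytes % sizeof(float) == 0);

  const uint32_t* x = (const uint32_t*)x_ptr;
  uint32_t* y = (uint32_t*)y_ptr;

  for (size_t n = batch_bytes; n != 0; n -= sizeof(float)) {
    const uint32_t vacc = *x++;
    /* All-ones when the sign bit is clear, all-zeros when it is set. */
    const uint32_t keep = (vacc >> 31) - 1u;
    *y++ = vacc & keep;
  }
}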
/external/eigen/Eigen/src/Core/products/ |
D | SelfadjointMatrixVector_BLAS.h |
      90  const EIGTYPE *x_ptr; \
      96  x_ptr=x_tmp.data(); \
      97  } else x_ptr=_rhs; \
      98  …E*)&numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, (const BLA…
|
D | GeneralMatrixVector_BLAS.h |
     103  const EIGTYPE *x_ptr; \
     113  x_ptr=x_tmp.data(); \
     115  } else x_ptr=rhs; \
     116  …E*)&numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, (const BLA…
|
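Note: in both Eigen wrappers, x_ptr exists to satisfy BLAS: when the right-hand-side vector needs to be copied or adjusted it points at a packed temporary (x_tmp.data()), otherwise it aliases the caller's data, and it is then passed to the ?gemv/?symv call with incx = 1. A hedged C sketch of that pack-then-call pattern follows, assuming a CBLAS implementation (and its cblas.h header) is linked; the function and parameter names are illustrative.

/* Sketch only: pack a strided vector so a BLAS gemv can be called with
 * incx == 1, the same role x_ptr plays in the Eigen macros above. */
#include <cblas.h>
#include <stdlib.h>

/* y = alpha * A * x + beta * y; A is rows x cols, column-major with leading
 * dimension lda; x may have an arbitrary stride incx_in. */
void gemv_with_packed_rhs(int rows, int cols, float alpha, const float* A,
                          int lda, const float* x, int incx_in, float beta,
                          float* y) {
  const float* x_ptr;
  float* x_tmp = NULL;

  if (incx_in != 1) {
    /* Non-unit stride: copy into a contiguous temporary first. */
    x_tmp = (float*)malloc((size_t)cols * sizeof(float));
    if (x_tmp == NULL) return;
    for (int i = 0; i < cols; i++) {
      x_tmp[i] = x[(size_t)i * (size_t)incx_in];
    }
    x_ptr = x_tmp;
  } else {
    x_ptr = x;
  }

  cblas_sgemv(CblasColMajor, CblasNoTrans, rows, cols, alpha, A, lda,
              x_ptr, /*incx=*/1, beta, y, /*incy=*/1);
  free(x_tmp);
}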
/external/ComputeLibrary/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_pretransposed/ |
D | generic.cpp |
      57  const float *x_ptr = X;  in a64_sgemv_pretransposed() local
     170  x0 = vld1q_f32(x_ptr);  in a64_sgemv_pretransposed()
     521  [a_ptr] "+r" (a_ptr), [x_ptr] "+r" (x_ptr),  in a64_sgemv_pretransposed()
     583  [a_ptr] "+r" (a_ptr), [x_ptr] "+r" (x_ptr),  in a64_sgemv_pretransposed()
|
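Note: the a64_sgemv_pretransposed kernel loads the x vector into NEON registers (vld1q_f32(x_ptr)) and keeps x_ptr/a_ptr as read-write operands of large inline-asm blocks. The much smaller intrinsics sketch below shows the same y += A·x accumulation idea without the pretransposed blocking, prefetching, or assembly; it assumes an AArch64/NEON target, a column-major A, and a row count that is a multiple of 4.

#include <arm_neon.h>
#include <stddef.h>

/* Sketch only: y += A * x, A stored column-major (rows x cols).  Each column
 * contributes a_col * x[c] to y, accumulated with fused multiply-add. */
void sgemv_neon_sketch(int rows, int cols, const float* A, const float* x_ptr,
                       float* y) {
  for (int c = 0; c < cols; c++) {
    const float32x4_t vx = vdupq_n_f32(x_ptr[c]);        /* broadcast x[c] */
    const float* a_col = A + (size_t)c * (size_t)rows;
    for (int r = 0; r < rows; r += 4) {
      float32x4_t acc = vld1q_f32(y + r);
      acc = vfmaq_f32(acc, vld1q_f32(a_col + r), vx);    /* acc += a * x[c] */
      vst1q_f32(y + r, acc);
    }
  }
}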
/external/XNNPACK/src/f16-vclamp/gen/ |
D | vclamp-neonfp16arith-x8.c |
      20  const void* restrict x_ptr,  in xnn_f16_vclamp_ukernel__neonfp16arith_x8() argument
      26  assert(x_ptr != NULL);  in xnn_f16_vclamp_ukernel__neonfp16arith_x8()
      29  const __fp16* x = (const __fp16*) x_ptr;  in xnn_f16_vclamp_ukernel__neonfp16arith_x8()
|
D | vclamp-f16c-x8.c |
      20  const void* restrict x_ptr,  in xnn_f16_vclamp_ukernel__f16c_x8() argument
      26  assert(x_ptr != NULL);  in xnn_f16_vclamp_ukernel__f16c_x8()
      29  const uint16_t* x = (const uint16_t*) x_ptr;  in xnn_f16_vclamp_ukernel__f16c_x8()
|
D | vclamp-neonfp16arith-x16.c |
      20  const void* restrict x_ptr,  in xnn_f16_vclamp_ukernel__neonfp16arith_x16() argument
      26  assert(x_ptr != NULL);  in xnn_f16_vclamp_ukernel__neonfp16arith_x16()
      29  const __fp16* x = (const __fp16*) x_ptr;  in xnn_f16_vclamp_ukernel__neonfp16arith_x16()
|
D | vclamp-f16c-x16.c |
      20  const void* restrict x_ptr,  in xnn_f16_vclamp_ukernel__f16c_x16() argument
      26  assert(x_ptr != NULL);  in xnn_f16_vclamp_ukernel__f16c_x16()
      29  const uint16_t* x = (const uint16_t*) x_ptr;  in xnn_f16_vclamp_ukernel__f16c_x16()
|
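Note: the f16 clamp kernels share one prototype taking const void* restrict x_ptr; each backend then casts it to its natural element type, __fp16 where the target has native half-precision arithmetic (neonfp16arith) and raw uint16_t storage on the F16C conversion path. The sketch below shows the native-arithmetic idea with NEON fp16 intrinsics; it is not the generated XNNPACK kernel, it assumes a compiler targeting ARMv8.2-A FP16 (e.g. -march=armv8.2-a+fp16), and the batch size is taken in elements here rather than bytes.

#include <arm_neon.h>
#include <stddef.h>

/* Sketch only: clamp half-precision values to [lo, hi] using fp16 vector
 * arithmetic.  The void* parameters mirror the generated kernels' prototypes. */
void f16_clamp_neon_sketch(size_t batch, const void* restrict x_ptr,
                           void* restrict y_ptr, float16_t lo, float16_t hi) {
  const float16_t* x = (const float16_t*)x_ptr;
  float16_t* y = (float16_t*)y_ptr;
  const float16x8_t vlo = vdupq_n_f16(lo);
  const float16x8_t vhi = vdupq_n_f16(hi);

  for (; batch >= 8; batch -= 8) {
    float16x8_t v = vld1q_f16(x); x += 8;
    v = vmaxq_f16(v, vlo);                 /* lower clamp */
    v = vminq_f16(v, vhi);                 /* upper clamp */
    vst1q_f16(y, v); y += 8;
  }
  for (; batch != 0; batch--) {            /* scalar tail */
    float16_t v = *x++;
    if (v < lo) v = lo;
    if (v > hi) v = hi;
    *y++ = v;
  }
}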
/external/XNNPACK/src/f32-vrelu/ |
D | scalar.c.in |
      16  const float* x_ptr,
      22  assert(x_ptr != NULL);
      25  const uint32_t* x = (const uint32_t*)x_ptr;
|