/external/XNNPACK/src/u8-vclamp/ |
D | scalar-x4.c | 27 uint32_t vt3 = (uint32_t) x[3]; in xnn_u8_vclamp_ukernel__scalar_x4() local
|
/external/XNNPACK/src/s8-vclamp/ |
D | scalar-x4.c | 27 int32_t vt3 = (int32_t) x[3]; in xnn_s8_vclamp_ukernel__scalar_x4() local
|
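The u8/s8 clamp hits above both widen a byte before comparing it against the clamp bounds. A minimal scalar sketch of that pattern follows; the function name, the y_min/y_max parameters, and the lack of remainder handling are illustrative assumptions, not the actual XNNPACK kernel or its params struct.

#include <stddef.h>
#include <stdint.h>

/* Sketch of a scalar byte-clamp kernel in the spirit of
 * xnn_u8_vclamp_ukernel__scalar_x4; names and signature are illustrative. */
static void clamp_u8_scalar(size_t n, const uint8_t* x, uint8_t* y,
                            uint8_t y_min, uint8_t y_max) {
  for (size_t i = 0; i < n; i++) {
    uint32_t vt = (uint32_t) x[i];                       /* widen, as vt3 does for x[3] above */
    vt = vt < (uint32_t) y_min ? (uint32_t) y_min : vt;  /* apply lower bound */
    vt = vt > (uint32_t) y_max ? (uint32_t) y_max : vt;  /* apply upper bound */
    y[i] = (uint8_t) vt;
  }
}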
/external/XNNPACK/src/x8-lut/gen/ |
D | lut-scalar-x4.c | 36 const uint32_t vt3 = (uint32_t) t[vx3]; in xnn_x8_lut_ukernel__scalar_x4() local
|
D | lut-scalar-x8.c | 40 const uint32_t vt3 = (uint32_t) t[vx3]; in xnn_x8_lut_ukernel__scalar_x8() local
|
D | lut-scalar-x16.c | 48 const uint32_t vt3 = (uint32_t) t[vx3]; in xnn_x8_lut_ukernel__scalar_x16() local
|
D | lut-avx-x16.c | 32 const __m128i vt3 = _mm_load_si128((const __m128i*) (t + 48)); in xnn_x8_lut_ukernel__avx_x16() local
|
D | lut-ssse3-x16.c | 31 const __m128i vt3 = _mm_load_si128((const __m128i*) (t + 48)); in xnn_x8_lut_ukernel__ssse3_x16() local
|
D | lut-avx512skx-vpshufb-x64.c | 32 const __m512i vt3 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 48))); in xnn_x8_lut_ukernel__avx512skx_vpshufb_x64() local
|
D | lut-avx2-x32.c | 32 const __m256i vt3 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 48))); in xnn_x8_lut_ukernel__avx2_x32() local
|
D | lut-avx-x32.c | 32 const __m128i vt3 = _mm_load_si128((const __m128i*) (t + 48)); in xnn_x8_lut_ukernel__avx_x32() local
|
D | lut-avx512skx-vpshufb-x128.c | 32 const __m512i vt3 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 48))); in xnn_x8_lut_ukernel__avx512skx_vpshufb_x128() local
|
D | lut-avx2-x64.c | 32 const __m256i vt3 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 48))); in xnn_x8_lut_ukernel__avx2_x64() local
|
D | lut-ssse3-x32.c | 31 const __m128i vt3 = _mm_load_si128((const __m128i*) (t + 48)); in xnn_x8_lut_ukernel__ssse3_x32() local
|
D | lut-avx512skx-vpshufb-x192.c | 32 const __m512i vt3 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 48))); in xnn_x8_lut_ukernel__avx512skx_vpshufb_x192() local
|
D | lut-avx-x48.c | 32 const __m128i vt3 = _mm_load_si128((const __m128i*) (t + 48)); in xnn_x8_lut_ukernel__avx_x48() local
|
D | lut-avx2-x96.c | 32 const __m256i vt3 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 48))); in xnn_x8_lut_ukernel__avx2_x96() local
|
D | lut-avx512skx-vpshufb-x256.c | 32 const __m512i vt3 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 48))); in xnn_x8_lut_ukernel__avx512skx_vpshufb_x256() local
|
D | lut-avx2-x128.c | 32 const __m256i vt3 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 48))); in xnn_x8_lut_ukernel__avx2_x128() local
|
D | lut-avx-x64.c | 32 const __m128i vt3 = _mm_load_si128((const __m128i*) (t + 48)); in xnn_x8_lut_ukernel__avx_x64() local
|
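Every x8-lut hit above loads the fourth 16-byte chunk of a 256-entry table (t + 48) into vt3; the SIMD variants then use byte shuffles (pshufb/vpshufb and their AVX/AVX-512 counterparts) to do many lookups per instruction, while the scalar variants reduce to a plain indexed load. A minimal scalar sketch, with an illustrative name and signature rather than the XNNPACK API:

#include <stddef.h>
#include <stdint.h>

/* Sketch of a scalar x8 LUT kernel in the spirit of
 * xnn_x8_lut_ukernel__scalar_x4: each input byte indexes a 256-entry table. */
static void lut_u8_scalar(size_t n, const uint8_t* x, uint8_t* y,
                          const uint8_t t[256]) {
  for (size_t i = 0; i < n; i++) {
    const uint32_t vx = (uint32_t) x[i];  /* widen the index, as the kernels above do */
    y[i] = t[vx];                         /* table lookup */
  }
}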
/external/XNNPACK/src/x32-packx/ |
D | x4-wasmsimd.c | 51 const v128_t vt3 = wasm_v32x4_shuffle(vx2, vx3, 2, 6, 3, 7); in xnn_x32_packx_ukernel_4x__wasmsimd() local
|
D | x4-sse.c | 52 const __m128 vt3 = _mm_unpackhi_ps(vx2, vx3); in xnn_x32_packx_ukernel_4x__sse() local
|
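Both x32-packx hits above are one step of a 4x4 transpose (interleaving the high halves of rows 2 and 3); the kernel's overall effect is to pack four rows of 32-bit data into a column-interleaved panel for the GEMM microkernels. A scalar sketch of that interleaving, with illustrative names and no tail handling:

#include <stddef.h>
#include <stdint.h>

/* Sketch of a 4-row x32 pack: write the i-th element of each of the four
 * input rows contiguously. The SIMD kernels reach the same layout through
 * unpack/shuffle steps such as vt3 = unpackhi(vx2, vx3). */
static void packx_4x_scalar(size_t k, const uint32_t* x0, const uint32_t* x1,
                            const uint32_t* x2, const uint32_t* x3, uint32_t* y) {
  for (size_t i = 0; i < k; i++) {
    y[4 * i + 0] = x0[i];
    y[4 * i + 1] = x1[i];
    y[4 * i + 2] = x2[i];
    y[4 * i + 3] = x3[i];
  }
}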
/external/libvpx/libvpx/vp8/common/mips/msa/ |
D | idct_msa.c | 90 v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; in idct4x4_addblk_msa() local
 | | 183 v4i32 hz0_w, hz1_w, hz2_w, hz3_w, vt0, vt1, vt2, vt3, res0, res1, res2, res3; in dequant_idct4x4_addblk_msa() local
 | | 219 v8i16 hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3, res0, res1, res2, res3; in dequant_idct4x4_addblk_2x_msa() local
|
/external/XNNPACK/src/f32-vscaleexpminusmax/gen/ |
D | avx512f-p5-scalef-x64.c | 66 __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3); in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x64() local
|
/external/XNNPACK/src/f32-vscaleextexp/gen/ |
D | avx512f-p5-scalef-x64.c | 61 __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3); in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x64() local
|
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/ |
D | scalar-rr2-p5-x4.c | 83 float vt3 = vn3 * vminus_ln2_hi + vx3; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x4() local
|
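The three f32 exp-family hits above are all the same step: the high half of a two-step ("rr2", Cody-Waite) range reduction, t = n*(-ln2_hi) + x, after which a low-order correction and a degree-5 polynomial reconstruct e^x as 2^n * p(t). A scalar sketch of the full pattern follows; the helper names are illustrative, the constants are representative of the kind the p5 kernels use rather than verified against the generated sources, and the real kernels additionally subtract a running maximum, flush tiny results to zero, and accumulate or scale the outputs.

#include <stdint.h>
#include <string.h>

static inline uint32_t float_as_u32(float f) { uint32_t u; memcpy(&u, &f, sizeof u); return u; }
static inline float u32_as_float(uint32_t u) { float f; memcpy(&f, &u, sizeof f); return f; }

/* Sketch of an rr2-p5 exp evaluation in the spirit of the kernels above. */
static float exp_rr2_p5(float vx) {
  const float vlog2e        = 0x1.715476p+0f;   /* log2(e) */
  const float vmagic_bias   = 0x1.8000FEp23f;   /* pushes round(x*log2e) into the low mantissa bits */
  const float vminus_ln2_hi = -0x1.62E400p-1f;  /* high part of -ln(2), exact when multiplied by small n */
  const float vminus_ln2_lo = -0x1.7F7D1Cp-20f; /* low-part correction */
  /* Representative degree-5 minimax coefficients for e^t on [-ln2/2, ln2/2]. */
  const float vc5 = 0x1.0F9F9Cp-7f, vc4 = 0x1.573A1Ap-5f, vc3 = 0x1.555A80p-3f;
  const float vc2 = 0x1.FFFDC6p-2f, vc1 = 0x1.FFFFF6p-1f;

  float vn = vx * vlog2e + vmagic_bias;                    /* n = round(x * log2(e)) + bias */
  const float vs = u32_as_float(float_as_u32(vn) << 23);   /* reconstruct 2^n from vn's bits */
  vn -= vmagic_bias;

  float vt = vn * vminus_ln2_hi + vx;                      /* the step indexed above */
  vt = vn * vminus_ln2_lo + vt;                            /* second reduction step */

  float vp = vc5 * vt + vc4;                               /* Horner evaluation of p(t) */
  vp = vp * vt + vc3;
  vp = vp * vt + vc2;
  vp = vp * vt + vc1;

  vt *= vs;
  return vt * vp + vs;                                     /* e^x ~= s + s*t*p(t) */
}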