/external/libaom/libaom/av1/common/x86/

jnt_convolve_avx2.c
  109  const __m128i res_0 = _mm256_castsi256_si128(res_8);  in av1_dist_wtd_convolve_x_avx2() local
  113  _mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_0);  in av1_dist_wtd_convolve_x_avx2()
  118  _mm_cvtsi128_si32(res_0);  in av1_dist_wtd_convolve_x_avx2()
  123  const __m128i res_0 = _mm256_castsi256_si128(res_unsigned);  in av1_dist_wtd_convolve_x_avx2() local
  124  _mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_0);  in av1_dist_wtd_convolve_x_avx2()
  164  const __m128i res_0 = _mm256_castsi256_si128(res_8);  in av1_dist_wtd_convolve_x_avx2() local
  168  _mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_0);  in av1_dist_wtd_convolve_x_avx2()
  173  _mm_cvtsi128_si32(res_0);  in av1_dist_wtd_convolve_x_avx2()
  178  const __m128i res_0 = _mm256_castsi256_si128(res_unsigned);  in av1_dist_wtd_convolve_x_avx2() local
  179  _mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_0);  in av1_dist_wtd_convolve_x_avx2()
  [all …]

convolve_avx2.c
  114  const __m128i res_0 = _mm256_castsi256_si128(res_a);  in av1_convolve_y_sr_avx2() local
  117  _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res_0);  in av1_convolve_y_sr_avx2()
  121  const __m128i res_0 = _mm256_castsi256_si128(res_8b_lo);  in av1_convolve_y_sr_avx2() local
  124  _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j], res_0);  in av1_convolve_y_sr_avx2()
  128  xx_storel_32(&dst[i * dst_stride + j], res_0);  in av1_convolve_y_sr_avx2()
  134  *(uint16_t *)p_0 = _mm_cvtsi128_si32(res_0);  in av1_convolve_y_sr_avx2()
  227  const __m128i res_0 = _mm256_castsi256_si128(res_a);  in av1_convolve_y_sr_avx2() local
  230  _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res_0);  in av1_convolve_y_sr_avx2()
  234  const __m128i res_0 = _mm256_castsi256_si128(res_8b_lo);  in av1_convolve_y_sr_avx2() local
  237  _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j], res_0);  in av1_convolve_y_sr_avx2()
  [all …]

highbd_jnt_convolve_avx2.c
  152  const __m128i res_0 = _mm256_castsi256_si128(res_clip);  in av1_highbd_dist_wtd_convolve_2d_copy_avx2() local
  155  _mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_0);  in av1_highbd_dist_wtd_convolve_2d_copy_avx2()
  162  const __m128i res_0 = _mm256_castsi256_si128(res_unsigned_16b);  in av1_highbd_dist_wtd_convolve_2d_copy_avx2() local
  165  _mm_storel_epi64((__m128i *)(&dst[i * dst_stride + j]), res_0);  in av1_highbd_dist_wtd_convolve_2d_copy_avx2()
  209  const __m128i res_0 = _mm256_castsi256_si128(res_clip);  in av1_highbd_dist_wtd_convolve_2d_copy_avx2() local
  212  _mm_store_si128((__m128i *)(&dst0[i * dst_stride0 + j]), res_0);  in av1_highbd_dist_wtd_convolve_2d_copy_avx2()
  218  const __m128i res_0 = _mm256_castsi256_si128(res_unsigned_16b);  in av1_highbd_dist_wtd_convolve_2d_copy_avx2() local
  221  _mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_0);  in av1_highbd_dist_wtd_convolve_2d_copy_avx2()
  384  const __m128i res_0 = _mm256_castsi256_si128(res_clip);  in av1_highbd_dist_wtd_convolve_2d_avx2() local
  387  _mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_0);  in av1_highbd_dist_wtd_convolve_2d_avx2()
  [all …]

highbd_wiener_convolve_ssse3.c
  76   const __m128i res_0 = _mm_madd_epi16(data, coeff_01);  in av1_highbd_wiener_convolve_add_src_ssse3() local
  84   __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),  in av1_highbd_wiener_convolve_add_src_ssse3()
  154  const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);  in av1_highbd_wiener_convolve_add_src_ssse3() local
  159  const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),  in av1_highbd_wiener_convolve_add_src_ssse3()

wiener_convolve_sse2.c
  74   const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);  in av1_wiener_convolve_add_src_sse2() local
  82   __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),  in av1_wiener_convolve_add_src_sse2()
  152  const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);  in av1_wiener_convolve_add_src_sse2() local
  157  const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),  in av1_wiener_convolve_add_src_sse2()

highbd_wiener_convolve_avx2.c
  105  const __m256i res_0 = _mm256_madd_epi16(src_0, coeffs_01);  in av1_highbd_wiener_convolve_add_src_avx2() local
  117  _mm256_add_epi32(res_0, res_4), _mm256_add_epi32(res_2, res_6));  in av1_highbd_wiener_convolve_add_src_avx2()
  196  const __m256i res_0 = _mm256_madd_epi16(src_0, coeffs_01);  in av1_highbd_wiener_convolve_add_src_avx2() local
  202  _mm256_add_epi32(res_0, res_2), _mm256_add_epi32(res_4, res_6));  in av1_highbd_wiener_convolve_add_src_avx2()

wiener_convolve_avx2.c
  113  const __m256i res_0 = _mm256_madd_epi16(src_0, coeffs_01);  in av1_wiener_convolve_add_src_avx2() local
  125  _mm256_add_epi32(res_0, res_4), _mm256_add_epi32(res_2, res_6));  in av1_wiener_convolve_add_src_avx2()
  202  const __m256i res_0 = _mm256_madd_epi16(src_0, coeffs_01);  in av1_wiener_convolve_add_src_avx2() local
  208  _mm256_add_epi32(res_0, res_2), _mm256_add_epi32(res_4, res_6));  in av1_wiener_convolve_add_src_avx2()

jnt_convolve_ssse3.c
  88   const __m128i res_0 = _mm_madd_epi16(src_lo, coeff_01);  in av1_dist_wtd_convolve_2d_ssse3() local
  96   __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),  in av1_dist_wtd_convolve_2d_ssse3()
  165  const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);  in av1_dist_wtd_convolve_2d_ssse3() local
  170  const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),  in av1_dist_wtd_convolve_2d_ssse3()

convolve_2d_avx2.c
  160  const __m128i res_0 = _mm256_castsi256_si128(res_8b);  in av1_convolve_2d_sr_avx2() local
  167  _mm_storel_epi64(p_0, res_0);  in av1_convolve_2d_sr_avx2()
  170  xx_storel_32(p_0, res_0);  in av1_convolve_2d_sr_avx2()
  173  *(uint16_t *)p_0 = _mm_cvtsi128_si32(res_0);  in av1_convolve_2d_sr_avx2()

highbd_convolve_2d_sse4.c
  240  const __m128i res_0 = _mm_madd_epi16(data, coeff_01);  in av1_highbd_dist_wtd_convolve_2d_sse4_1() local
  248  __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),  in av1_highbd_dist_wtd_convolve_2d_sse4_1()
  317  const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);  in av1_highbd_dist_wtd_convolve_2d_sse4_1() local
  322  const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),  in av1_highbd_dist_wtd_convolve_2d_sse4_1()

convolve_2d_sse2.c
  76   const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);  in av1_convolve_2d_sr_sse2() local
  84   __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),  in av1_convolve_2d_sr_sse2()
  157  const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);  in av1_convolve_2d_sr_sse2() local
  162  const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),  in av1_convolve_2d_sr_sse2()

highbd_warp_plane_sse4.c
  116  const __m128i res_0 = _mm_madd_epi16(src_1, coeff[0]);  in highbd_filter_src_pixels() local
  125  _mm_add_epi32(_mm_add_epi32(res_0, res_4), _mm_add_epi32(res_2, res_6));  in highbd_filter_src_pixels()
  479  const __m128i res_0 = _mm_madd_epi16(src_0, coeff_0);  in av1_highbd_warp_affine_sse4_1() local
  484  const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),  in av1_highbd_warp_affine_sse4_1()

jnt_convolve_sse2.c
  458  const __m128i res_0 = _mm_madd_epi16(src_lo, coeff_01);  in av1_dist_wtd_convolve_2d_sse2() local
  472  __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),  in av1_dist_wtd_convolve_2d_sse2()
  549  const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);  in av1_dist_wtd_convolve_2d_sse2() local
  554  const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),  in av1_dist_wtd_convolve_2d_sse2()

warp_plane_sse4.c
  533  const __m128i res_0 = _mm_madd_epi16(src_0, coeffs[0]);  in filter_src_pixels_vertical() local
  539  _mm_add_epi32(_mm_add_epi32(res_0, res_2), _mm_add_epi32(res_4, res_6));  in filter_src_pixels_vertical()
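Nearly every hit in this directory is one of two idioms: the convolution kernels build partial sums with _mm_madd_epi16 (multiply eight 16-bit pixel/coefficient pairs and add adjacent products into four 32-bit lanes) and then combine res_0/res_2/res_4/res_6 with _mm_add_epi32, while the store paths cast the 256-bit result down with _mm256_castsi256_si128 and write only its low part. A minimal sketch of the first idiom, assuming pre-interleaved inputs, an invented helper name, and a made-up ROUND_SHIFT; this is an illustration, not the libaom code:

#include <emmintrin.h> /* SSE2 */

#define ROUND_SHIFT 3 /* assumed rounding shift, purely illustrative */

/* Hypothetical helper: apply the even taps of an 8-tap filter to four
 * pixels.  s[k] holds the 16-bit pixels for tap pair (2k, 2k+1), and
 * coeffs[k] holds the matching coefficient pair, both pre-interleaved. */
static __m128i filter_even_taps_4px(const __m128i s[4],
                                    const __m128i coeffs[4]) {
  const __m128i res_0 = _mm_madd_epi16(s[0], coeffs[0]); /* taps 0,1 */
  const __m128i res_2 = _mm_madd_epi16(s[1], coeffs[1]); /* taps 2,3 */
  const __m128i res_4 = _mm_madd_epi16(s[2], coeffs[2]); /* taps 4,5 */
  const __m128i res_6 = _mm_madd_epi16(s[3], coeffs[3]); /* taps 6,7 */
  /* Same shape as the res_even lines above: sum the four partial sums. */
  const __m128i res_even =
      _mm_add_epi32(_mm_add_epi32(res_0, res_2), _mm_add_epi32(res_4, res_6));
  /* Round and shift back down to the working precision. */
  const __m128i round_const = _mm_set1_epi32(1 << (ROUND_SHIFT - 1));
  return _mm_srai_epi32(_mm_add_epi32(res_even, round_const), ROUND_SHIFT);
}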
/external/libaom/libaom/av1/common/arm/

warp_plane_neon.c
  313  int32x4_t res_0, res_1;  in vertical_filter_neon() local
  361  res_0 = vmull_s16(src_0, fltr_0);  in vertical_filter_neon()
  366  res_0 = vmlal_s16(res_0, src_0, fltr_0);  in vertical_filter_neon()
  367  res_0_im = vpadd_s32(vget_low_s32(res_0), vget_high_s32(res_0));  in vertical_filter_neon()
  386  res_0 = vmull_s16(src_0, fltr_0);  in vertical_filter_neon()
  391  res_0 = vmlal_s16(res_0, src_0, fltr_0);  in vertical_filter_neon()
  392  res_0_im = vpadd_s32(vget_low_s32(res_0), vget_high_s32(res_0));  in vertical_filter_neon()
  414  res_0 = vmull_s16(src_0, fltr_0);  in vertical_filter_neon()
  419  res_0 = vmlal_s16(res_0, src_0, fltr_0);  in vertical_filter_neon()
  420  res_0_im = vpadd_s32(vget_low_s32(res_0), vget_high_s32(res_0));  in vertical_filter_neon()
  [all …]
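The NEON hits are the widening multiply-accumulate version of the same pattern: vmull_s16 starts a 32-bit accumulator, vmlal_s16 folds in further tap products, and vpadd_s32 reduces the four lanes to the horizontal sums kept in res_0_im. A small sketch under those assumptions; the helper name and its two-tap scope are made up for illustration:

#include <arm_neon.h>

/* Hypothetical two-tap reduction mirroring vertical_filter_neon()'s use of
 * res_0: widen-multiply the first tap, accumulate the second, then pairwise
 * add the low/high halves so adjacent 32-bit lanes collapse to one sum.   */
static int32x2_t accumulate_two_taps(int16x4_t src_a, int16x4_t fltr_a,
                                     int16x4_t src_b, int16x4_t fltr_b) {
  int32x4_t res_0 = vmull_s16(src_a, fltr_a); /* 16x16 -> 32-bit products */
  res_0 = vmlal_s16(res_0, src_b, fltr_b);    /* multiply-accumulate      */
  return vpadd_s32(vget_low_s32(res_0), vget_high_s32(res_0));
}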
/external/libaom/libaom/aom_dsp/x86/

convolve_avx2.h
  104  const __m128i res_0 = _mm256_castsi256_si128(res_8b); \
  110  _mm_storel_epi64(p_0, res_0); \
  113  xx_storel_32(p_0, res_0); \
  116  *(uint16_t *)p_0 = _mm_cvtsi128_si32(res_0); \
  191  const __m128i res_0 = _mm256_castsi256_si128(res_8); \
  194  _mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_0); \
  198  const __m128i res_0 = _mm256_castsi256_si128(res_unsigned); \
  199  _mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_0); \
  220  const __m128i res_0 = _mm256_castsi256_si128(res_8); \
  223  *(uint32_t *)(&dst0[i * dst_stride0 + j]) = _mm_cvtsi128_si32(res_0); \
  [all …]

convolve_sse2.h
  37   const __m128i res_0 = _mm_madd_epi16(s[0], coeffs[0]);  in convolve() local
  43   _mm_add_epi32(_mm_add_epi32(res_0, res_1), _mm_add_epi32(res_2, res_3));  in convolve()
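The convolve_avx2.h macro keeps one result register but picks its store by block width: a full 16-byte store, _mm_storel_epi64 for 8 pixels, a 32-bit store (xx_storel_32) for 4, and a plain uint16_t write for 2. A standalone sketch of that selection, using memcpy in place of the aom helpers; the function name and signature are invented for illustration:

#include <emmintrin.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical width-dependent store: write only as many low bytes of the
 * 128-bit result as the block width w (in 1-byte pixels) requires.        */
static void store_low_bytes(uint8_t *dst, __m128i res_0, int w) {
  if (w >= 16) {
    _mm_storeu_si128((__m128i *)dst, res_0);            /* 16 bytes */
  } else if (w == 8) {
    _mm_storel_epi64((__m128i *)dst, res_0);            /* low 8 bytes */
  } else if (w == 4) {
    const uint32_t lo32 = (uint32_t)_mm_cvtsi128_si32(res_0);
    memcpy(dst, &lo32, sizeof(lo32));                   /* low 4 bytes */
  } else {                                              /* w == 2 */
    const uint16_t lo16 = (uint16_t)_mm_cvtsi128_si32(res_0);
    memcpy(dst, &lo16, sizeof(lo16));                   /* low 2 bytes */
  }
}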
/external/u-boot/board/freescale/common/

sys_eeprom.c
  41   u8 res_0[40];  /* 0x18 - 0x3f Reserved */  member
  52   u8 res_0;  /* 0x1b Reserved */  member
  206  e.res_0 = 0xFF;  in prog_eeprom()
  209  memset(e.res_0, 0xFF, sizeof(e.res_0));  in prog_eeprom()
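Both sys_eeprom.c variants use res_0 as reserved space in the on-EEPROM layout and fill it with 0xFF (the usual erased-EEPROM value) before programming. A cut-down sketch of that idea; the surrounding fields, names, and sizes are placeholders, not the actual u-boot layout:

#include <stdint.h>
#include <string.h>

/* Placeholder layout: the reserved gap is an explicit res_0 field so the
 * fields that follow it keep fixed offsets in the EEPROM image.          */
struct eeprom_image {
  uint8_t id[4];     /* 0x00  signature (placeholder)     */
  uint8_t sn[12];    /* 0x04  serial number (placeholder) */
  uint8_t errata[8]; /* 0x10  errata id (placeholder)     */
  uint8_t res_0[40]; /* 0x18 - 0x3f  reserved             */
};

static void prepare_image(struct eeprom_image *e) {
  /* Mirrors the memset(e.res_0, 0xFF, sizeof(e.res_0)) call shown above. */
  memset(e->res_0, 0xFF, sizeof(e->res_0));
}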
/external/ltp/testcases/network/busy_poll/

busy_poll02.sh
  40  local poll_cmp=$(( 100 - ($(cat res_50) * 100) / $(cat res_0) ))

busy_poll03.sh
  43  local poll_cmp=$(( 100 - ($(cat res_50) * 100) / $(cat res_0) ))

busy_poll01.sh
  50  local poll_cmp=$(( 100 - ($(cat res_50) * 100) / $(cat res_0) ))
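All three scripts compute the same number: res_0 and res_50 are result files written by earlier test steps (the suffix presumably being the busy-poll setting used for that run), and poll_cmp is the percentage by which the res_50 run improves on the res_0 run. The same integer arithmetic restated in C, with made-up example values:

#include <stdio.h>

/* poll_cmp = 100 - (res_50 * 100) / res_0, using integer division exactly
 * as the shell's $(( )) arithmetic does.                                  */
static long poll_cmp(long res_0, long res_50) {
  return 100 - (res_50 * 100) / res_0;
}

int main(void) {
  /* Example: 9000 vs 10000 -> 10 (a 10% improvement). */
  printf("%ld\n", poll_cmp(10000, 9000));
  return 0;
}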
/external/u-boot/board/varisys/common/

sys_eeprom.c
  37   u8 res_0;  /* 0x1b Reserved */  member
  197  e.res_0 = 0xFF;  in prog_eeprom()
/external/libaom/libaom/aom_dsp/arm/

blend_a64_mask_neon.c
  117  uint8x8_t res_0 = vqmovun_s16(src0_0);  in blend_4x4() local
  120  vst1_lane_u32((uint32_t *)(dst + 0 * dst_stride), vreinterpret_u32_u8(res_0),  in blend_4x4()
  122  vst1_lane_u32((uint32_t *)(dst + 1 * dst_stride), vreinterpret_u32_u8(res_0),  in blend_4x4()
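blend_a64_mask_neon.c narrows its 16-bit blend result to pixels with vqmovun_s16 and then writes each 4-pixel row as a single 32-bit lane store. A minimal sketch of just that narrowing-store step; the helper name is invented, and the actual blend_4x4() presumably applies the a64 mask before this point:

#include <arm_neon.h>
#include <stdint.h>

/* Hypothetical narrowing store: saturate eight 16-bit values to unsigned
 * 8-bit pixels, then store the low four bytes as one u32 lane.            */
static void store_4_pixels(uint8_t *dst, int16x8_t src0_0) {
  const uint8x8_t res_0 = vqmovun_s16(src0_0); /* saturating narrow to u8 */
  vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(res_0), 0);
}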
/external/u-boot/include/

fsl_memac.h
  14  u32 res_0[2];  member
/external/tensorflow/tensorflow/compiler/xrt/tests/

raw_api_test.cc
  671  auto res_0 = ops::XRTReadLiteralAndRelease(root, handle_3);  in TEST() local
  677  TF_EXPECT_OK(session.Run({res_0, res_1}, &outputs));  in TEST()