/external/libvpx/libvpx/vpx_dsp/x86/ |
D | highbd_variance_sse2.c |
    93   void vpx_highbd_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \
    96   uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
    102  void vpx_highbd_10_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \
    105  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
    113  void vpx_highbd_12_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \
    116  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
    131  const uint8_t *src8, int src_stride, \
    134  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
    143  const uint8_t *src8, int src_stride, \
    146  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
    [all …]
|
D | convolve.h |
    152  void vpx_highbd_convolve8_##name##_##opt(const uint8_t *src8, \
    162  uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
    241  vpx_highbd_convolve8_##name##_c(src8, src_stride, dst8, dst_stride, \
|
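All of the vpx_dsp high-bit-depth hits above share one libvpx convention: src8 is declared uint8_t * only so that the 8-bit and 10/12-bit codepaths can share a signature, and CONVERT_TO_SHORTPTR recovers the real uint16_t * on entry. A minimal sketch of that pattern, assuming the usual shift-based macros from vpx_dsp/vpx_dsp_common.h (the macro bodies and the helper below are illustrative, not quoted from this listing):

#include <stdint.h>

/* Assumed to match vpx_dsp/vpx_dsp_common.h: a uint16_t plane is smuggled
   through uint8_t * APIs by shifting its address. */
#define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1))
#define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)(x)) >> 1))

/* Hypothetical helper in the style of the listed wrappers: the caller passes
   a disguised 16-bit source plane via src8. */
static void highbd_copy_row(const uint8_t *src8, uint16_t *dst, int w) {
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);  /* undo the disguise */
  for (int i = 0; i < w; ++i) dst[i] = src[i];
}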
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | vpx_convolve8_vert_msa.c |
    19   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_4w_msa() local
    42   LD_SB4(src, src_stride, src7, src8, src9, src10);  in common_vt_8t_4w_msa()
    45   ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,  in common_vt_8t_4w_msa()
    70   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_8w_msa() local
    89   LD_SB4(src, src_stride, src7, src8, src9, src10);  in common_vt_8t_8w_msa()
    90   XORI_B4_128_SB(src7, src8, src9, src10);  in common_vt_8t_8w_msa()
    93   ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,  in common_vt_8t_8w_msa()
    124  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_16w_msa() local
    148  LD_SB4(src, src_stride, src7, src8, src9, src10);  in common_vt_8t_16w_msa()
    149  XORI_B4_128_SB(src7, src8, src9, src10);  in common_vt_8t_16w_msa()
    [all …]
|
D | vpx_convolve8_avg_vert_msa.c |
    22   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_and_aver_dst_4w_msa() local
    45   LD_SB4(src, src_stride, src7, src8, src9, src10);  in common_vt_8t_and_aver_dst_4w_msa()
    49   ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,  in common_vt_8t_and_aver_dst_4w_msa()
    82   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_and_aver_dst_8w_msa() local
    102  LD_SB4(src, src_stride, src7, src8, src9, src10);  in common_vt_8t_and_aver_dst_8w_msa()
    106  XORI_B4_128_SB(src7, src8, src9, src10);  in common_vt_8t_and_aver_dst_8w_msa()
    107  ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,  in common_vt_8t_and_aver_dst_8w_msa()
    143  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_and_aver_dst_16w_mult_msa() local
    172  LD_SB4(src_tmp, src_stride, src7, src8, src9, src10);  in common_vt_8t_and_aver_dst_16w_mult_msa()
    176  XORI_B4_128_SB(src7, src8, src9, src10);  in common_vt_8t_and_aver_dst_16w_mult_msa()
    [all …]
|
D | vpx_convolve_avg_msa.c |
    105  v16u8 src8, src9, src10, src11, src12, src13, src14, src15;  in avg_width32_msa() local
    116  LD_UB4(src, src_stride, src8, src10, src12, src14);  in avg_width32_msa()
    127  AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11,  in avg_width32_msa()
    146  v16u8 src8, src9, src10, src11, src12, src13, src14, src15;  in avg_width64_msa() local
    155  LD_UB4(src, 16, src8, src9, src10, src11);  in avg_width64_msa()
    173  AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11,  in avg_width64_msa()
|
D | vpx_convolve8_msa.c |
    29   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_hv_8ht_8vt_4w_msa() local
    68   LD_SB4(src, src_stride, src7, src8, src9, src10);  in common_hv_8ht_8vt_4w_msa()
    69   XORI_B4_128_SB(src7, src8, src9, src10);  in common_hv_8ht_8vt_4w_msa()
    72   hz_out7 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3,  in common_hv_8ht_8vt_4w_msa()
    103  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_hv_8ht_8vt_8w_msa() local
    149  LD_SB4(src, src_stride, src7, src8, src9, src10);  in common_hv_8ht_8vt_8w_msa()
    152  XORI_B4_128_SB(src7, src8, src9, src10);  in common_hv_8ht_8vt_8w_msa()
    160  hz_out8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3,  in common_hv_8ht_8vt_8w_msa()
    268  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, mask;  in common_hv_2ht_2vt_4x8_msa() local
    285  src8 = LD_SB(src);  in common_hv_2ht_2vt_4x8_msa()
    [all …]
|
D | vpx_convolve8_avg_msa.c |
    23   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_hv_8ht_8vt_and_aver_dst_4w_msa() local
    62   LD_SB4(src, src_stride, src7, src8, src9, src10);  in common_hv_8ht_8vt_and_aver_dst_4w_msa()
    63   XORI_B4_128_SB(src7, src8, src9, src10);  in common_hv_8ht_8vt_and_aver_dst_4w_msa()
    67   hz_out7 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3,  in common_hv_8ht_8vt_and_aver_dst_4w_msa()
    105  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_hv_8ht_8vt_and_aver_dst_8w_msa() local
    151  LD_SB4(src, src_stride, src7, src8, src9, src10);  in common_hv_8ht_8vt_and_aver_dst_8w_msa()
    152  XORI_B4_128_SB(src7, src8, src9, src10);  in common_hv_8ht_8vt_and_aver_dst_8w_msa()
    163  hz_out8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3,  in common_hv_8ht_8vt_and_aver_dst_8w_msa()
    289  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, mask;  in common_hv_2ht_2vt_and_aver_dst_4x8_msa() local
    307  src8 = LD_SB(src);  in common_hv_2ht_2vt_and_aver_dst_4x8_msa()
    [all …]
|
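The MSA vertical-filter routines above all vectorize the same scalar loop: keep the last eight input rows live, produce a few output rows, then load four fresh rows (src7..src10 in the listing, via LD_SB4) and slide the window. A plain-C reference of that structure, with no MSA intrinsics and illustrative names; the rounding uses libvpx's 7-bit filter convention:

#include <stdint.h>

/* Scalar outline of an 8-tap vertical convolution; the MSA code above keeps
   the eight input rows in v16i8 registers instead of re-reading memory. */
static void vert_8tap_ref(const uint8_t *src, int src_stride, uint8_t *dst,
                          int dst_stride, int w, int h, const int16_t f[8]) {
  src -= 3 * src_stride;  /* the 8-tap window starts 3 rows above the output row */
  for (int y = 0; y < h; ++y) {
    for (int x = 0; x < w; ++x) {
      int sum = 0;
      for (int k = 0; k < 8; ++k) sum += f[k] * src[(y + k) * src_stride + x];
      sum = (sum + 64) >> 7;  /* round by FILTER_BITS = 7 */
      dst[y * dst_stride + x] = (uint8_t)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
    }
  }
}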
/external/skia/src/opts/ |
D | SkBitmapFilter_opts_SSE2.cpp |
    88   __m128i src8 = _mm_loadu_si128(row_to_filter);  in convolveHorizontally_SSE2() local
    90   __m128i src16 = _mm_unpacklo_epi8(src8, zero);  in convolveHorizontally_SSE2()
    108  src16 = _mm_unpackhi_epi8(src8, zero);  in convolveHorizontally_SSE2()
    139  __m128i src8 = _mm_loadu_si128(row_to_filter);  in convolveHorizontally_SSE2() local
    140  __m128i src16 = _mm_unpacklo_epi8(src8, zero);  in convolveHorizontally_SSE2()
    148  src16 = _mm_unpackhi_epi8(src8, zero);  in convolveHorizontally_SSE2()
    215  __m128i src8, src16, mul_hi, mul_lo, t;  in convolve4RowsHorizontally_SSE2() local
    218  src8 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src)); \  in convolve4RowsHorizontally_SSE2()
    219  src16 = _mm_unpacklo_epi8(src8, zero); \  in convolve4RowsHorizontally_SSE2()
    226  src16 = _mm_unpackhi_epi8(src8, zero); \  in convolve4RowsHorizontally_SSE2()
    [all …]
|
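The SSE2 hits show the standard widening step that precedes the multiply-accumulate: load 16 interleaved 8-bit samples, then zero-extend the low and high halves to 16-bit lanes by unpacking against a zero register. A self-contained sketch of just that step (function and parameter names are illustrative):

#include <emmintrin.h>  /* SSE2 */
#include <stdint.h>

/* Widen 16 packed 8-bit samples (e.g. four BGRA pixels) into two vectors of
   eight 16-bit lanes, as done before _mm_madd_epi16-style filtering. */
static void widen_16x8_to_16bit(const uint8_t *src, int16_t *lo8, int16_t *hi8) {
  const __m128i zero = _mm_setzero_si128();
  __m128i src8 = _mm_loadu_si128((const __m128i *)src);  /* 16 bytes, unaligned */
  __m128i lo = _mm_unpacklo_epi8(src8, zero);            /* bytes 0..7  -> u16 */
  __m128i hi = _mm_unpackhi_epi8(src8, zero);            /* bytes 8..15 -> u16 */
  _mm_storeu_si128((__m128i *)lo8, lo);
  _mm_storeu_si128((__m128i *)hi8, hi);
}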
D | SkBitmapProcState_arm_neon.cpp |
    241  uint8x16_t src8 = vld1q_u8(&sourceDataRows[filterY][outX << 2]);  in convolveVertically_neon() local
    243  int16x8_t src16_01 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(src8)));  in convolveVertically_neon()
    244  int16x8_t src16_23 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(src8)));  in convolveVertically_neon()
    309  uint8x16_t src8 = vld1q_u8(&sourceDataRows[filterY][width << 2]);  in convolveVertically_neon() local
    311  int16x8_t src16_01 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(src8)));  in convolveVertically_neon()
    312  int16x8_t src16_23 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(src8)));  in convolveVertically_neon()
|
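The NEON lines are the same widening idea: vld1q_u8 loads four 32-bit pixels as 16 bytes, vmovl_u8 zero-extends each 8-byte half to 16-bit lanes, and the reinterpret makes them signed for the later multiplies. A minimal sketch (the wrapper function itself is hypothetical):

#include <arm_neon.h>
#include <stdint.h>

/* Load 16 bytes (four RGBA pixels) and widen to two signed 16-bit vectors,
   mirroring the convolveVertically_neon lines listed above. */
static void widen_16x8_to_s16_neon(const uint8_t *src,
                                   int16x8_t *lo, int16x8_t *hi) {
  uint8x16_t src8 = vld1q_u8(src);                           /* 16 u8 lanes */
  *lo = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(src8)));  /* lanes 0..7  */
  *hi = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(src8))); /* lanes 8..15 */
}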
/external/libvpx/libvpx/vpx_dsp/ |
D | vpx_convolve.c |
    340  static void highbd_convolve_horiz(const uint8_t *src8, ptrdiff_t src_stride,  in highbd_convolve_horiz() argument
    346  uint16_t *src = CONVERT_TO_SHORTPTR(src8);  in highbd_convolve_horiz()
    365  static void highbd_convolve_avg_horiz(const uint8_t *src8, ptrdiff_t src_stride,  in highbd_convolve_avg_horiz() argument
    371  uint16_t *src = CONVERT_TO_SHORTPTR(src8);  in highbd_convolve_avg_horiz()
    391  static void highbd_convolve_vert(const uint8_t *src8, ptrdiff_t src_stride,  in highbd_convolve_vert() argument
    397  uint16_t *src = CONVERT_TO_SHORTPTR(src8);  in highbd_convolve_vert()
    417  static void highbd_convolve_avg_vert(const uint8_t *src8, ptrdiff_t src_stride,  in highbd_convolve_avg_vert() argument
    423  uint16_t *src = CONVERT_TO_SHORTPTR(src8);  in highbd_convolve_avg_vert()
    569  void vpx_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride,  in vpx_highbd_convolve_copy_c() argument
    575  uint16_t *src = CONVERT_TO_SHORTPTR(src8);  in vpx_highbd_convolve_copy_c()
    [all …]
|
D | subtract.c |
    38   const uint8_t *src8, ptrdiff_t src_stride,  in vpx_highbd_subtract_block_c() argument
    42   uint16_t *src = CONVERT_TO_SHORTPTR(src8);  in vpx_highbd_subtract_block_c()
|
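vpx_highbd_subtract_block_c follows the same CONVERT_TO_SHORTPTR convention as the convolve wrappers above; its body is an element-wise difference of two 16-bit planes into a 16-bit residual. A rough sketch under that assumption, with guessed parameter names since the listing only shows two of its lines:

#include <stddef.h>
#include <stdint.h>

/* Illustrative high-bit-depth residual: diff = src - pred, row by row.
   src and pred are the uint16_t planes recovered via CONVERT_TO_SHORTPTR. */
static void highbd_subtract_block(int rows, int cols, int16_t *diff,
                                  ptrdiff_t diff_stride, const uint16_t *src,
                                  ptrdiff_t src_stride, const uint16_t *pred,
                                  ptrdiff_t pred_stride) {
  for (int r = 0; r < rows; ++r) {
    for (int c = 0; c < cols; ++c) diff[c] = (int16_t)(src[c] - pred[c]);
    diff += diff_stride;
    src += src_stride;
    pred += pred_stride;
  }
}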
/external/libvpx/libvpx/vp8/common/mips/msa/ |
D | copymem_msa.c |
    41   v16u8 src8, src9, src10, src11, src12, src13, src14, src15;  in copy_16x16_msa() local
    45   LD_UB8(src, src_stride, src8, src9, src10, src11, src12, src13, src14,  in copy_16x16_msa()
    50   ST_UB8(src8, src9, src10, src11, src12, src13, src14, src15, dst,  in copy_16x16_msa()
|
D | sixtap_filter_msa.c |
    317  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;  in common_vt_6t_4w_msa() local
    338  LD_SB4(src, src_stride, src5, src6, src7, src8);  in common_vt_6t_4w_msa()
    341  ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r,  in common_vt_6t_4w_msa()
    355  src4 = src8;  in common_vt_6t_4w_msa()
    364  v16i8 src0, src1, src2, src3, src4, src7, src8, src9, src10;  in common_vt_6t_8w_msa() local
    384  LD_SB4(src, src_stride, src7, src8, src9, src10);  in common_vt_6t_8w_msa()
    385  XORI_B4_128_SB(src7, src8, src9, src10);  in common_vt_6t_8w_msa()
    388  ILVR_B4_SB(src7, src4, src8, src7, src9, src8, src10, src9, src76_r,  in common_vt_6t_8w_msa()
    414  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;  in common_vt_6t_16w_msa() local
    437  LD_SB4(src, src_stride, src5, src6, src7, src8);  in common_vt_6t_16w_msa()
    [all …]
|
D | bilinear_filter_msa.c |
    295  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;  in common_vt_2t_4x8_msa() local
    308  src8 = LD_SB(src);  in common_vt_2t_4x8_msa()
    313  ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r,  in common_vt_2t_4x8_msa()
    366  v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;  in common_vt_2t_8x8mult_msa() local
    380  LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8);  in common_vt_2t_8x8mult_msa()
    385  ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7,  in common_vt_2t_8x8mult_msa()
    401  src0 = src8;  in common_vt_2t_8x8mult_msa()
    504  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, mask;  in common_hv_2ht_2vt_4x8_msa() local
    519  src8 = LD_SB(src);  in common_hv_2ht_2vt_4x8_msa()
    525  hz_out8 = HORIZ_2TAP_FILT_UH(src8, src8, mask, filt_hz, VP8_FILTER_SHIFT);  in common_hv_2ht_2vt_4x8_msa()
|
D | postproc_msa.c |
    584  v16u8 src7, src8, src_r, src_l;  in vp8_mbpost_proc_across_ip_msa() local
    596  src8 = LD_UB(src_dup - 8);  in vp8_mbpost_proc_across_ip_msa()
    599  ILVRL_B2_UB(src7, src8, src_r, src_l);  in vp8_mbpost_proc_across_ip_msa()
    616  src8 = (v16u8)((const8 + sum_l + (v8i16)src_l_h) >> 4);  in vp8_mbpost_proc_across_ip_msa()
    617  tmp = (v16u8)__msa_pckev_b((v16i8)src8, (v16i8)src7);  in vp8_mbpost_proc_across_ip_msa()
    675  src8 = LD_UB(src_dup + 16 * (col + 1) - 8);  in vp8_mbpost_proc_across_ip_msa()
|
/external/skia/src/core/ |
D | SkConfig8888.cpp |
    142  const uint8_t* src8 = (const uint8_t*)src;  in copy_g8_to_32() local
    146  dst32[x] = SkPackARGB32(0xFF, src8[x], src8[x], src8[x]);  in copy_g8_to_32()
    149  src8 += srcRB;  in copy_g8_to_32()
|
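copy_g8_to_32 expands an 8-bit grayscale row into 32-bit pixels by replicating each gray value into R, G and B under full alpha. A plain-C equivalent of that loop; the ARGB byte packing below is an assumption about what SkPackARGB32(0xFF, g, g, g) produces, not taken from the listing:

#include <stddef.h>
#include <stdint.h>

/* Expand one gray byte per pixel into an opaque 32-bit pixel with R = G = B.
   srcRB is a byte stride in the original, mirrored by src_rb here. */
static void copy_g8_to_argb32(uint32_t *dst, size_t dst_stride_px,
                              const uint8_t *src8, size_t src_rb,
                              int width, int height) {
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < width; ++x) {
      const uint32_t g = src8[x];
      dst[x] = (0xFFu << 24) | (g << 16) | (g << 8) | g;  /* assumed A,R,G,B order */
    }
    dst += dst_stride_px;
    src8 += src_rb;
  }
}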
/external/libvpx/libvpx/vpx_scale/generic/ |
D | yv12extend.c |
    64   static void extend_plane_high(uint8_t *const src8, int src_stride,  in extend_plane_high() argument
    70   uint16_t *src = CONVERT_TO_SHORTPTR(src8);  in extend_plane_high()
    214  void memcpy_short_addr(uint8_t *dst8, const uint8_t *src8, int num) {  in memcpy_short_addr() argument
    216  uint16_t *src = CONVERT_TO_SHORTPTR(src8);  in memcpy_short_addr()
|
/external/libvpx/libvpx/vp9/encoder/ |
D | vp9_extend.c |
    60   static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,  in highbd_copy_and_extend_plane() argument
    66   uint16_t *src = CONVERT_TO_SHORTPTR(src8);  in highbd_copy_and_extend_plane()
|
/external/libvpx/libvpx/vp8/encoder/mips/msa/ |
D | denoising_msa.c |
    32   v16u8 src8, src9, src10, src11, src12, src13, src14, src15;  in vp8_denoiser_filter_msa() local
    324  LD_UB8(sig_start, sig_stride, src8, src9, src10, src11, src12, src13,  in vp8_denoiser_filter_msa()
    330  ST_UB8(src8, src9, src10, src11, src12, src13, src14, src15,  in vp8_denoiser_filter_msa()
|
/external/icu/icu4c/source/test/cintltst/ |
D | ncnvtst.c |
    740  static const char src8[] = { (char)0xCC, (char)0x81, (char)0xCC, (char)0x80 };  in TestRegressionUTF8() local
    752  srcBeg = src8;  in TestRegressionUTF8()
    754  srcEnd = src8 + 3;  in TestRegressionUTF8()
    760  srcEnd = src8 + 4;  in TestRegressionUTF8()
|
/external/libvpx/libvpx/vp9/decoder/ |
D | vp9_decodeframe.c |
    454  static void high_build_mc_border(const uint8_t *src8, int src_stride,  in high_build_mc_border() argument
    459  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);  in high_build_mc_border()
|
/external/libvncserver/x11vnc/misc/enhanced_tightvnc_viewer/src/patches/ |
D | tight-vncviewer-full.patch |
    6762  + CARD8 *src8 = ( (CARD8 *)im->data) + y * src_width8 + x;
    6836  + *(src8++) = BGR233ToPixel[*(buf++)];
    6838  + src8 += src_width8 - width;
    6857  + src8 = ((CARD8 *)im->data) + (y * si.framebufferWidth + x) * 3;
    6861  + *(src8 + b0) = (unsigned char) ((v & 0x0000ff) >> 0);
    6862  + *(src8 + b1) = (unsigned char) ((v & 0x00ff00) >> 8);
    6863  + *(src8 + b2) = (unsigned char) ((v & 0xff0000) >> 16);
    6864  + src8 += 3;
    6866  + src8 += (si.framebufferWidth - width) * 3;
|
/external/valgrind/VEX/priv/ |
D | guest_s390_toIR.c |
    7223  IRTemp src8 = newTemp(Ity_F128);  in s390_irgen_PFPO() local
    7342  assign(src8, get_fpr_pair(4)); /* get source from FPR 4,6 */  in s390_irgen_PFPO()
    7343  assign(dst8, binop(Iop_F128toD64, irrm, mkexpr(src8)));  in s390_irgen_PFPO()
    7346  s390_cc_thunk_put1f128Z(S390_CC_OP_PFPO_128, src8, gr0);  in s390_irgen_PFPO()
|