/external/libvpx/libvpx/vpx_dsp/arm/

vpx_scaled_convolve8_neon.c
  161  int y_q4 = y0_q4;  in scaledconvolve_vert_w4() local
  166  const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in scaledconvolve_vert_w4()
  168  if (y_q4 & SUBPEL_MASK) {  in scaledconvolve_vert_w4()
  169  const int16x8_t filters = vld1q_s16(y_filters[y_q4 & SUBPEL_MASK]);  in scaledconvolve_vert_w4()
  195  y_q4 += y_step_q4;  in scaledconvolve_vert_w4()
  204  int y_q4 = y0_q4;  in scaledconvolve_vert_w8() local
  209  const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in scaledconvolve_vert_w8()
  210  if (y_q4 & SUBPEL_MASK) {  in scaledconvolve_vert_w8()
  211  const int16x8_t filters = vld1q_s16(y_filters[y_q4 & SUBPEL_MASK]);  in scaledconvolve_vert_w8()
  221  y_q4 += y_step_q4;  in scaledconvolve_vert_w8()
  [all …]
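All of the vertical kernels above follow the same scaled-prediction pattern: y_q4 is a Q4 fixed-point source row coordinate, its integer part (y_q4 >> SUBPEL_BITS) selects the source row to read from, its low four bits (y_q4 & SUBPEL_MASK) select one of 16 subpixel filter phases, and the coordinate advances by y_step_q4 after every output row. The following is a minimal scalar sketch of that loop for a single column, not the library's NEON code; the constants and the clip helper are written out locally rather than taken from the vpx_dsp headers.

    #include <stdint.h>

    #define SUBPEL_BITS 4   /* Q4 fixed point: 16 subpixel phases per pixel */
    #define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)
    #define SUBPEL_TAPS 8   /* 8-tap interpolation filters */
    #define FILTER_BITS 7   /* filter taps sum to 1 << FILTER_BITS */

    static uint8_t clip_pixel(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    /* Scalar sketch of a scaledconvolve_vert_* style kernel, one column wide.
     * 'filters' points at 16 8-tap filters, one per subpixel phase, and 'src'
     * is assumed to already point at the first tap row of the first output. */
    static void scaled_convolve_vert_sketch(const uint8_t *src, int src_stride,
                                            uint8_t *dst, int dst_stride,
                                            const int16_t filters[16][SUBPEL_TAPS],
                                            int y0_q4, int y_step_q4, int h) {
      int y_q4 = y0_q4;
      for (int y = 0; y < h; ++y) {
        const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
        if (y_q4 & SUBPEL_MASK) {  /* fractional phase: apply the 8-tap filter */
          const int16_t *f = filters[y_q4 & SUBPEL_MASK];
          int sum = 0;
          for (int k = 0; k < SUBPEL_TAPS; ++k) sum += src_y[k * src_stride] * f[k];
          dst[y * dst_stride] = clip_pixel((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
        } else {                   /* integer phase: the filter reduces to a copy */
          dst[y * dst_stride] = src_y[(SUBPEL_TAPS / 2 - 1) * src_stride];
        }
        y_q4 += y_step_q4;         /* advance the Q4 source coordinate */
      }
    }

The zero-phase shortcut is why the w4/w8 kernels guard the filtering work with if (y_q4 & SUBPEL_MASK): when the fractional part is zero the interpolation degenerates to copying the center tap.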
/external/libaom/libaom/aom_dsp/

aom_convolve.c
  62   int y_q4 = y0_q4;  in convolve_vert() local
  64   const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in convolve_vert()
  65   const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];  in convolve_vert()
  68   y_q4 += y_step_q4;  in convolve_vert()
  177  int y_q4 = y0_q4;  in highbd_convolve_vert() local
  179  const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in highbd_convolve_vert()
  180  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];  in highbd_convolve_vert()
  184  y_q4 += y_step_q4;  in highbd_convolve_vert()
/external/libvpx/libvpx/vpx_dsp/

vpx_convolve.c
  75   int y_q4 = y0_q4;  in convolve_vert() local
  77   const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in convolve_vert()
  78   const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];  in convolve_vert()
  83   y_q4 += y_step_q4;  in convolve_vert()
  98   int y_q4 = y0_q4;  in convolve_avg_vert() local
  100  const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in convolve_avg_vert()
  101  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];  in convolve_avg_vert()
  109  y_q4 += y_step_q4;  in convolve_avg_vert()
  348  int y_q4 = y0_q4;  in highbd_convolve_vert() local
  350  const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in highbd_convolve_vert()
  [all …]
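vpx_convolve.c holds the portable C reference versions that the SIMD files in this listing mirror. convolve_avg_vert() steps y_q4 exactly like convolve_vert(); the difference is the store, where the filtered sample is round-averaged into the existing destination pixel (the averaging prediction path). A sketch under the same assumptions as the one above, again with the library constants written out locally rather than taken from the headers:

    #include <stdint.h>

    #define SUBPEL_BITS 4
    #define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)
    #define SUBPEL_TAPS 8
    #define FILTER_BITS 7

    static uint8_t clip_pixel_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    /* Sketch of a convolve_avg_vert style kernel: per-column Q4 stepping as in
     * convolve_vert, but the result is round-averaged into dst instead of
     * overwriting it. */
    static void convolve_avg_vert_sketch(const uint8_t *src, int src_stride,
                                         uint8_t *dst, int dst_stride,
                                         const int16_t filters[16][SUBPEL_TAPS],
                                         int y0_q4, int y_step_q4, int w, int h) {
      src -= src_stride * (SUBPEL_TAPS / 2 - 1);  /* back up to the first tap row */
      for (int x = 0; x < w; ++x) {
        int y_q4 = y0_q4;
        for (int y = 0; y < h; ++y) {
          const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
          const int16_t *y_filter = filters[y_q4 & SUBPEL_MASK];
          int sum = 0;
          for (int k = 0; k < SUBPEL_TAPS; ++k) sum += src_y[k * src_stride] * y_filter[k];
          const uint8_t filtered =
              clip_pixel_u8((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
          /* averaging store: (existing + filtered + 1) / 2, rounded */
          dst[y * dst_stride + x] = (uint8_t)((dst[y * dst_stride + x] + filtered + 1) >> 1);
          y_q4 += y_step_q4;
        }
        ++src;  /* next column */
      }
    }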
/external/libvpx/libvpx/vpx_dsp/ppc/

vpx_convolve_vsx.c
  275  int y_q4 = y0_q4;  in convolve_vert() local
  278  &src[(y_q4 >> SUBPEL_BITS) * src_stride], src_stride,  in convolve_vert()
  279  y_filters[y_q4 & SUBPEL_MASK]);  in convolve_vert()
  280  y_q4 += y_step_q4;  in convolve_vert()
  295  int y_q4 = y0_q4;  in convolve_avg_vert() local
  298  convolve_line_v(&v, &src[(y_q4 >> SUBPEL_BITS) * src_stride], src_stride,  in convolve_avg_vert()
  299  y_filters[y_q4 & SUBPEL_MASK]);  in convolve_avg_vert()
  301  y_q4 += y_step_q4;  in convolve_avg_vert()
/external/libvpx/libvpx/vpx_dsp/x86/

vpx_subpixel_8t_intrin_ssse3.c
  922  int y_q4 = y0_q4;  in scaledconvolve_vert_w4() local
  926  const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in scaledconvolve_vert_w4()
  927  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];  in scaledconvolve_vert_w4()
  929  if (y_q4 & SUBPEL_MASK) {  in scaledconvolve_vert_w4()
  935  y_q4 += y_step_q4;  in scaledconvolve_vert_w4()
  955  int y_q4 = y0_q4;  in scaledconvolve_vert_w8() local
  959  const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in scaledconvolve_vert_w8()
  960  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];  in scaledconvolve_vert_w8()
  961  if (y_q4 & SUBPEL_MASK) {  in scaledconvolve_vert_w8()
  966  y_q4 += y_step_q4;  in scaledconvolve_vert_w8()
  [all …]
/external/libvpx/libvpx/vpx_dsp/mips/

vpx_convolve8_mmi.c
  537  int y_q4 = y0_q4;  in convolve_vert() local
  539  const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in convolve_vert()
  540  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];  in convolve_vert()
  545  y_q4 += y_step_q4;  in convolve_vert()
  560  int y_q4 = y0_q4;  in convolve_avg_vert() local
  562  const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in convolve_avg_vert()
  563  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];  in convolve_avg_vert()
  571  y_q4 += y_step_q4;  in convolve_avg_vert()
vpx_convolve8_msa.c
  1104  int y_q4 = y0_q4;  in scaledconvolve_vert_w4() local
  1109  const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in scaledconvolve_vert_w4()
  1110  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];  in scaledconvolve_vert_w4()
  1112  if (y_q4 & SUBPEL_MASK) {  in scaledconvolve_vert_w4()
  1119  y_q4 += y_step_q4;  in scaledconvolve_vert_w4()
  1128  int y_q4 = y0_q4;  in scaledconvolve_vert_w8() local
  1133  const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in scaledconvolve_vert_w8()
  1134  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];  in scaledconvolve_vert_w8()
  1136  if (y_q4 & SUBPEL_MASK) {  in scaledconvolve_vert_w8()
  1143  y_q4 += y_step_q4;  in scaledconvolve_vert_w8()
  [all …]
/external/libvpx/libvpx/vp9/encoder/

vp9_frame_scale.c
  99   const int y_q4 = y * (16 / factor) * src_h / dst_h + phase_scaler;  in vp9_scale_and_extend_frame_c() local
  108  x_q4 & 0xf, 16 * src_w / dst_w, y_q4 & 0xf,  in vp9_scale_and_extend_frame_c()
vp9_encoder.c
  3056  const int y_q4 = y * (16 / factor) * src_h / dst_h + phase_scaler;  local
  3067  x_q4 & 0xf, 16 * src_w / dst_w, y_q4 & 0xf,
  3072  x_q4 & 0xf, 16 * src_w / dst_w, y_q4 & 0xf,
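The two vp9 encoder call sites above derive the Q4 starting coordinate for each destination block directly from the scaling ratio: y_q4 = y * (16 / factor) * src_h / dst_h + phase_scaler. Its integer part (y_q4 >> 4) is the source row the scaled convolution starts from, the low four bits (y_q4 & 0xf) become the subpel_y phase passed to the kernel, and the per-row step is 16 * src_h / dst_h sixteenths of a source row. A small worked example; the numbers are illustrative, not taken from the source, and factor is fixed at 1 here although it varies by plane in the real code:

    #include <stdio.h>

    int main(void) {
      /* Illustrative numbers: scale 1080 source rows to 720 destination rows,
       * factor = 1, centering phase of 8/16 of a source pixel. */
      const int src_h = 1080, dst_h = 720, factor = 1, phase_scaler = 8;
      const int y = 32;  /* destination row at which a block starts */
      const int y_q4 = y * (16 / factor) * src_h / dst_h + phase_scaler;   /* 776 */
      printf("start row   = %d\n", y_q4 >> 4);      /* 48: integer source row */
      printf("start phase = %d/16\n", y_q4 & 0xf);  /* 8: subpixel phase */
      printf("q4 step     = %d per output row\n", 16 * src_h / dst_h);  /* 24, i.e. 1.5 rows */
      return 0;
    }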
/external/libaom/libaom/av1/common/

convolve.c
  1147  int y_q4 = y0_q4;  in convolve_add_src_vert_hip() local
  1149  const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in convolve_add_src_vert_hip()
  1150  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];  in convolve_add_src_vert_hip()
  1157  y_q4 += y_step_q4;  in convolve_add_src_vert_hip()
  1227  int y_q4 = y0_q4;  in highbd_convolve_add_src_vert_hip() local
  1229  const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];  in highbd_convolve_add_src_vert_hip()
  1230  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];  in highbd_convolve_add_src_vert_hip()
  1238  y_q4 += y_step_q4;  in highbd_convolve_add_src_vert_hip()