Searched refs: y_q4 (Results 1 – 7 of 7) sorted by relevance
/external/libvpx/libvpx/vpx_dsp/arm/
  vpx_scaled_convolve8_neon.c
    161  int y_q4 = y0_q4;    in scaledconvolve_vert_w4() local
    166  const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];    in scaledconvolve_vert_w4()
    168  if (y_q4 & SUBPEL_MASK) {    in scaledconvolve_vert_w4()
    169  const int16x8_t filters = vld1q_s16(y_filters[y_q4 & SUBPEL_MASK]);    in scaledconvolve_vert_w4()
    195  y_q4 += y_step_q4;    in scaledconvolve_vert_w4()
    204  int y_q4 = y0_q4;    in scaledconvolve_vert_w8() local
    209  const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];    in scaledconvolve_vert_w8()
    210  if (y_q4 & SUBPEL_MASK) {    in scaledconvolve_vert_w8()
    211  const int16x8_t filters = vld1q_s16(y_filters[y_q4 & SUBPEL_MASK]);    in scaledconvolve_vert_w8()
    221  y_q4 += y_step_q4;    in scaledconvolve_vert_w8()
    [all …]
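Every kernel in these results follows the same Q4 fixed-point traversal: the high bits of y_q4 (y_q4 >> SUBPEL_BITS) pick the source row, the low four bits (y_q4 & SUBPEL_MASK) pick the sub-pixel filter phase, and y_step_q4 advances the position once per output row. The scalar sketch below only illustrates that pattern; the function name, the clip_u8 helper, and the explicit [16][8] filter-table type are placeholders for this note rather than libvpx code, while the SUBPEL_* and FILTER_BITS values match the ones defined in vpx_dsp/vpx_filter.h.

#include <stdint.h>

#define SUBPEL_BITS 4                        /* Q4: 16 sub-pixel phases per pixel */
#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)
#define SUBPEL_TAPS 8                        /* 8-tap interpolation filters */
#define FILTER_BITS 7                        /* filter taps sum to 128 */

static uint8_t clip_u8(int v) { return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v)); }

/* Scalar sketch of the vertical pass: the integer part of y_q4 selects the
 * source row, the fractional part selects the filter phase, and y_step_q4
 * advances the position in Q4 units per output row. */
static void convolve_vert_sketch(const uint8_t *src, int src_stride,
                                 uint8_t *dst, int dst_stride,
                                 const int16_t filters[16][SUBPEL_TAPS],
                                 int y0_q4, int y_step_q4, int w, int h) {
  src -= src_stride * (SUBPEL_TAPS / 2 - 1);  /* center the 8-tap window */
  for (int x = 0; x < w; ++x) {
    int y_q4 = y0_q4;
    for (int y = 0; y < h; ++y) {
      const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
      const int16_t *f = filters[y_q4 & SUBPEL_MASK];
      int sum = 0;
      for (int k = 0; k < SUBPEL_TAPS; ++k) sum += src_y[k * src_stride] * f[k];
      dst[y * dst_stride + x] = clip_u8((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
      y_q4 += y_step_q4;
    }
    ++src;  /* next column */
  }
}

The NEON, SSSE3, VSX, and MSA files in these results vectorize this inner loop; the extra if (y_q4 & SUBPEL_MASK) test in the scaled variants suggests they skip the filter entirely and copy the source row when the position lands on a whole pixel.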
/external/libvpx/libvpx/vpx_dsp/x86/
  vpx_subpixel_8t_intrin_ssse3.c
    420  int y_q4 = y0_q4;    in scaledconvolve_vert_w4() local
    424  const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];    in scaledconvolve_vert_w4()
    425  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];    in scaledconvolve_vert_w4()
    427  if (y_q4 & SUBPEL_MASK) {    in scaledconvolve_vert_w4()
    433  y_q4 += y_step_q4;    in scaledconvolve_vert_w4()
    453  int y_q4 = y0_q4;    in scaledconvolve_vert_w8() local
    457  const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];    in scaledconvolve_vert_w8()
    458  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];    in scaledconvolve_vert_w8()
    459  if (y_q4 & SUBPEL_MASK) {    in scaledconvolve_vert_w8()
    464  y_q4 += y_step_q4;    in scaledconvolve_vert_w8()
    [all …]
/external/libvpx/libvpx/vpx_dsp/
  vpx_convolve.c
    75   int y_q4 = y0_q4;    in convolve_vert() local
    77   const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];    in convolve_vert()
    78   const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];    in convolve_vert()
    83   y_q4 += y_step_q4;    in convolve_vert()
    98   int y_q4 = y0_q4;    in convolve_avg_vert() local
    100  const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];    in convolve_avg_vert()
    101  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];    in convolve_avg_vert()
    109  y_q4 += y_step_q4;    in convolve_avg_vert()
    348  int y_q4 = y0_q4;    in highbd_convolve_vert() local
    350  const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];    in highbd_convolve_vert()
    [all …]
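Read concretely with libvpx's constants (SUBPEL_BITS = 4, SUBPEL_MASK = 0xf): a position of y_q4 = 37 means source row 37 >> 4 = 2 filtered with sub-pixel phase 37 & 0xf = 5. A step of y_step_q4 = 16 advances exactly one source row per output row (no scaling), while a larger step such as 24 advances 1.5 source rows, which is how these loops walk a taller source into a shorter destination. convolve_avg_vert performs the same traversal but averages the filtered result into the existing dst pixel, and highbd_convolve_vert is the same pattern over uint16_t pixels.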
/external/libvpx/libvpx/vpx_dsp/ppc/
  vpx_convolve_vsx.c
    261  int y_q4 = y0_q4;    in convolve_vert() local
    264  &src[(y_q4 >> SUBPEL_BITS) * src_stride], src_stride,    in convolve_vert()
    265  y_filters[y_q4 & SUBPEL_MASK]);    in convolve_vert()
    266  y_q4 += y_step_q4;    in convolve_vert()
    281  int y_q4 = y0_q4;    in convolve_avg_vert() local
    284  convolve_line_v(&v, &src[(y_q4 >> SUBPEL_BITS) * src_stride], src_stride,    in convolve_avg_vert()
    285  y_filters[y_q4 & SUBPEL_MASK]);    in convolve_avg_vert()
    287  y_q4 += y_step_q4;    in convolve_avg_vert()
/external/libvpx/libvpx/vp9/encoder/
  vp9_frame_scale.c
    99   const int y_q4 = y * (16 / factor) * src_h / dst_h + phase_scaler;    in vp9_scale_and_extend_frame_c() local
    108  x_q4 & 0xf, 16 * src_w / dst_w, y_q4 & 0xf,    in vp9_scale_and_extend_frame_c()
  vp9_encoder.c
    2683  const int y_q4 = y * (16 / factor) * src_h / dst_h + phase_scaler;    local
    2694  x_q4 & 0xf, 16 * src_w / dst_w, y_q4 & 0xf,
    2699  x_q4 & 0xf, 16 * src_w / dst_w, y_q4 & 0xf,
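These two encoder call sites compute the Q4 start position from the scaling ratio instead of accumulating a step: y * (16 / factor) * src_h / dst_h maps a destination block row to its source position in 1/16-pel units (factor presumably 1 for luma and 2 for the subsampled chroma planes), phase_scaler adds a fixed sub-pixel offset, and only the fractional part (y_q4 & 0xf) is handed to the scaler along with a per-row step of 16 * src_h / dst_h. A small self-checking example of that arithmetic, with made-up dimensions and the luma case 16 / factor == 16 assumed:

#include <assert.h>

int main(void) {
  /* Hypothetical numbers: scale 1080 source rows to 720 destination rows,
   * luma plane (16 / factor == 16), no extra phase. */
  const int src_h = 1080, dst_h = 720, phase_scaler = 0;
  const int y = 32;                                  /* destination block row  */
  const int y_q4 = y * 16 * src_h / dst_h + phase_scaler;

  assert(y_q4 == 768);               /* source position in 1/16-pel units     */
  assert((y_q4 >> 4) == 48);         /* whole source row: 32 * 1.5            */
  assert((y_q4 & 0xf) == 0);         /* sub-pixel phase handed to the scaler  */
  assert(16 * src_h / dst_h == 24);  /* Q4 step: 1.5 source rows per dst row  */
  return 0;
}

With a phase_scaler of 8, the same computation would start half a pixel lower in the source (y_q4 = 776, whole row 48, phase 8).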
/external/libvpx/libvpx/vpx_dsp/mips/
  vpx_convolve8_msa.c
    1104  int y_q4 = y0_q4;    in scaledconvolve_vert_w4() local
    1109  const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];    in scaledconvolve_vert_w4()
    1110  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];    in scaledconvolve_vert_w4()
    1112  if (y_q4 & SUBPEL_MASK) {    in scaledconvolve_vert_w4()
    1119  y_q4 += y_step_q4;    in scaledconvolve_vert_w4()
    1128  int y_q4 = y0_q4;    in scaledconvolve_vert_w8() local
    1133  const uint8_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];    in scaledconvolve_vert_w8()
    1134  const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];    in scaledconvolve_vert_w8()
    1136  if (y_q4 & SUBPEL_MASK) {    in scaledconvolve_vert_w8()
    1143  y_q4 += y_step_q4;    in scaledconvolve_vert_w8()
    [all …]