Searched refs:stride0 (Results 1 – 12 of 12) sorted by relevance

/external/mesa3d/src/egl/wayland/wayland-drm/
wayland-drm.c
63 int32_t offset0, int32_t stride0, in create_buffer() argument
81 buffer->stride[0] = stride0; in create_buffer()
137 int32_t offset0, int32_t stride0, in drm_create_planar_buffer() argument
158 offset0, stride0, offset1, stride1, offset2, stride2); in drm_create_planar_buffer()
166 int32_t offset0, int32_t stride0, in drm_create_prime_buffer() argument
171 offset0, stride0, offset1, stride1, offset2, stride2); in drm_create_prime_buffer()
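
The wl_drm entry points above receive one offset/stride pair per plane and stash them on the buffer object (line 81 stores stride0 into buffer->stride[0]). A minimal sketch of that bookkeeping, with the struct and function names invented for illustration rather than taken from Mesa:

    #include <stdint.h>

    /* Illustrative only: per-plane layout as passed over the wl_drm protocol. */
    struct planar_buffer_sketch {
        int32_t width, height;
        uint32_t format;
        int32_t offset[3];   /* byte offset of each plane in the backing BO */
        int32_t stride[3];   /* bytes per row of each plane */
    };

    static void store_planes(struct planar_buffer_sketch *buf,
                             int32_t offset0, int32_t stride0,
                             int32_t offset1, int32_t stride1,
                             int32_t offset2, int32_t stride2)
    {
        buf->offset[0] = offset0;  buf->stride[0] = stride0;  /* Y or packed RGB */
        buf->offset[1] = offset1;  buf->stride[1] = stride1;  /* U / CbCr */
        buf->offset[2] = offset2;  buf->stride[2] = stride2;  /* V, if present */
    }
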
/external/libaom/libaom/aom_dsp/x86/
variance_avx2.c
363 const int stride0 = invert_mask ? width : ref_stride; in aom_comp_mask_pred_avx2() local
366 comp_mask_pred_8_ssse3(comp_pred, height, src0, stride0, src1, stride1, in aom_comp_mask_pred_avx2()
370 const __m256i sA0 = mm256_loadu2(src0 + stride0, src0); in aom_comp_mask_pred_avx2()
373 src0 += (stride0 << 1); in aom_comp_mask_pred_avx2()
376 const __m256i sB0 = mm256_loadu2(src0 + stride0, src0); in aom_comp_mask_pred_avx2()
379 src0 += (stride0 << 1); in aom_comp_mask_pred_avx2()
394 const __m256i sB0 = _mm256_lddqu_si256((const __m256i *)(src0 + stride0)); in aom_comp_mask_pred_avx2()
403 src0 += (stride0 << 1); in aom_comp_mask_pred_avx2()
446 const int stride0 = invert_mask ? width : ref_stride; in aom_highbd_comp_mask_pred_avx2() local
452 const __m256i s0 = mm256_loadu2_16(src0 + stride0, src0); in aom_highbd_comp_mask_pred_avx2()
[all …]
masked_variance_intrin_ssse3.h
51 const uint8_t *src0, int stride0, in comp_mask_pred_8_ssse3() argument
65 const __m128i sB0 = _mm_loadl_epi64((const __m128i *)(src0 + stride0)); in comp_mask_pred_8_ssse3()
85 src0 += (stride0 << 1); in comp_mask_pred_8_ssse3()
variance_sse2.c
727 const int stride0 = invert_mask ? width : ref_stride; in aom_highbd_comp_mask_pred_sse2() local
742 src0 += stride0; in aom_highbd_comp_mask_pred_sse2()
765 src0 += stride0; in aom_highbd_comp_mask_pred_sse2()
791 src0 += stride0; in aom_highbd_comp_mask_pred_sse2()
masked_variance_intrin_ssse3.c
1035 const int stride0 = invert_mask ? width : ref_stride; in aom_comp_mask_pred_ssse3() local
1040 comp_mask_pred_8_ssse3(comp_pred, height, src0, stride0, src1, stride1, in aom_comp_mask_pred_ssse3()
1045 comp_mask_pred_16_ssse3(src0 + stride0, src1 + stride1, in aom_comp_mask_pred_ssse3()
1048 src0 += (stride0 << 1); in aom_comp_mask_pred_ssse3()
1059 src0 += (stride0); in aom_comp_mask_pred_ssse3()
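
All four files in this group pick the stride the same way: stride0 = invert_mask ? width : ref_stride, because the freshly built prediction is a contiguous block (stride equal to the block width) while the reference plane uses ref_stride, and invert_mask swaps which source the mask weights. The SIMD kernels then consume two rows per iteration, which is why the pointers advance by stride0 << 1. A sketch of that two-row load in AVX2 terms; the lane order of the mm256_loadu2 helper seen above is an assumption here:

    #include <immintrin.h>
    #include <stdint.h>

    /* Sketch: pack row i (low 128 bits) and row i + stride0 (high 128 bits)
     * of a 16-pixel-wide block into one 256-bit register. */
    static inline __m256i load_two_rows_sketch(const uint8_t *src0, int stride0)
    {
        const __m128i row0 = _mm_loadu_si128((const __m128i *)src0);
        const __m128i row1 = _mm_loadu_si128((const __m128i *)(src0 + stride0));
        return _mm256_inserti128_si256(_mm256_castsi128_si256(row0), row1, 1);
    }
    /* The caller then skips both rows at once:  src0 += (stride0 << 1); */

The scalar reference these kernels mirror is the aom_comp_mask_pred_c hit under aom_dsp/variance.c further down.
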
/external/libaom/libaom/av1/common/x86/
reconinter_sse4.c
28 const uint8_t *src0, int stride0, in av1_build_compound_diffwtd_mask_sse4_1() argument
37 const __m128i s0B = _mm_cvtsi32_si128(*(uint32_t *)(src0 + stride0)); in av1_build_compound_diffwtd_mask_sse4_1()
51 src0 += (stride0 << 1); in av1_build_compound_diffwtd_mask_sse4_1()
65 src0 += stride0; in av1_build_compound_diffwtd_mask_sse4_1()
89 src0 += stride0; in av1_build_compound_diffwtd_mask_sse4_1()
reconinter_avx2.c
31 const uint8_t *src0, int stride0, in av1_build_compound_diffwtd_mask_avx2() argument
40 const __m128i s0B = xx_loadl_32(src0 + stride0); in av1_build_compound_diffwtd_mask_avx2()
41 const __m128i s0C = xx_loadl_32(src0 + stride0 * 2); in av1_build_compound_diffwtd_mask_avx2()
42 const __m128i s0D = xx_loadl_32(src0 + stride0 * 3); in av1_build_compound_diffwtd_mask_avx2()
61 src0 += (stride0 << 2); in av1_build_compound_diffwtd_mask_avx2()
69 const __m128i s0B = xx_loadl_64(src0 + stride0); in av1_build_compound_diffwtd_mask_avx2()
70 const __m128i s0C = xx_loadl_64(src0 + stride0 * 2); in av1_build_compound_diffwtd_mask_avx2()
71 const __m128i s0D = xx_loadl_64(src0 + stride0 * 3); in av1_build_compound_diffwtd_mask_avx2()
84 src0 += stride0 << 2; in av1_build_compound_diffwtd_mask_avx2()
92 const __m128i s0B = xx_load_128(src0 + stride0); in av1_build_compound_diffwtd_mask_avx2()
[all …]
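
av1_build_compound_diffwtd_mask turns the per-pixel difference of two predictors into a blend mask; the SSE4.1/AVX2 versions above only vectorize that over two or four rows at a time (hence the stride0 << 1 and stride0 << 2 steps). A scalar sketch of the idea; the base weight 38, the divide-by-16 scaling and the 0..64 clamp match my reading of the C reference but should be treated as assumptions:

    #include <stdint.h>
    #include <stdlib.h>

    /* Sketch: larger |src0 - src1| pushes the blend weight toward one
     * predictor; the inverse-mask variant would store 64 - m instead. */
    static void build_diffwtd_mask_sketch(uint8_t *mask, const uint8_t *src0,
                                          int stride0, const uint8_t *src1,
                                          int stride1, int h, int w)
    {
        for (int i = 0; i < h; ++i) {
            for (int j = 0; j < w; ++j) {
                const int diff = abs((int)src0[j] - (int)src1[j]);
                const int m = 38 + diff / 16;          /* assumed constants */
                mask[j] = (uint8_t)(m > 64 ? 64 : m);  /* clamp to max alpha */
            }
            mask += w;
            src0 += stride0;
            src1 += stride1;
        }
    }
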
/external/minigbm/
virtio_gpu.c
82 uint32_t stride0; in virtio_virgl_bo_create() local
121 stride0 = drv_stride_from_format(format, width, 0); in virtio_virgl_bo_create()
122 drv_bo_from_format(bo, stride0, height, format); in virtio_virgl_bo_create()
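
Here stride0 is the plane-0 stride derived from the format and width, and drv_bo_from_format then fills in the remaining plane strides, offsets and the total size from it. A sketch of that kind of derivation; the 64-byte alignment and the bytes-per-pixel argument are illustrative assumptions, not minigbm's actual rules:

    #include <stdint.h>

    #define ALIGN_UP(v, a) (((v) + (a) - 1) & ~((uint32_t)(a) - 1))

    /* Sketch: plane-0 stride = row bytes rounded up to a hardware-friendly
     * alignment (the 64 here is an assumption). */
    static uint32_t stride_from_format_sketch(uint32_t bytes_per_pixel, uint32_t width)
    {
        return ALIGN_UP(width * bytes_per_pixel, 64);
    }

    /* Hypothetical usage for a 4-byte XRGB8888 plane:
     *   uint32_t stride0 = stride_from_format_sketch(4, width);
     *   // plane sizes, offsets and total_size would then follow from stride0
     */
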
/external/webp/src/enc/
picture_psnr_enc.c
203 const size_t stride0 = 4 * (size_t)p0.argb_stride; in WebPPictureDistortion() local
207 if (!WebPPlaneDistortion((const uint8_t*)p0.argb + offset, stride0, in WebPPictureDistortion()
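
In WebPPictureDistortion the ARGB stride is stored in pixels (argb_stride counts 32-bit entries), while the plane-distortion helper works on byte pointers, hence stride0 = 4 * argb_stride. A tiny illustration of that conversion, with names invented here:

    #include <stddef.h>
    #include <stdint.h>

    /* argb_stride counts uint32_t ARGB pixels per row; byte-oriented code
     * needs the stride scaled by sizeof(uint32_t). */
    static size_t argb_byte_stride(int argb_stride_in_pixels)
    {
        return 4 * (size_t)argb_stride_in_pixels;
    }
    /* A pixel (x, y) of that plane then starts at byte offset
     * y * argb_byte_stride(argb_stride) + 4 * (size_t)x. */
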
/external/libaom/libaom/aom_dsp/
variance.c
1081 const int stride0 = invert_mask ? width : ref_stride; in aom_comp_mask_pred_c() local
1088 src0 += stride0; in aom_comp_mask_pred_c()
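
aom_comp_mask_pred_c is the scalar reference behind the SIMD hits above: pick the sources and strides according to invert_mask, then blend pixel by pixel under the mask. A condensed sketch, assuming the usual 0..64 mask range and a rounding shift by 6; treat those details as assumptions rather than the exact libaom code:

    #include <stdint.h>

    static void comp_mask_pred_sketch(uint8_t *comp_pred, const uint8_t *pred,
                                      int width, int height, const uint8_t *ref,
                                      int ref_stride, const uint8_t *mask,
                                      int mask_stride, int invert_mask)
    {
        /* The prediction buffer is contiguous (stride == width); the reference
         * uses ref_stride.  invert_mask swaps which source the mask weights. */
        const uint8_t *src0 = invert_mask ? pred : ref;
        const uint8_t *src1 = invert_mask ? ref : pred;
        const int stride0 = invert_mask ? width : ref_stride;
        const int stride1 = invert_mask ? ref_stride : width;
        for (int i = 0; i < height; ++i) {
            for (int j = 0; j < width; ++j) {
                const int m = mask[j];  /* blend weight in [0, 64] */
                comp_pred[j] = (uint8_t)((m * src0[j] + (64 - m) * src1[j] + 32) >> 6);
            }
            comp_pred += width;
            src0 += stride0;
            src1 += stride1;
            mask += mask_stride;
        }
    }
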
/external/python/cpython3/Lib/test/
test_buffer.py
794 stride0 = self.sizeof_void_p
796 stride0 = -stride0
797 strides = [stride0] + list(strides[1:])
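
The test above flips stride0 negative to exercise reversed views: the buffer protocol allows negative strides, in which case the base pointer refers to the last element along that dimension and each step subtracts the element size. Keeping with C as in the rest of this listing, a small stand-alone illustration of the same idea (all names invented):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        int32_t data[4] = { 10, 20, 30, 40 };
        /* A negative stride walks the same memory back to front: the base
         * points at the last element and each step subtracts the element
         * size, as in a reversed buffer view. */
        const uint8_t *base = (const uint8_t *)&data[3];
        const ptrdiff_t stride0 = -(ptrdiff_t)sizeof(int32_t);
        for (int i = 0; i < 4; ++i) {
            int32_t v;
            memcpy(&v, base + i * stride0, sizeof v);
            printf("%d ", v);  /* prints: 40 30 20 10 */
        }
        printf("\n");
        return 0;
    }
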
/external/libaom/libaom/av1/encoder/
rdopt.c
7531 int stride0, const uint8_t *pred1, int stride1) { in estimate_wedge_sign() argument
7566 cpi->fn_ptr[f_index].vf(src, src_stride, pred0, stride0, &esq[0][0]); in estimate_wedge_sign()
7567 cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, pred0 + bw / 2, stride0, in estimate_wedge_sign()
7570 pred0 + bh / 2 * stride0, stride0, &esq[0][2]); in estimate_wedge_sign()
7572 pred0 + bh / 2 * stride0 + bw / 2, stride0, in estimate_wedge_sign()
7578 pred1 + bh / 2 * stride1, stride0, &esq[1][2]); in estimate_wedge_sign()
7580 pred1 + bh / 2 * stride1 + bw / 2, stride0, in estimate_wedge_sign()
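
estimate_wedge_sign scores each compound predictor against the source in the four bw/2 x bh/2 quadrants using the per-block-size variance pointers (cpi->fn_ptr[].vf) and derives the wedge sign from those squared errors. Note that rows 7578 and 7580 pass stride0 as the stride for the pred1 quadrants; that appears harmless only because both predictions seem to be laid out with the same stride here. A hedged sketch of the decision step; the exact rule in libaom may differ:

    #include <stdint.h>

    /* Sketch of the idea behind estimate_wedge_sign: esq[p][q] holds the
     * squared error of predictor p in quadrant q (0 = top-left, 1 = top-right,
     * 2 = bottom-left, 3 = bottom-right).  Pick the sign so the predictor that
     * fits a region better ends up covering it. */
    static int wedge_sign_sketch(const uint64_t esq[2][4])
    {
        const int64_t tl = (int64_t)(esq[0][0] + esq[0][1] + esq[0][2]) -
                           (int64_t)(esq[1][0] + esq[1][1] + esq[1][2]);
        const int64_t br = (int64_t)(esq[1][3] + esq[1][1] + esq[1][2]) -
                           (int64_t)(esq[0][3] + esq[0][1] + esq[0][2]);
        return (tl + br > 0);
    }
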