/external/libvpx/vpx_dsp/arm/

D | intrapred_neon.c
    26  static INLINE void dc_store_4x4(uint8_t *dst, ptrdiff_t stride,  [in dc_store_4x4() argument]
    29  for (i = 0; i < 4; ++i, dst += stride) {  [in dc_store_4x4()]
    34  void vpx_dc_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,  [in vpx_dc_predictor_4x4_neon() argument]
    41  dc_store_4x4(dst, stride, dc);  [in vpx_dc_predictor_4x4_neon()]
    44  void vpx_dc_left_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,  [in vpx_dc_left_predictor_4x4_neon() argument]
    49  dc_store_4x4(dst, stride, dc);  [in vpx_dc_left_predictor_4x4_neon()]
    52  void vpx_dc_top_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,  [in vpx_dc_top_predictor_4x4_neon() argument]
    57  dc_store_4x4(dst, stride, dc);  [in vpx_dc_top_predictor_4x4_neon()]
    60  void vpx_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,  [in vpx_dc_128_predictor_4x4_neon() argument]
    65  dc_store_4x4(dst, stride, dc);  [in vpx_dc_128_predictor_4x4_neon()]
    [all …]

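The excerpt above only shows the lines of intrapred_neon.c that mention stride. As a rough scalar illustration of the pattern those NEON predictors implement (fill a 4x4 block with one DC value, stepping the destination pointer down one row per iteration), a plain-C sketch might look like the following. The function names and the exact rounding here are illustrative assumptions, not the libvpx implementation.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Illustrative scalar sketch (not the libvpx NEON code): write one DC value
     * into every pixel of a 4x4 block, advancing by the row stride in bytes. */
    static void dc_store_4x4_scalar(uint8_t *dst, ptrdiff_t stride, uint8_t dc) {
      int i;
      for (i = 0; i < 4; ++i, dst += stride) {
        memset(dst, dc, 4);
      }
    }

    /* The plain DC predictor averages the 4 above and 4 left neighbours with
     * rounding; the _left/_top/_128 variants use only the left column, only the
     * top row, or a constant 128. */
    static void dc_predictor_4x4_scalar(uint8_t *dst, ptrdiff_t stride,
                                        const uint8_t *above,
                                        const uint8_t *left) {
      int i, sum = 4; /* +4 rounds the 8-sample average */
      for (i = 0; i < 4; ++i) sum += above[i] + left[i];
      dc_store_4x4_scalar(dst, stride, (uint8_t)(sum >> 3));
    }
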
D | highbd_intrapred_neon.c
    26  static INLINE void dc_store_4x4(uint16_t *dst, ptrdiff_t stride,  [in dc_store_4x4() argument]
    29  for (i = 0; i < 4; ++i, dst += stride) {  [in dc_store_4x4()]
    34  void vpx_highbd_dc_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,  [in vpx_highbd_dc_predictor_4x4_neon() argument]
    42  dc_store_4x4(dst, stride, dc);  [in vpx_highbd_dc_predictor_4x4_neon()]
    45  void vpx_highbd_dc_left_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,  [in vpx_highbd_dc_left_predictor_4x4_neon() argument]
    52  dc_store_4x4(dst, stride, dc);  [in vpx_highbd_dc_left_predictor_4x4_neon()]
    55  void vpx_highbd_dc_top_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,  [in vpx_highbd_dc_top_predictor_4x4_neon() argument]
    62  dc_store_4x4(dst, stride, dc);  [in vpx_highbd_dc_top_predictor_4x4_neon()]
    65  void vpx_highbd_dc_128_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,  [in vpx_highbd_dc_128_predictor_4x4_neon() argument]
    71  dc_store_4x4(dst, stride, dc);  [in vpx_highbd_dc_128_predictor_4x4_neon()]
    [all …]

D | idct16x16_1_add_neon.c
    17  static INLINE void idct16x16_1_add_pos_kernel(uint8_t **dest, const int stride,  [in idct16x16_1_add_pos_kernel() argument]
    22  *dest += stride;  [in idct16x16_1_add_pos_kernel()]
    25  static INLINE void idct16x16_1_add_neg_kernel(uint8_t **dest, const int stride,  [in idct16x16_1_add_neg_kernel() argument]
    30  *dest += stride;  [in idct16x16_1_add_neg_kernel()]
    34  int stride) {  [in vpx_idct16x16_1_add_neon() argument]
    42  idct16x16_1_add_pos_kernel(&dest, stride, dc);  [in vpx_idct16x16_1_add_neon()]
    43  idct16x16_1_add_pos_kernel(&dest, stride, dc);  [in vpx_idct16x16_1_add_neon()]
    44  idct16x16_1_add_pos_kernel(&dest, stride, dc);  [in vpx_idct16x16_1_add_neon()]
    45  idct16x16_1_add_pos_kernel(&dest, stride, dc);  [in vpx_idct16x16_1_add_neon()]
    46  idct16x16_1_add_pos_kernel(&dest, stride, dc);  [in vpx_idct16x16_1_add_neon()]
    [all …]

D | idct8x8_1_add_neon.c
    21  static INLINE void idct8x8_1_add_pos_kernel(uint8_t **dest, const int stride,  [in idct8x8_1_add_pos_kernel() argument]
    26  *dest += stride;  [in idct8x8_1_add_pos_kernel()]
    29  static INLINE void idct8x8_1_add_neg_kernel(uint8_t **dest, const int stride,  [in idct8x8_1_add_neg_kernel() argument]
    34  *dest += stride;  [in idct8x8_1_add_neg_kernel()]
    38  int stride) {  [in vpx_idct8x8_1_add_neon() argument]
    46  idct8x8_1_add_pos_kernel(&dest, stride, dc);  [in vpx_idct8x8_1_add_neon()]
    47  idct8x8_1_add_pos_kernel(&dest, stride, dc);  [in vpx_idct8x8_1_add_neon()]
    48  idct8x8_1_add_pos_kernel(&dest, stride, dc);  [in vpx_idct8x8_1_add_neon()]
    49  idct8x8_1_add_pos_kernel(&dest, stride, dc);  [in vpx_idct8x8_1_add_neon()]
    50  idct8x8_1_add_pos_kernel(&dest, stride, dc);  [in vpx_idct8x8_1_add_neon()]
    [all …]

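The two idct*_1_add files above share one pattern: a DC-only inverse transform collapses to adding a single residual value to every pixel of the block, clamped to 8-bit range, with the destination walked by stride. A hedged scalar sketch of that add step follows; the derivation of dc itself (the cospi rounding of input[0]) and the split into saturating _pos/_neg NEON kernels are not shown, and the name is made up.

    #include <stdint.h>

    /* Illustrative sketch of the "DC-only IDCT add": add one residual value dc
     * (possibly negative) to each pixel of an n x n block, clamping to [0, 255].
     * dest advances by stride bytes per row. */
    static void idct_nxn_1_add_scalar(uint8_t *dest, int stride, int n, int dc) {
      int r, c;
      for (r = 0; r < n; ++r, dest += stride) {
        for (c = 0; c < n; ++c) {
          int v = dest[c] + dc;
          dest[c] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
        }
      }
    }
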
/external/libdav1d/src/loongarch/

D | loongson_util.S
    90  .macro vld_x8 src, start, stride, in0, in1, in2, in3, in4, in5, in6, in7
    92  vld \in1, \src, \start+(\stride*1)
    93  vld \in2, \src, \start+(\stride*2)
    94  vld \in3, \src, \start+(\stride*3)
    95  vld \in4, \src, \start+(\stride*4)
    96  vld \in5, \src, \start+(\stride*5)
    97  vld \in6, \src, \start+(\stride*6)
    98  vld \in7, \src, \start+(\stride*7)
   101  .macro vst_x8 src, start, stride, in0, in1, in2, in3, in4, in5, in6, in7
   103  vst \in1, \src, \start+(\stride*1)
    [all …]

/external/libvpx/vpx_dsp/x86/

D | mem_sse2.h
    48  static INLINE void load_8bit_4x4(const uint8_t *const s, const ptrdiff_t stride,  [in load_8bit_4x4() argument]
    50  d[0] = _mm_cvtsi32_si128(*(const int *)(s + 0 * stride));  [in load_8bit_4x4()]
    51  d[1] = _mm_cvtsi32_si128(*(const int *)(s + 1 * stride));  [in load_8bit_4x4()]
    52  d[2] = _mm_cvtsi32_si128(*(const int *)(s + 2 * stride));  [in load_8bit_4x4()]
    53  d[3] = _mm_cvtsi32_si128(*(const int *)(s + 3 * stride));  [in load_8bit_4x4()]
    56  static INLINE void load_8bit_4x8(const uint8_t *const s, const ptrdiff_t stride,  [in load_8bit_4x8() argument]
    58  load_8bit_4x4(s + 0 * stride, stride, &d[0]);  [in load_8bit_4x8()]
    59  load_8bit_4x4(s + 4 * stride, stride, &d[4]);  [in load_8bit_4x8()]
    62  static INLINE void load_8bit_8x4(const uint8_t *const s, const ptrdiff_t stride,  [in load_8bit_8x4() argument]
    64  d[0] = _mm_loadl_epi64((const __m128i *)(s + 0 * stride));  [in load_8bit_8x4()]
    [all …]

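The load_8bit_4x4() lines above show the common SSE2 idiom of moving one 4-pixel row per 32-bit lane and addressing row r at s + r * stride. A small self-contained sketch of the same idiom is below; the function is hypothetical (a simple strided 4x4 copy), not part of the libvpx header.

    #include <emmintrin.h> /* SSE2 */
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical example: copy a 4x4 block of 8-bit pixels, one 32-bit row at
     * a time, using the same s + r * stride addressing as load_8bit_4x4(). */
    static void copy_8bit_4x4_sse2(const uint8_t *s, ptrdiff_t src_stride,
                                   uint8_t *d, ptrdiff_t dst_stride) {
      int r;
      for (r = 0; r < 4; ++r) {
        const __m128i row =
            _mm_cvtsi32_si128(*(const int *)(s + r * src_stride)); /* load 4 px */
        *(int *)(d + r * dst_stride) = _mm_cvtsi128_si32(row);     /* store 4 px */
      }
    }
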
D | highbd_intrapred_intrin_sse2.c
    19  void vpx_highbd_h_predictor_4x4_sse2(uint16_t *dst, ptrdiff_t stride,  [in vpx_highbd_h_predictor_4x4_sse2() argument]
    30  dst += stride;  [in vpx_highbd_h_predictor_4x4_sse2()]
    32  dst += stride;  [in vpx_highbd_h_predictor_4x4_sse2()]
    34  dst += stride;  [in vpx_highbd_h_predictor_4x4_sse2()]
    38  void vpx_highbd_h_predictor_8x8_sse2(uint16_t *dst, ptrdiff_t stride,  [in vpx_highbd_h_predictor_8x8_sse2() argument]
    53  dst += stride;  [in vpx_highbd_h_predictor_8x8_sse2()]
    55  dst += stride;  [in vpx_highbd_h_predictor_8x8_sse2()]
    57  dst += stride;  [in vpx_highbd_h_predictor_8x8_sse2()]
    59  dst += stride;  [in vpx_highbd_h_predictor_8x8_sse2()]
    61  dst += stride;  [in vpx_highbd_h_predictor_8x8_sse2()]
    [all …]

/external/libaom/aom_dsp/x86/

D | mem_sse2.h
    67  const ptrdiff_t stride) {  [in store_8bit_8x4_from_16x2() argument]
    68  _mm_storel_epi64((__m128i *)(d + 0 * stride), s[0]);  [in store_8bit_8x4_from_16x2()]
    69  _mm_storeh_epi64((__m128i *)(d + 1 * stride), s[0]);  [in store_8bit_8x4_from_16x2()]
    70  _mm_storel_epi64((__m128i *)(d + 2 * stride), s[1]);  [in store_8bit_8x4_from_16x2()]
    71  _mm_storeh_epi64((__m128i *)(d + 3 * stride), s[1]);  [in store_8bit_8x4_from_16x2()]
    75  const ptrdiff_t stride) {  [in store_8bit_4x4() argument]
    76  *(int *)(d + 0 * stride) = _mm_cvtsi128_si32(s[0]);  [in store_8bit_4x4()]
    77  *(int *)(d + 1 * stride) = _mm_cvtsi128_si32(s[1]);  [in store_8bit_4x4()]
    78  *(int *)(d + 2 * stride) = _mm_cvtsi128_si32(s[2]);  [in store_8bit_4x4()]
    79  *(int *)(d + 3 * stride) = _mm_cvtsi128_si32(s[3]);  [in store_8bit_4x4()]
    [all …]

D | intrapred_sse2.c
    17  ptrdiff_t stride) {  [in dc_store_4xh() argument]
    20  dst += stride;  [in dc_store_4xh()]
    22  dst += stride;  [in dc_store_4xh()]
    27  ptrdiff_t stride) {  [in dc_store_8xh() argument]
    31  dst += stride;  [in dc_store_8xh()]
    36  ptrdiff_t stride) {  [in dc_store_16xh() argument]
    40  dst += stride;  [in dc_store_16xh()]
    45  ptrdiff_t stride) {  [in dc_store_32xh() argument]
    50  dst += stride;  [in dc_store_32xh()]
    55  ptrdiff_t stride) {  [in dc_store_64xh() argument]
    [all …]

D | highbd_intrapred_sse2.c
    19  void aom_highbd_h_predictor_4x4_sse2(uint16_t *dst, ptrdiff_t stride,  [in aom_highbd_h_predictor_4x4_sse2() argument]
    30  dst += stride;  [in aom_highbd_h_predictor_4x4_sse2()]
    32  dst += stride;  [in aom_highbd_h_predictor_4x4_sse2()]
    34  dst += stride;  [in aom_highbd_h_predictor_4x4_sse2()]
    38  void aom_highbd_h_predictor_4x8_sse2(uint16_t *dst, ptrdiff_t stride,  [in aom_highbd_h_predictor_4x8_sse2() argument]
    41  aom_highbd_h_predictor_4x4_sse2(dst, stride, above, left, bd);  [in aom_highbd_h_predictor_4x8_sse2()]
    42  dst += stride << 2;  [in aom_highbd_h_predictor_4x8_sse2()]
    44  aom_highbd_h_predictor_4x4_sse2(dst, stride, above, left, bd);  [in aom_highbd_h_predictor_4x8_sse2()]
    47  void aom_highbd_h_predictor_8x4_sse2(uint16_t *dst, ptrdiff_t stride,  [in aom_highbd_h_predictor_8x4_sse2() argument]
    58  dst += stride;  [in aom_highbd_h_predictor_8x4_sse2()]
    [all …]

/external/mesa3d/src/gallium/drivers/llvmpipe/

D | lp_setup_vbuf.c
   145  get_vert(const void *vertex_buffer, int index, int stride)  [in get_vert() argument]
   147  return (const_float4_ptr)((char *)vertex_buffer + index * stride);  [in get_vert()]
   175  const unsigned stride = setup->vertex_info->size * sizeof(float);  [in lp_setup_draw_elements() local]
   192  get_vert(vertex_buffer, indices[i-0], stride));  [in lp_setup_draw_elements()]
   199  get_vert(vertex_buffer, indices[i-1], stride),  [in lp_setup_draw_elements()]
   200  get_vert(vertex_buffer, indices[i-0], stride));  [in lp_setup_draw_elements()]
   207  get_vert(vertex_buffer, indices[i-1], stride),  [in lp_setup_draw_elements()]
   208  get_vert(vertex_buffer, indices[i-0], stride));  [in lp_setup_draw_elements()]
   215  get_vert(vertex_buffer, indices[i-1], stride),  [in lp_setup_draw_elements()]
   216  get_vert(vertex_buffer, indices[i-0], stride));  [in lp_setup_draw_elements()]
   [all …]

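get_vert() above is plain stride arithmetic: vertices are records laid out stride bytes apart, so vertex i starts at base + i * stride. A minimal C sketch of the same addressing follows; the names and the float return type are assumptions for illustration, not Mesa's types.

    #include <stddef.h>

    /* Hypothetical helper: return a pointer to vertex 'index' in a buffer whose
     * records are 'stride' bytes apart (here, a packed array of floats). */
    static const float *get_vertex(const void *vertex_buffer, int index,
                                   unsigned stride) {
      return (const float *)((const char *)vertex_buffer +
                             (size_t)index * stride);
    }

    /* Example: with 6 floats per vertex the byte stride is 6 * sizeof(float),
     * so vertex 2 begins 48 bytes into the buffer. */
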
/external/mesa3d/src/gallium/drivers/softpipe/

D | sp_prim_vbuf.c
   150  int stride )  [in get_vert() argument]
   152  return (cptrf4)((char *)vertex_buffer + index * stride);  [in get_vert()]
   164  const unsigned stride = softpipe->vertex_info.size * sizeof(float);  [in sp_vbuf_draw_elements() local]
   174  get_vert(vertex_buffer, indices[i-0], stride) );  [in sp_vbuf_draw_elements()]
   181  get_vert(vertex_buffer, indices[i-1], stride),  [in sp_vbuf_draw_elements()]
   182  get_vert(vertex_buffer, indices[i-0], stride) );  [in sp_vbuf_draw_elements()]
   189  get_vert(vertex_buffer, indices[i-1], stride),  [in sp_vbuf_draw_elements()]
   190  get_vert(vertex_buffer, indices[i-0], stride) );  [in sp_vbuf_draw_elements()]
   197  get_vert(vertex_buffer, indices[i-1], stride),  [in sp_vbuf_draw_elements()]
   198  get_vert(vertex_buffer, indices[i-0], stride) );  [in sp_vbuf_draw_elements()]
   [all …]

/external/libdav1d/src/

D | itx_1d.c
    66  inv_dct4_1d_internal_c(int32_t *const c, const ptrdiff_t stride,  [in inv_dct4_1d_internal_c() argument]
    69  assert(stride > 0);  [in inv_dct4_1d_internal_c()]
    70  const int in0 = c[0 * stride], in1 = c[1 * stride];  [in inv_dct4_1d_internal_c()]
    78  const int in2 = c[2 * stride], in3 = c[3 * stride];  [in inv_dct4_1d_internal_c()]
    86  c[0 * stride] = CLIP(t0 + t3);  [in inv_dct4_1d_internal_c()]
    87  c[1 * stride] = CLIP(t1 + t2);  [in inv_dct4_1d_internal_c()]
    88  c[2 * stride] = CLIP(t1 - t2);  [in inv_dct4_1d_internal_c()]
    89  c[3 * stride] = CLIP(t0 - t3);  [in inv_dct4_1d_internal_c()]
    92  static void inv_dct4_1d_c(int32_t *const c, const ptrdiff_t stride,  [in inv_dct4_1d_c() argument]
    95  inv_dct4_1d_internal_c(c, stride, min, max, 0);  [in inv_dct4_1d_c()]
    [all …]

/external/libaom/av1/common/

D | idct.c
    34  void av1_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,  [in av1_highbd_iwht4x4_add() argument]
    37  av1_highbd_iwht4x4_16_add(input, dest, stride, bd);  [in av1_highbd_iwht4x4_add()]
    39  av1_highbd_iwht4x4_1_add(input, dest, stride, bd);  [in av1_highbd_iwht4x4_add()]
    43  int stride, const TxfmParam *txfm_param) {  [in highbd_inv_txfm_add_4x4_c() argument]
    52  av1_highbd_iwht4x4_add(input, dest, stride, eob, bd);  [in highbd_inv_txfm_add_4x4_c()]
    56  av1_inv_txfm2d_add_4x4_c(src, CONVERT_TO_SHORTPTR(dest), stride, tx_type, bd);  [in highbd_inv_txfm_add_4x4_c()]
    60  int stride, const TxfmParam *txfm_param) {  [in highbd_inv_txfm_add_4x8_c() argument]
    63  av1_inv_txfm2d_add_4x8_c(src, CONVERT_TO_SHORTPTR(dest), stride,  [in highbd_inv_txfm_add_4x8_c()]
    68  int stride, const TxfmParam *txfm_param) {  [in highbd_inv_txfm_add_8x4_c() argument]
    71  av1_inv_txfm2d_add_8x4_c(src, CONVERT_TO_SHORTPTR(dest), stride,  [in highbd_inv_txfm_add_8x4_c()]
    [all …]

/external/libvpx/vp9/common/

D | vp9_idct.c
    20  void vp9_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,  [in vp9_iht4x4_16_add_c() argument]
    46  dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],  [in vp9_iht4x4_16_add_c()]
    59  void vp9_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,  [in vp9_iht8x8_64_add_c() argument]
    79  dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],  [in vp9_iht8x8_64_add_c()]
    92  void vp9_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,  [in vp9_iht16x16_256_add_c() argument]
   112  dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],  [in vp9_iht16x16_256_add_c()]
   119  void vp9_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,  [in vp9_idct4x4_add() argument]
   122  vpx_idct4x4_16_add(input, dest, stride);  [in vp9_idct4x4_add()]
   124  vpx_idct4x4_1_add(input, dest, stride);  [in vp9_idct4x4_add()]
   127  void vp9_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,  [in vp9_iwht4x4_add() argument]
   [all …]

/external/v4l-utils/lib/libv4lconvert/

D | bayer.c
   167  unsigned char *bgr, int width, int height, const unsigned int stride, unsigned int pixfmt,  [in bayer_to_rgbbgr24() argument]
   171  v4lconvert_border_bayer_line_to_bgr24(bayer, bayer + stride, bgr, width,  [in bayer_to_rgbbgr24()]
   183  t0 = (bayer[1] + bayer[stride * 2 + 1] + 1) >> 1;  [in bayer_to_rgbbgr24()]
   185  t1 = (bayer[0] + bayer[stride * 2] + bayer[stride + 1] + 1) / 3;  [in bayer_to_rgbbgr24()]
   189  *bgr++ = bayer[stride];  [in bayer_to_rgbbgr24()]
   191  *bgr++ = bayer[stride];  [in bayer_to_rgbbgr24()]
   197  t1 = (bayer[stride] + bayer[stride + 2] + 1) >> 1;  [in bayer_to_rgbbgr24()]
   200  *bgr++ = bayer[stride + 1];  [in bayer_to_rgbbgr24()]
   204  *bgr++ = bayer[stride + 1];  [in bayer_to_rgbbgr24()]
   210  t0 = (bayer[0] + bayer[stride * 2] + 1) >> 1;  [in bayer_to_rgbbgr24()]
   [all …]

/external/deqp/framework/delibs/decpp/

D | deArrayBuffer.hpp
    47  template <typename T, size_t Alignment = (sizeof(T) > 4 ? 4 : sizeof(T)), size_t Stride = sizeof(T)>
    51  DE_STATIC_ASSERT(Stride >= sizeof(T));
    76  template <typename T, size_t Alignment, size_t Stride>
    77  ArrayBuffer<T, Alignment, Stride>::ArrayBuffer(void) throw() : m_ptr(nullptr)  [in ArrayBuffer()]
    82  template <typename T, size_t Alignment, size_t Stride>
    83  ArrayBuffer<T, Alignment, Stride>::ArrayBuffer(size_t numElements) : m_ptr(nullptr)  [in ArrayBuffer()]
    88  …// \note no need to allocate stride for the last element, sizeof(T) is enough. Also handles cases …  [in ArrayBuffer()]
    89  const size_t storageSize = (numElements - 1) * Stride + sizeof(T);  [in ArrayBuffer()]
   100  template <typename T, size_t Alignment, size_t Stride>
   101  ArrayBuffer<T, Alignment, Stride>::ArrayBuffer(const T *ptr, size_t numElements) : m_ptr(nullptr)  [in ArrayBuffer()]
   [all …]

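The storageSize expression visible above, (numElements - 1) * Stride + sizeof(T), reserves stride-spaced elements without paying for padding after the last one. A hedged C illustration of that arithmetic, and of addressing element i at offset i * stride, follows; the names are invented here.

    #include <stddef.h>
    #include <stdlib.h>

    /* Strided storage for 'count' elements of 'elem_size' bytes placed 'stride'
     * bytes apart (stride >= elem_size): only (count - 1) * stride + elem_size
     * bytes are needed, since the gap after the last element is never touched. */
    static void *alloc_strided(size_t count, size_t elem_size, size_t stride) {
      if (count == 0) return NULL;
      return malloc((count - 1) * stride + elem_size);
    }

    /* Element i starts i * stride bytes from the base pointer. */
    static void *strided_element(void *base, size_t i, size_t stride) {
      return (char *)base + i * stride;
    }
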
/external/libvpx/vpx_dsp/ppc/

D | intrapred_vsx.c
    14  void vpx_v_predictor_16x16_vsx(uint8_t *dst, ptrdiff_t stride,  [in vpx_v_predictor_16x16_vsx() argument]
    20  for (i = 0; i < 16; i++, dst += stride) {  [in vpx_v_predictor_16x16_vsx()]
    25  void vpx_v_predictor_32x32_vsx(uint8_t *dst, ptrdiff_t stride,  [in vpx_v_predictor_32x32_vsx() argument]
    32  for (i = 0; i < 32; i++, dst += stride) {  [in vpx_v_predictor_32x32_vsx()]
    42  void vpx_h_predictor_4x4_vsx(uint8_t *dst, ptrdiff_t stride,
    53  dst += stride;
    55  dst += stride;
    57  dst += stride;
    61  void vpx_h_predictor_8x8_vsx(uint8_t *dst, ptrdiff_t stride,
    77  dst += stride;
    [all …]

/external/webp/swig/

D | libwebp.py
   131  def wrap_WebPEncodeRGB(rgb, unused1, unused2, width, height, stride, quality_factor):  [argument]
   133  return _libwebp.wrap_WebPEncodeRGB(rgb, unused1, unused2, width, height, stride, quality_factor)
   135  def wrap_WebPEncodeBGR(rgb, unused1, unused2, width, height, stride, quality_factor):  [argument]
   137  return _libwebp.wrap_WebPEncodeBGR(rgb, unused1, unused2, width, height, stride, quality_factor)
   139  def wrap_WebPEncodeRGBA(rgb, unused1, unused2, width, height, stride, quality_factor):  [argument]
   141  … return _libwebp.wrap_WebPEncodeRGBA(rgb, unused1, unused2, width, height, stride, quality_factor)
   143  def wrap_WebPEncodeBGRA(rgb, unused1, unused2, width, height, stride, quality_factor):  [argument]
   145  … return _libwebp.wrap_WebPEncodeBGRA(rgb, unused1, unused2, width, height, stride, quality_factor)
   147  def wrap_WebPEncodeLosslessRGB(rgb, unused1, unused2, width, height, stride):  [argument]
   149  return _libwebp.wrap_WebPEncodeLosslessRGB(rgb, unused1, unused2, width, height, stride)
   [all …]

/external/libaom/aom_dsp/

D | fft_common.h
    32  * Non-vectorized transforms (e.g., on a single row) would use a stride = 1.
    36  * for input and output is typically square (n x n) and the stride will
    42  * \param[in] stride The spacing in number of elements between rows
    46  int stride);
    50  void aom_fft1d_2_float(const float *input, float *output, int stride);
    51  void aom_fft1d_4_float(const float *input, float *output, int stride);
    52  void aom_fft1d_8_float(const float *input, float *output, int stride);
    53  void aom_fft1d_16_float(const float *input, float *output, int stride);
    54  void aom_fft1d_32_float(const float *input, float *output, int stride);
   119  ret aom_fft1d_2_##suffix(const T *input, T *output, int stride) { \
   [all …]

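The comments in fft_common.h spell out the usual convention: a 1-D pass over a contiguous row uses stride 1, and the same routine run down a column of a row-major n x n array uses a stride equal to the row spacing n. A generic sketch of such a strided 1-D kernel is below (a 2-point butterfly written here for illustration, not copied from libaom).

    /* A strided 2-point transform: elements are read and written 'stride'
     * floats apart.  stride == 1 processes a contiguous row; stride == n
     * processes one column of a row-major n x n array. */
    static void butterfly2_float(const float *input, float *output, int stride) {
      output[0 * stride] = input[0 * stride] + input[1 * stride];
      output[1 * stride] = input[0 * stride] - input[1 * stride];
    }
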
D | intrapred.c
    23  static inline void v_predictor(uint8_t *dst, ptrdiff_t stride, int bw, int bh,  [in v_predictor() argument]
    30  dst += stride;  [in v_predictor()]
    34  static inline void h_predictor(uint8_t *dst, ptrdiff_t stride, int bw, int bh,  [in h_predictor() argument]
    41  dst += stride;  [in h_predictor()]
    60  static inline void paeth_predictor(uint8_t *dst, ptrdiff_t stride, int bw,  [in paeth_predictor() argument]
    69  dst += stride;  [in paeth_predictor()]
    84  static inline void smooth_predictor(uint8_t *dst, ptrdiff_t stride, int bw,  [in smooth_predictor() argument]
   111  dst += stride;  [in smooth_predictor()]
   115  static inline void smooth_v_predictor(uint8_t *dst, ptrdiff_t stride, int bw,  [in smooth_v_predictor() argument]
   140  dst += stride;  [in smooth_v_predictor()]
   [all …]

/external/pdfium/third_party/libtiff/

D | tif_predict.c
   107  sp->stride =  [in PredictorSetup()]
   344  tmsize_t stride = PredictorState(tif)->stride;  [in horAcc8() local]
   347  if ((cc % stride) != 0)  [in horAcc8()]
   349  TIFFErrorExtR(tif, "horAcc8", "%s", "(cc%stride)!=0");  [in horAcc8()]
   353  if (cc > stride)  [in horAcc8()]
   358  if (stride == 3)  [in horAcc8()]
   363  tmsize_t i = stride;  [in horAcc8()]
   364  for (; i < cc; i += stride)  [in horAcc8()]
   371  else if (stride == 4)  [in horAcc8()]
   377  tmsize_t i = stride;  [in horAcc8()]
   [all …]

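horAcc8() undoes TIFF horizontal differencing: each 8-bit sample is stored as the difference from the sample stride bytes earlier (stride being samples per pixel), so decoding keeps a running sum per channel. A generic, hedged sketch of that accumulation, without libtiff's unrolled stride == 3 and stride == 4 fast paths and with an invented name:

    #include <stddef.h>
    #include <stdint.h>

    /* Horizontal-predictor accumulation for 8-bit samples: on entry cp[i] holds
     * the difference against the sample 'stride' bytes earlier; on exit it holds
     * the reconstructed value.  Assumes cc is a multiple of stride, which the
     * real code checks. */
    static void hor_acc8(uint8_t *cp, size_t cc, size_t stride) {
      size_t i;
      for (i = stride; i < cc; ++i) {
        cp[i] = (uint8_t)(cp[i] + cp[i - stride]);
      }
    }
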
/external/libaom/third_party/SVT-AV1/

D | EbMemory_AVX2.h
    33  const ptrdiff_t stride) {  [in load_u8_4x2_avx2() argument]
    35  src01 = _mm_cvtsi32_si128(*(int32_t *)(src + 0 * stride));  [in load_u8_4x2_avx2()]
    36  src01 = _mm_insert_epi32(src01, *(int32_t *)(src + 1 * stride), 1);  [in load_u8_4x2_avx2()]
    41  const ptrdiff_t stride) {  [in load_u8_4x4_avx2() argument]
    43  src01 = _mm_cvtsi32_si128(*(int32_t *)(src + 0 * stride));  [in load_u8_4x4_avx2()]
    44  src01 = _mm_insert_epi32(src01, *(int32_t *)(src + 1 * stride), 1);  [in load_u8_4x4_avx2()]
    45  src23 = _mm_cvtsi32_si128(*(int32_t *)(src + 2 * stride));  [in load_u8_4x4_avx2()]
    46  src23 = _mm_insert_epi32(src23, *(int32_t *)(src + 3 * stride), 1);  [in load_u8_4x4_avx2()]
    51  const ptrdiff_t stride) {  [in load_u8_8x2_avx2() argument]
    52  const __m128i src0 = _mm_loadl_epi64((__m128i *)(src + 0 * stride));  [in load_u8_8x2_avx2()]
    [all …]

/external/libgav1/src/dsp/x86/

D | intrapred_sse4.cc
    59  using DcStoreFunc = void (*)(void* dest, ptrdiff_t stride, const __m128i dc);
    60  using WriteDuplicateFunc = void (*)(void* dest, ptrdiff_t stride,
    63  using ColumnStoreFunc = void (*)(void* dest, ptrdiff_t stride,
    72  static void DcTop(void* dest, ptrdiff_t stride, const void* top_row,
    74  static void DcLeft(void* dest, ptrdiff_t stride, const void* top_row,
    76  static void Dc(void* dest, ptrdiff_t stride, const void* top_row,
    85  static void Vertical(void* dest, ptrdiff_t stride, const void* top_row,
    87  static void Horizontal(void* dest, ptrdiff_t stride, const void* top_row,
    95  dc_mult>::DcTop(void* LIBGAV1_RESTRICT const dest, ptrdiff_t stride,  [in DcTop() argument]
   101  storefn(dest, stride, dc);  [in DcTop()]
   [all …]

/external/pytorch/aten/src/ATen/native/

D | StridedRandomAccessor.h
    46  ConstStridedRandomAccessor(PtrType ptr, index_t stride)  [in ConstStridedRandomAccessor() argument]
    47  : ptr{ptr}, stride{stride}  [in ConstStridedRandomAccessor()]
    52  : ptr{ptr}, stride{static_cast<index_t>(1)}  [in ConstStridedRandomAccessor()]
    57  : ptr{nullptr}, stride{static_cast<index_t>(1)}  [in ConstStridedRandomAccessor()]
    74  return ptr[idx * stride];
    81  ptr += stride;
    94  ptr -= stride;
   109  ptr += offset * stride;
   115  return ConstStridedRandomAccessor(ptr + offset * stride, stride);
   128  ptr -= offset * stride;
   [all …]
|