Searched refs:src8 (Results 1 – 25 of 67) sorted by relevance

/external/libvpx/libvpx/vpx_dsp/x86/
highbd_variance_sse2.c
94 const uint8_t *src8, int src_stride, const uint8_t *ref8, \
96 uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
103 const uint8_t *src8, int src_stride, const uint8_t *ref8, \
105 uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
114 const uint8_t *src8, int src_stride, const uint8_t *ref8, \
116 uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
131 const uint8_t *src8, int src_stride, const uint8_t *ref8, \
134 uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
143 const uint8_t *src8, int src_stride, const uint8_t *ref8, \
147 uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
[all …]
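
Note: these high-bitdepth kernels take a const uint8_t *src8 but immediately reinterpret it, because libvpx/libaom route 10/12-bit (uint16_t) frame buffers through the same uint8_t * signatures as the 8-bit paths; the test code further down (pickrst_test.cc) goes the other way with CONVERT_TO_BYTEPTR. Below is a minimal C sketch of that convention and of the calling pattern visible in the snippets. The shift-based macro definitions mirror vpx_dsp_common.h / aom_dsp_common.h but are reproduced here as an assumption, and highbd_sse_sketch is a hypothetical helper, not library code.

  #include <stdint.h>

  /* High-bitdepth pointer convention (assumed form): a uint16_t buffer is
   * carried through a uint8_t * parameter so one function table can serve
   * both the 8-bit and the 10/12-bit paths. */
  #define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1))
  #define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)(x)) >> 1))

  /* Hypothetical helper showing the pattern from the snippets above. */
  static uint64_t highbd_sse_sketch(const uint8_t *src8, int src_stride,
                                    const uint8_t *ref8, int ref_stride,
                                    int w, int h) {
    const uint16_t *src = CONVERT_TO_SHORTPTR(src8);  /* real 16-bit samples */
    const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
    uint64_t sse = 0;
    for (int y = 0; y < h; ++y) {
      for (int x = 0; x < w; ++x) {
        const int d = src[y * src_stride + x] - ref[y * ref_stride + x];
        sse += (uint64_t)(d * d);
      }
    }
    return sse;
  }
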
/external/libaom/libaom/aom_dsp/mips/
aom_convolve8_vert_msa.c
22 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_4w_msa() local
45 LD_SB4(src, src_stride, src7, src8, src9, src10); in common_vt_8t_4w_msa()
48 ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r, in common_vt_8t_4w_msa()
73 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_8w_msa() local
92 LD_SB4(src, src_stride, src7, src8, src9, src10); in common_vt_8t_8w_msa()
93 XORI_B4_128_SB(src7, src8, src9, src10); in common_vt_8t_8w_msa()
96 ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r, in common_vt_8t_8w_msa()
127 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_16w_msa() local
151 LD_SB4(src, src_stride, src7, src8, src9, src10); in common_vt_8t_16w_msa()
152 XORI_B4_128_SB(src7, src8, src9, src10); in common_vt_8t_16w_msa()
[all …]
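
Note: the common_vt_8t_*_msa kernels keep a sliding window of eleven source rows (src0…src10), load four new rows per iteration with LD_SB4, and interleave them (ILVR_B4_SB) so that four output rows fall out of each pass of the 8-tap vertical filter. A scalar sketch of the underlying filter follows; the FILTER_BITS rounding and all names are illustrative assumptions, not the MSA code itself.

  #include <stdint.h>

  #define FILTER_BITS 7  /* assumed rounding precision for this sketch */

  static uint8_t clip_u8(int v) { return v < 0 ? 0 : (v > 255 ? 255 : (uint8_t)v); }

  /* Scalar 8-tap vertical convolution: each output row is a weighted sum of
   * eight consecutive input rows; the MSA kernels above produce four output
   * rows per iteration from a sliding window of eleven loaded rows. */
  static void convolve8_vert_sketch(const uint8_t *src, int src_stride,
                                    uint8_t *dst, int dst_stride,
                                    const int16_t filter[8], int w, int h) {
    /* src points at the first row of the 8-row window for output row 0. */
    for (int y = 0; y < h; ++y) {
      for (int x = 0; x < w; ++x) {
        int sum = 0;
        for (int k = 0; k < 8; ++k)
          sum += filter[k] * src[(y + k) * src_stride + x];
        dst[y * dst_stride + x] =
            clip_u8((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
      }
    }
  }
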
/external/libvpx/libvpx/vpx_dsp/mips/
vpx_convolve8_vert_msa.c
19 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_4w_msa() local
42 LD_SB4(src, src_stride, src7, src8, src9, src10); in common_vt_8t_4w_msa()
45 ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r, in common_vt_8t_4w_msa()
70 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_8w_msa() local
89 LD_SB4(src, src_stride, src7, src8, src9, src10); in common_vt_8t_8w_msa()
90 XORI_B4_128_SB(src7, src8, src9, src10); in common_vt_8t_8w_msa()
93 ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r, in common_vt_8t_8w_msa()
124 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_16w_msa() local
148 LD_SB4(src, src_stride, src7, src8, src9, src10); in common_vt_8t_16w_msa()
149 XORI_B4_128_SB(src7, src8, src9, src10); in common_vt_8t_16w_msa()
[all …]
vpx_convolve8_avg_vert_msa.c
21 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_and_aver_dst_4w_msa() local
44 LD_SB4(src, src_stride, src7, src8, src9, src10); in common_vt_8t_and_aver_dst_4w_msa()
49 ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r, in common_vt_8t_and_aver_dst_4w_msa()
78 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_and_aver_dst_8w_msa() local
98 LD_SB4(src, src_stride, src7, src8, src9, src10); in common_vt_8t_and_aver_dst_8w_msa()
104 XORI_B4_128_SB(src7, src8, src9, src10); in common_vt_8t_and_aver_dst_8w_msa()
105 ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r, in common_vt_8t_and_aver_dst_8w_msa()
137 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_and_aver_dst_16w_mult_msa() local
166 LD_SB4(src_tmp, src_stride, src7, src8, src9, src10); in common_vt_8t_and_aver_dst_16w_mult_msa()
170 XORI_B4_128_SB(src7, src8, src9, src10); in common_vt_8t_and_aver_dst_16w_mult_msa()
[all …]
avg_msa.c
86 v8i16 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in vpx_hadamard_16x16_msa() local
91 LD_SH2(src, 8, src0, src8); in vpx_hadamard_16x16_msa()
110 BUTTERFLY_8(src8, src10, src12, src14, src15, src13, src11, src9, tmp8, tmp10, in vpx_hadamard_16x16_msa()
129 BUTTERFLY_8(tmp8, tmp9, tmp12, tmp13, tmp15, tmp14, tmp11, tmp10, src8, src9, in vpx_hadamard_16x16_msa()
131 BUTTERFLY_8(src8, src9, src10, src11, src15, src14, src13, src12, tmp8, tmp15, in vpx_hadamard_16x16_msa()
133 TRANSPOSE8x8_SH_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, src8, in vpx_hadamard_16x16_msa()
135 BUTTERFLY_8(src8, src10, src12, src14, src15, src13, src11, src9, tmp8, tmp10, in vpx_hadamard_16x16_msa()
137 BUTTERFLY_8(tmp8, tmp9, tmp12, tmp13, tmp15, tmp14, tmp11, tmp10, src8, src9, in vpx_hadamard_16x16_msa()
139 BUTTERFLY_8(src8, src9, src10, src11, src15, src14, src13, src12, tmp8, tmp15, in vpx_hadamard_16x16_msa()
144 LD_SH2(src, 8, src0, src8); in vpx_hadamard_16x16_msa()
[all …]
vpx_convolve_avg_msa.c
106 v16u8 src8, src9, src10, src11, src12, src13, src14, src15; in avg_width32_msa() local
117 LD_UB4(src, src_stride, src8, src10, src12, src14); in avg_width32_msa()
128 AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11, dst8, dst9, in avg_width32_msa()
147 v16u8 src8, src9, src10, src11, src12, src13, src14, src15; in avg_width64_msa() local
156 LD_UB4(src, 16, src8, src9, src10, src11); in avg_width64_msa()
174 AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11, dst8, dst9, in avg_width64_msa()
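
Note: the avg_width32/64 routines use AVER_UB4_UB, i.e. a rounded per-byte average of the incoming block with the bytes already in the destination (the _avg convolve paths blend into dst rather than overwrite it). A scalar sketch, with illustrative names:

  #include <stdint.h>

  /* Rounded byte average into an existing destination block, the scalar form
   * of the AVER_UB* pattern above. */
  static void avg_block_sketch(const uint8_t *src, int src_stride,
                               uint8_t *dst, int dst_stride, int w, int h) {
    for (int y = 0; y < h; ++y) {
      for (int x = 0; x < w; ++x)
        dst[x] = (uint8_t)((src[x] + dst[x] + 1) >> 1);  /* rounded mean */
      src += src_stride;
      dst += dst_stride;
    }
  }
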
vpx_convolve8_avg_msa.c
20 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_hv_8ht_8vt_and_aver_dst_4w_msa() local
59 LD_SB4(src, src_stride, src7, src8, src9, src10); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
60 XORI_B4_128_SB(src7, src8, src9, src10); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
65 hz_out7 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3, filt_hz0, in common_hv_8ht_8vt_and_aver_dst_4w_msa()
98 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_hv_8ht_8vt_and_aver_dst_8w_msa() local
144 LD_SB4(src, src_stride, src7, src8, src9, src10); in common_hv_8ht_8vt_and_aver_dst_8w_msa()
145 XORI_B4_128_SB(src7, src8, src9, src10); in common_hv_8ht_8vt_and_aver_dst_8w_msa()
158 hz_out8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3, filt_hz0, in common_hv_8ht_8vt_and_aver_dst_8w_msa()
268 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, mask; in common_hv_2ht_2vt_and_aver_dst_4x8_msa() local
286 src8 = LD_SB(src); in common_hv_2ht_2vt_and_aver_dst_4x8_msa()
[all …]
/external/libaom/libaom/aom_dsp/x86/
highbd_variance_sse2.c
105 void aom_highbd_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \
108 uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
115 const uint8_t *src8, int src_stride, const uint8_t *ref8, \
117 uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
126 const uint8_t *src8, int src_stride, const uint8_t *ref8, \
128 uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
143 const uint8_t *src8, int src_stride, const uint8_t *ref8, \
146 uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
155 const uint8_t *src8, int src_stride, const uint8_t *ref8, \
159 uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
[all …]
masked_sad_intrin_ssse3.c
231 const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
237 const uint8_t *src8, int src_stride, const uint8_t *ref8, \
241 return highbd_masked_sad_ssse3(src8, src_stride, ref8, ref_stride, \
244 return highbd_masked_sad_ssse3(src8, src_stride, second_pred8, m, ref8, \
250 const uint8_t *src8, int src_stride, const uint8_t *ref8, \
254 return aom_highbd_masked_sad4xh_ssse3(src8, src_stride, ref8, \
258 return aom_highbd_masked_sad4xh_ssse3(src8, src_stride, second_pred8, 4, \
287 const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride, in highbd_masked_sad_ssse3() argument
290 const uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src8); in highbd_masked_sad_ssse3()
344 unsigned int aom_highbd_masked_sad4xh_ssse3(const uint8_t *src8, int src_stride, in aom_highbd_masked_sad4xh_ssse3() argument
[all …]
/external/llvm/test/Transforms/SLPVectorizer/X86/
ctlz.ll
14 @src8 = common global [32 x i8] zeroinitializer, align 32
322 ; CHECK-NEXT: [[LD0:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
323 ; CHECK-NEXT: [[LD1:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
324 ; CHECK-NEXT: [[LD2:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
325 ; CHECK-NEXT: [[LD3:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
326 ; CHECK-NEXT: [[LD4:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
327 ; CHECK-NEXT: [[LD5:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
328 ; CHECK-NEXT: [[LD6:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
329 ; CHECK-NEXT: [[LD7:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
330 ; CHECK-NEXT: [[LD8:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
[all …]
cttz.ll
14 @src8 = common global [32 x i8] zeroinitializer, align 32
322 ; CHECK-NEXT: [[LD0:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
323 ; CHECK-NEXT: [[LD1:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
324 ; CHECK-NEXT: [[LD2:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
325 ; CHECK-NEXT: [[LD3:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
326 ; CHECK-NEXT: [[LD4:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
327 ; CHECK-NEXT: [[LD5:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
328 ; CHECK-NEXT: [[LD6:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
329 ; CHECK-NEXT: [[LD7:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
330 ; CHECK-NEXT: [[LD8:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
[all …]
bitreverse.ll
16 @src8 = common global [32 x i8] zeroinitializer, align 32
410 ; SSE-NEXT: [[LD0:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0…
411 ; SSE-NEXT: [[LD1:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0…
412 ; SSE-NEXT: [[LD2:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0…
413 ; SSE-NEXT: [[LD3:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0…
414 ; SSE-NEXT: [[LD4:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0…
415 ; SSE-NEXT: [[LD5:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0…
416 ; SSE-NEXT: [[LD6:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0…
417 ; SSE-NEXT: [[LD7:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0…
418 ; SSE-NEXT: [[LD8:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0…
[all …]
ctpop.ll
14 @src8 = common global [32 x i8] zeroinitializer, align 32
237 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*)…
242 %ld0 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 0), align 1
243 %ld1 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 1), align 1
244 %ld2 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 2), align 1
245 %ld3 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 3), align 1
246 %ld4 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 4), align 1
247 %ld5 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 5), align 1
248 %ld6 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 6), align 1
249 %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
[all …]
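
Note: the SLPVectorizer tests in this directory all follow one shape. @src8 is a 32-byte global, each byte is loaded as a scalar i8, a per-element intrinsic (ctlz here; cttz, ctpop, bitreverse in the sibling files) is applied, and the CHECK lines expect the scalars to be folded into <16 x i8> loads and vector intrinsics. The uitofp/sitofp tests in the copy below use a 64-byte @src8 with the same structure around i8-to-float conversions. A C-level sketch of the scalar kernel being vectorized follows; dst8 and ctlz8 are hypothetical stand-ins (the tests store to their own globals and call llvm.ctlz.i8 directly).

  #include <stdint.h>

  /* Scalar shape of the SLP test kernels: one load, one bitwise op and one
   * store per byte; the vectorizer is expected to turn 16 of these into a
   * single <16 x i8> load / intrinsic / store. */
  static uint8_t src8[32];
  static uint8_t dst8[32];   /* hypothetical destination global */

  static uint8_t ctlz8(uint8_t v) {      /* stand-in for llvm.ctlz.i8 */
    uint8_t n = 0;
    for (uint8_t mask = 0x80; mask != 0 && (v & mask) == 0; mask >>= 1) ++n;
    return n;                            /* returns 8 when v == 0 */
  }

  static void ctlz_32_bytes(void) {
    for (int i = 0; i < 32; ++i) dst8[i] = ctlz8(src8[i]);
  }
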
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/
bitreverse.ll
16 @src8 = common global [32 x i8] zeroinitializer, align 32
257 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*)…
262 %ld0 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 0), align 1
263 %ld1 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 1), align 1
264 %ld2 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 2), align 1
265 %ld3 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 3), align 1
266 %ld4 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 4), align 1
267 %ld5 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 5), align 1
268 %ld6 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 6), align 1
269 %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
[all …]
cttz.ll
15 @src8 = common global [32 x i8] zeroinitializer, align 32
299 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*)…
304 %ld0 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 0), align 1
305 %ld1 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 1), align 1
306 %ld2 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 2), align 1
307 %ld3 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 3), align 1
308 %ld4 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 4), align 1
309 %ld5 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 5), align 1
310 %ld6 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 6), align 1
311 %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
[all …]
ctlz.ll
15 @src8 = common global [32 x i8] zeroinitializer, align 32
299 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*)…
304 %ld0 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 0), align 1
305 %ld1 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 1), align 1
306 %ld2 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 2), align 1
307 %ld3 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 3), align 1
308 %ld4 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 4), align 1
309 %ld5 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 5), align 1
310 %ld6 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 6), align 1
311 %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
[all …]
ctpop.ll
15 @src8 = common global [32 x i8] zeroinitializer, align 32
346 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*)…
351 %ld0 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 0), align 1
352 %ld1 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 1), align 1
353 %ld2 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 2), align 1
354 %ld3 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 3), align 1
355 %ld4 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 4), align 1
356 %ld5 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 5), align 1
357 %ld6 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 6), align 1
358 %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
[all …]
uitofp.ll
13 @src8 = common global [64 x i8] zeroinitializer, align 64
465 ; SSE-NEXT: [[LD0:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 …
466 ; SSE-NEXT: [[LD1:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 …
474 ; AVX256-NEXT: [[LD0:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i…
475 ; AVX256-NEXT: [[LD1:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i…
483 ; AVX512-NEXT: [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* bitcast ([64 x i8]* @src8 to <2 x i8>*), …
488 %ld0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 0, i64 0), align 64
489 %ld1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 0, i64 1), align 1
499 ; SSE-NEXT: [[LD0:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 …
500 ; SSE-NEXT: [[LD1:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 …
[all …]
sitofp.ll
13 @src8 = common global [64 x i8] zeroinitializer, align 64
450 ; CHECK-NEXT: [[LD0:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i3…
451 ; CHECK-NEXT: [[LD1:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i3…
458 %ld0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 0, i64 0), align 64
459 %ld1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 0, i64 1), align 1
469 ; SSE-NEXT: [[LD0:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 …
470 ; SSE-NEXT: [[LD1:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 …
471 ; SSE-NEXT: [[LD2:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 …
472 ; SSE-NEXT: [[LD3:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 …
484 ; AVX-NEXT: [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* bitcast ([64 x i8]* @src8 to <4 x i8>*), ali…
[all …]
/external/libpng/mips/
filter_msa_intrinsics.c
550 v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, dst0, dst1; in png_read_filter_row_avg4_msa() local
573 SLDI_B2_0_UB(src2, src6, src4, src8, 8); in png_read_filter_row_avg4_msa()
580 src8 += src4; in png_read_filter_row_avg4_msa()
581 src5 = __msa_ave_u_b(src5, src8); in png_read_filter_row_avg4_msa()
584 ILVEV_W2_UB(src6, src7, src8, src9, dst0, dst1); in png_read_filter_row_avg4_msa()
603 v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, dst0, dst1; in png_read_filter_row_avg3_msa() local
630 SLDI_B2_0_UB(src2, src6, src4, src8, 6); in png_read_filter_row_avg3_msa()
637 src8 += src4; in png_read_filter_row_avg3_msa()
638 src5 = __msa_ave_u_b(src5, src8); in png_read_filter_row_avg3_msa()
641 VSHF_B2_UB(src6, src7, src8, src9, mask0, mask0, dst0, dst1); in png_read_filter_row_avg3_msa()
[all …]
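
Note: png_read_filter_row_avg3_msa / avg4_msa undo the PNG "Average" row filter (filter type 3) for 3- and 4-byte pixels: each reconstructed byte is the filtered byte plus the floor of the mean of the byte bpp positions to the left and the byte directly above, modulo 256. A scalar sketch under those spec-level assumptions; function and parameter names are illustrative, not libpng's.

  #include <stddef.h>
  #include <stdint.h>

  /* Scalar PNG "Average" defilter:
   * Recon(x) = Filt(x) + floor((Recon(a) + Recon(b)) / 2),
   * where a is the byte bpp positions earlier in this row and b the byte above. */
  static void defilter_avg_sketch(uint8_t *row, const uint8_t *prev_row,
                                  size_t rowbytes, size_t bpp) {
    for (size_t i = 0; i < rowbytes; ++i) {
      const unsigned left  = (i >= bpp) ? row[i - bpp] : 0;
      const unsigned above = (prev_row != NULL) ? prev_row[i] : 0;
      row[i] = (uint8_t)(row[i] + ((left + above) >> 1));  /* wraps mod 256 */
    }
  }
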
/external/libaom/libaom/test/
pickrst_test.cc
31 const uint8_t *src8, int width, int height, int src_stride,
196 const uint8_t *src8, int width, int height, int src_stride,
268 uint8_t *src8 = CONVERT_TO_BYTEPTR(src_); in RunPixelProjErrorTest() local
274 src8, h_end, v_end, src_stride, dgd8, dgd_stride, flt0_, flt0_stride, in RunPixelProjErrorTest()
282 target_func_(src8, h_end, v_end, src_stride, dgd8, dgd_stride, flt0_, in RunPixelProjErrorTest()
322 uint8_t *src8 = CONVERT_TO_BYTEPTR(src_); in RunPixelProjErrorTest_ExtremeValues() local
325 src8, h_end - h_start, v_end - v_start, src_stride, dgd8, dgd_stride, in RunPixelProjErrorTest_ExtremeValues()
328 err_test = target_func_(src8, h_end - h_start, v_end - v_start, src_stride, in RunPixelProjErrorTest_ExtremeValues()
/external/libaom/libaom/aom_dsp/
aom_convolve.c
148 static void highbd_convolve_horiz(const uint8_t *src8, ptrdiff_t src_stride, in highbd_convolve_horiz() argument
152 uint16_t *src = CONVERT_TO_SHORTPTR(src8); in highbd_convolve_horiz()
169 static void highbd_convolve_vert(const uint8_t *src8, ptrdiff_t src_stride, in highbd_convolve_vert() argument
173 uint16_t *src = CONVERT_TO_SHORTPTR(src8); in highbd_convolve_vert()
219 void aom_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride, in aom_highbd_convolve_copy_c() argument
225 uint16_t *src = CONVERT_TO_SHORTPTR(src8); in aom_highbd_convolve_copy_c()
sad_av1.c
82 unsigned int highbd_masked_sad(const uint8_t *src8, int src_stride, in highbd_masked_sad() argument
89 const uint16_t *src = CONVERT_TO_SHORTPTR(src8); in highbd_masked_sad()
111 const uint8_t *src8, int src_stride, const uint8_t *ref8, \
115 return highbd_masked_sad(src8, src_stride, ref8, ref_stride, \
118 return highbd_masked_sad(src8, src_stride, second_pred8, m, ref8, \
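
Note: highbd_masked_sad blends two high-bitdepth predictors with a per-pixel mask weight before accumulating the absolute difference against the source. The sketch below assumes AV1's usual 6-bit (0–64) blend weights and rounding; the constants and names are assumptions for illustration, the real definitions live in aom_dsp/sad_av1.c and aom_dsp/blend.h.

  #include <stdint.h>
  #include <stdlib.h>

  /* Masked SAD sketch: blend predictors a and b with weight m[x] in [0, 64],
   * then accumulate |blend - src|. Assumes the 6-bit rounding shown below. */
  static unsigned masked_sad_sketch(const uint16_t *src, int src_stride,
                                    const uint16_t *a, int a_stride,
                                    const uint16_t *b, int b_stride,
                                    const uint8_t *m, int m_stride,
                                    int width, int height) {
    unsigned sad = 0;
    for (int y = 0; y < height; ++y) {
      for (int x = 0; x < width; ++x) {
        const int w = m[x];
        const int pred = (w * a[x] + (64 - w) * b[x] + 32) >> 6;
        sad += (unsigned)abs(pred - (int)src[x]);
      }
      src += src_stride;
      a += a_stride;
      b += b_stride;
      m += m_stride;
    }
    return sad;
  }
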
/external/libvpx/libvpx/vp8/common/mips/msa/
sixtap_filter_msa.c
299 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; in common_vt_6t_4w_msa() local
319 LD_SB4(src, src_stride, src5, src6, src7, src8); in common_vt_6t_4w_msa()
322 ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r, in common_vt_6t_4w_msa()
336 src4 = src8; in common_vt_6t_4w_msa()
344 v16i8 src0, src1, src2, src3, src4, src7, src8, src9, src10; in common_vt_6t_8w_msa() local
363 LD_SB4(src, src_stride, src7, src8, src9, src10); in common_vt_6t_8w_msa()
364 XORI_B4_128_SB(src7, src8, src9, src10); in common_vt_6t_8w_msa()
367 ILVR_B4_SB(src7, src4, src8, src7, src9, src8, src10, src9, src76_r, in common_vt_6t_8w_msa()
392 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; in common_vt_6t_16w_msa() local
414 LD_SB4(src, src_stride, src5, src6, src7, src8); in common_vt_6t_16w_msa()
[all …]
copymem_msa.c
38 v16u8 src8, src9, src10, src11, src12, src13, src14, src15; in copy_16x16_msa() local
42 LD_UB8(src, src_stride, src8, src9, src10, src11, src12, src13, src14, src15); in copy_16x16_msa()
46 ST_UB8(src8, src9, src10, src11, src12, src13, src14, src15, dst, dst_stride); in copy_16x16_msa()
