/external/llvm/test/CodeGen/X86/ |
D | stack-protector-weight.ll |
    19: ; MSVC-SELDAG: mem:Volatile LD4[@__security_cookie]
    21: ; MSVC-SELDAG: LD4[FixedStack0]
    25: ; MSVC-IR: mem:Volatile LD4[@__security_cookie]
    27: ; MSVC-IR: LD4[%StackGuardSlot]
|
D | 2010-05-12-FastAllocKills.ll |
    9:  ; %reg1025<def> = MUL_Fp80m32 %reg1024, %RIP, 1, %reg0, <cp#0>, %reg0; mem:LD4[ConstantPool]
    20: ; %FP2<def> = MUL_Fp80m32 %FP1, %RIP, 1, %reg0, <cp#0>, %reg0; mem:LD4[ConstantPool]
|
/external/libvpx/libvpx/vp8/common/mips/msa/ |
D | copymem_msa.c |
    18: LD4(src, src_stride, src0, src1, src2, src3);  in copy_8x4_msa()
    26: LD4(src, src_stride, src0, src1, src2, src3);  in copy_8x8_msa()
    31: LD4(src, src_stride, src0, src1, src2, src3);  in copy_8x8_msa()
|
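Note: in the libvpx MSA sources (this file and the vpx_dsp/mips/ files further down), LD4 is a strided-load helper macro, not an instruction: it reads four 64-bit rows from a byte buffer at consecutive stride offsets. A minimal portable sketch of that pattern, inferred from the call sites above; the memcpy-based form is an illustration, not the shipped macros_msa.h definition:

  #include <stdint.h>
  #include <string.h>

  /* Sketch of LD4(psrc, stride, out0..out3): load four 64-bit rows
     from a uint8_t source at consecutive stride offsets. */
  #define LD4(psrc, stride, out0, out1, out2, out3)              \
    do {                                                         \
      memcpy(&(out0), (psrc) + 0 * (stride), sizeof(uint64_t));  \
      memcpy(&(out1), (psrc) + 1 * (stride), sizeof(uint64_t));  \
      memcpy(&(out2), (psrc) + 2 * (stride), sizeof(uint64_t));  \
      memcpy(&(out3), (psrc) + 3 * (stride), sizeof(uint64_t));  \
    } while (0)

So copy_8x4_msa() above fetches four 8-byte source rows in one shot before storing them out.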
/external/llvm/test/CodeGen/ARM/ |
D | ldrd-memoper.ll |
    8: ; CHECK: Formed {{.*}} t2LDRD{{.*}} mem:LD4[%0] LD4[%0+4]
|
D | subreg-remat.ll |
    8:  ; %vreg6:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg; mem:LD4[ConstantPool] DPR_VFP2:%vre…
    34: ; %vreg2:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg, %vreg2<imp-def>; mem:LD4[ConstantPo…
|
/external/llvm/test/CodeGen/PowerPC/ |
D | unal-altivec.ll |
    43: ; CHECK-DAG: lvx [[LD4:[0-9]+]], [[B4]], [[C15]]
    45: ; CHECK-DAG: vperm [[R2:[0-9]+]], [[LD3]], [[LD4]], [[MASK2]]
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | subreg-remat.ll |
    8:  ; %6:ssub_0 = VLDRS %const.0, 0, 14, %noreg; mem:LD4[ConstantPool] DPR_VFP2:%6
    34: ; %2:ssub_0 = VLDRS %const.0, 0, 14, %noreg, implicit-def %2; mem:LD4[ConstantPool]
|
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | 2010-05-12-FastAllocKills.ll |
    10: ; %reg1025 = MUL_Fp80m32 %reg1024, %rip, 1, %reg0, %const.0, %reg0; mem:LD4[ConstantPool]
    21: ; %fp2 = MUL_Fp80m32 %fp1, %rip, 1, %reg0, %const.0, %reg0; mem:LD4[ConstantPool]
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | cluster_stores.ll |
    24: ; GCN-NEXT: flat_load_dword [[LD4:v[0-9]+]], v[{{[0-9:]+}}] offset:24
    38: ; GCN-NEXT: flat_store_dword v[{{[0-9:]+}}], [[LD4]] offset:24
    69: ; GCN-NEXT: flat_load_dword [[LD4:v[0-9]+]], v[{{[0-9:]+}}] offset:24
    84: ; GCN-NEXT: flat_store_dword v[{{[0-9:]+}}], [[LD4]] offset:24
|
/external/libxaac/decoder/armv8/ |
D | ixheaacd_pre_twiddle.s |
    166: LD4 {v0.4h, v1.4h, v2.4h, v3.4h}, [x0], #32
    168: LD4 {v4.4h, v5.4h, v6.4h, v7.4h}, [x1], x8
    228: LD4 {v0.4h, v1.4h, v2.4h, v3.4h}, [x0], #32
    233: LD4 {v4.4h, v5.4h, v6.4h, v7.4h}, [x1], x8
    307: LD4 {v0.4h, v1.4h, v2.4h, v3.4h}, [x0], #32
    315: LD4 {v4.4h, v5.4h, v6.4h, v7.4h}, [x1], x8
|
D | ixheaacd_post_twiddle.s |
    139: LD4 {v0.4h, v1.4h, v2.4h, v3.4h}, [x5], x8
    141: LD4 {v4.4h, v5.4h, v6.4h, v7.4h}, [x1], #32
    252: LD4 {v0.4h, v1.4h, v2.4h, v3.4h}, [x5], x8
    263: LD4 {v4.4h, v5.4h, v6.4h, v7.4h}, [x1], #32
    380: LD4 {v0.4h, v1.4h, v2.4h, v3.4h}, [x5], x8
    392: LD4 {v4.4h, v5.4h, v6.4h, v7.4h}, [x1], #32
    540: LD4 {v0.4h, v1.4h, v2.4h, v3.4h}, [x5], x8
|
D | ixheaacd_sbr_imdct_using_fft.s |
    629: LD4 {V16.4H, V17.4H, V18.4H, V19.4H}, [X14], X12
    633: LD4 {V26.4H, V27.4H, V28.4H, V29.4H}, [X14], X12
    638: LD4 {V0.4H, V1.4H, V2.4H, V3.4H}, [X14], X12
|
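Note: in the libxaac assembly above, by contrast, LD4 is the Armv8-A NEON structure-load instruction: it reads consecutive 16-bit elements from memory and de-interleaves them across four vector registers. A minimal C sketch of the same semantics via the ACLE intrinsic (the wrapper function name is illustrative):

  #include <arm_neon.h>

  /* Equivalent of "LD4 {v0.4h, v1.4h, v2.4h, v3.4h}, [x0]": sixteen
     consecutive int16_t values are split so that elements 0,4,8,12
     land in val[0], elements 1,5,9,13 in val[1], and so on. */
  static int16x4x4_t load_deinterleave_4h(const int16_t *src) {
    return vld4_s16(src);
  }

The ", #32" and ", x8" suffixes in the listings are post-index addressing: the base register is advanced by that amount after the load.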
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-misched-memdep-bug.ll |
    8: ; CHECK: SU(2): %vreg2<def> = LDRWui %vreg0, 1; mem:LD4[%ptr1_plus1] GPR32:%vreg2 GPR64common:%vr…
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | vpx_convolve8_msa.c |
    647:  LD4(src_x, src_pitch, srcd0, srcd1, srcd2, srcd3);  in filter_horiz_w4_msa()
    685:  LD4(src_x, src_pitch, srcd0, srcd1, srcd2, srcd3);  in filter_horiz_w8_msa()
    688:  LD4(src_x + 4 * src_pitch, src_pitch, srcd0, srcd1, srcd2, srcd3);  in filter_horiz_w8_msa()
    737:  LD4(src_x, src_pitch, srcd0, srcd1, srcd2, srcd3);  in filter_horiz_w16_msa()
    740:  LD4(src_x + 4 * src_pitch, src_pitch, srcd0, srcd1, srcd2, srcd3);  in filter_horiz_w16_msa()
    743:  LD4(src_x + 8 * src_pitch, src_pitch, srcd0, srcd1, srcd2, srcd3);  in filter_horiz_w16_msa()
    746:  LD4(src_x + 12 * src_pitch, src_pitch, srcd0, srcd1, srcd2, srcd3);  in filter_horiz_w16_msa()
    1011: LD4(src_y, src_pitch, srcd0, srcd1, srcd2, srcd3);  in filter_vert_w8_msa()
    1014: LD4(src_y + 4 * src_pitch, src_pitch, srcd0, srcd1, srcd2, srcd3);  in filter_vert_w8_msa()
|
D | vpx_convolve8_avg_horiz_msa.c |
    132: LD4(dst, dst_stride, tp0, tp1, tp2, tp3);  in common_hz_8t_and_aver_dst_8w_msa()
    403: LD4(dst, dst_stride, tp0, tp1, tp2, tp3);  in common_hz_2t_and_aver_dst_8x4_msa()
    430: LD4(dst, dst_stride, tp0, tp1, tp2, tp3);  in common_hz_2t_and_aver_dst_8x8mult_msa()
    443: LD4(dst, dst_stride, tp0, tp1, tp2, tp3);  in common_hz_2t_and_aver_dst_8x8mult_msa()
    458: LD4(dst, dst_stride, tp0, tp1, tp2, tp3);  in common_hz_2t_and_aver_dst_8x8mult_msa()
    470: LD4(dst, dst_stride, tp0, tp1, tp2, tp3);  in common_hz_2t_and_aver_dst_8x8mult_msa()
|
D | vpx_convolve8_avg_vert_msa.c |
    101: LD4(dst, dst_stride, tp0, tp1, tp2, tp3);  in common_vt_8t_and_aver_dst_8w_msa()
    344: LD4(dst, dst_stride, tp0, tp1, tp2, tp3);  in common_vt_2t_and_aver_dst_8x4_msa()
    376: LD4(dst, dst_stride, tp0, tp1, tp2, tp3);  in common_vt_2t_and_aver_dst_8x8mult_msa()
    379: LD4(dst + 4 * dst_stride, dst_stride, tp0, tp1, tp2, tp3);  in common_vt_2t_and_aver_dst_8x8mult_msa()
|
D | sum_squares_msa.c |
    26: LD4(src, src_stride, src0, src1, src2, src3);  in vpx_sum_squares_2d_i16_msa()
|
D | vpx_convolve8_avg_msa.c |
    148: LD4(dst, dst_stride, tp0, tp1, tp2, tp3);  in common_hv_8ht_8vt_and_aver_dst_8w_msa()
    344: LD4(dst, dst_stride, tp0, tp1, tp2, tp3);  in common_hv_2ht_2vt_and_aver_dst_8x4_msa()
    415: LD4(dst, dst_stride, tp0, tp1, tp2, tp3);  in common_hv_2ht_2vt_and_aver_dst_8x8mult_msa()
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | ctlz.ll |
    113: ; CHECK-NEXT: [[LD4:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32,…
    121: ; CHECK-NEXT: [[CTLZ4:%.*]] = call i32 @llvm.ctlz.i32(i32 [[LD4]], i1 false)
    168: ; CHECK-NEXT: [[LD4:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src1…
    176: ; CHECK-NEXT: [[CTLZ4:%.*]] = call i16 @llvm.ctlz.i16(i16 [[LD4]], i1 false)
    223: ; CHECK-NEXT: [[LD4:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src1…
    239: ; CHECK-NEXT: [[CTLZ4:%.*]] = call i16 @llvm.ctlz.i16(i16 [[LD4]], i1 false)
    326: ; CHECK-NEXT: [[LD4:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
    342: ; CHECK-NEXT: [[CTLZ4:%.*]] = call i8 @llvm.ctlz.i8(i8 [[LD4]], i1 false)
    429: ; CHECK-NEXT: [[LD4:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
    461: ; CHECK-NEXT: [[CTLZ4:%.*]] = call i8 @llvm.ctlz.i8(i8 [[LD4]], i1 false)
    [all …]
|
D | cttz.ll |
    113: ; CHECK-NEXT: [[LD4:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32,…
    121: ; CHECK-NEXT: [[CTTZ4:%.*]] = call i32 @llvm.cttz.i32(i32 [[LD4]], i1 false)
    168: ; CHECK-NEXT: [[LD4:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src1…
    176: ; CHECK-NEXT: [[CTTZ4:%.*]] = call i16 @llvm.cttz.i16(i16 [[LD4]], i1 false)
    223: ; CHECK-NEXT: [[LD4:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src1…
    239: ; CHECK-NEXT: [[CTTZ4:%.*]] = call i16 @llvm.cttz.i16(i16 [[LD4]], i1 false)
    326: ; CHECK-NEXT: [[LD4:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
    342: ; CHECK-NEXT: [[CTTZ4:%.*]] = call i8 @llvm.cttz.i8(i8 [[LD4]], i1 false)
    429: ; CHECK-NEXT: [[LD4:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8…
    461: ; CHECK-NEXT: [[CTTZ4:%.*]] = call i8 @llvm.cttz.i8(i8 [[LD4]], i1 false)
    [all …]
|
D | fround.ll |
    110: ; SSE2-NEXT: [[LD4:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x doubl…
    118: ; SSE2-NEXT: [[CEIL4:%.*]] = call double @llvm.ceil.f64(double [[LD4]])
    281: ; SSE2-NEXT: [[LD4:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x doubl…
    289: ; SSE2-NEXT: [[FLOOR4:%.*]] = call double @llvm.floor.f64(double [[LD4]])
    452: ; SSE2-NEXT: [[LD4:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x doubl…
    460: ; SSE2-NEXT: [[NEARBYINT4:%.*]] = call double @llvm.nearbyint.f64(double [[LD4]])
    623: ; SSE2-NEXT: [[LD4:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x doubl…
    631: ; SSE2-NEXT: [[RINT4:%.*]] = call double @llvm.rint.f64(double [[LD4]])
    794: ; SSE2-NEXT: [[LD4:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x doubl…
    802: ; SSE2-NEXT: [[TRUNC4:%.*]] = call double @llvm.trunc.f64(double [[LD4]])
    [all …]
|
D | bitreverse.ll |
    165: ; SSE-NEXT: [[LD4:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i…
    173: ; SSE-NEXT: [[BITREVERSE4:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD4]])
    232: ; SSE-NEXT: [[LD4:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16,…
    240: ; SSE-NEXT: [[BITREVERSE4:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD4]])
    299: ; SSE-NEXT: [[LD4:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16,…
    315: ; SSE-NEXT: [[BITREVERSE4:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD4]])
    414: ; SSE-NEXT: [[LD4:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0…
    430: ; SSE-NEXT: [[BITREVERSE4:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD4]])
    529: ; SSE-NEXT: [[LD4:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0…
    561: ; SSE-NEXT: [[BITREVERSE4:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD4]])
|
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ |
D | sitofp.ll |
    127: ; SSE-NEXT: [[LD4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i…
    135: ; SSE-NEXT: [[CVT4:%.*]] = sitofp i64 [[LD4]] to double
    154: ; AVX256NODQ-NEXT: [[LD4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @s…
    162: ; AVX256NODQ-NEXT: [[CVT4:%.*]] = sitofp i64 [[LD4]] to double
    280: ; SSE-NEXT: [[LD4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32,…
    288: ; SSE-NEXT: [[CVT4:%.*]] = sitofp i32 [[LD4]] to double
    406: ; SSE-NEXT: [[LD4:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16,…
    414: ; SSE-NEXT: [[CVT4:%.*]] = sitofp i16 [[LD4]] to double
    532: ; SSE-NEXT: [[LD4:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 …
    540: ; SSE-NEXT: [[CVT4:%.*]] = sitofp i8 [[LD4]] to double
    [all …]
|
D | fround.ll |
    111: ; SSE2-NEXT: [[LD4:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x doubl…
    119: ; SSE2-NEXT: [[CEIL4:%.*]] = call double @llvm.ceil.f64(double [[LD4]])
    282: ; SSE2-NEXT: [[LD4:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x doubl…
    290: ; SSE2-NEXT: [[FLOOR4:%.*]] = call double @llvm.floor.f64(double [[LD4]])
    453: ; SSE2-NEXT: [[LD4:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x doubl…
    461: ; SSE2-NEXT: [[NEARBYINT4:%.*]] = call double @llvm.nearbyint.f64(double [[LD4]])
    624: ; SSE2-NEXT: [[LD4:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x doubl…
    632: ; SSE2-NEXT: [[RINT4:%.*]] = call double @llvm.rint.f64(double [[LD4]])
    795: ; SSE2-NEXT: [[LD4:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x doubl…
    803: ; SSE2-NEXT: [[TRUNC4:%.*]] = call double @llvm.trunc.f64(double [[LD4]])
    [all …]
|
/external/webp/src/dsp/ |
D | dec_mips_dsp_r2.c |
    731: static void LD4(uint8_t* dst) {  // Down-Left  in LD4() function
    980: VP8PredLuma4[6] = LD4;  in VP8DspInitMIPSdspR2()
|
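Note: in the libwebp entry above, LD4 is a C function, the 4x4 down-left intra predictor that the MIPS dspr2 file re-implements and registers in the VP8PredLuma4 dispatch table. A sketch along the lines of the portable version in src/dsp/dec.c, assuming libwebp's usual helpers (BPS is the prediction-buffer stride; exact details may differ):

  #include <stdint.h>

  #define BPS 32  /* prediction-buffer stride; 32 in libwebp's dsp code */
  #define DST(x, y) dst[(x) + (y) * BPS]
  #define AVG3(a, b, c) ((uint8_t)(((a) + 2 * (b) + (c) + 2) >> 2))

  /* Down-Left: predict a 4x4 block from the 8 pixels above it. */
  static void LD4(uint8_t* dst) {
    const int A = dst[0 - BPS], B = dst[1 - BPS], C = dst[2 - BPS];
    const int D = dst[3 - BPS], E = dst[4 - BPS], F = dst[5 - BPS];
    const int G = dst[6 - BPS], H = dst[7 - BPS];
    DST(0, 0)                                     = AVG3(A, B, C);
    DST(1, 0) = DST(0, 1)                         = AVG3(B, C, D);
    DST(2, 0) = DST(1, 1) = DST(0, 2)             = AVG3(C, D, E);
    DST(3, 0) = DST(2, 1) = DST(1, 2) = DST(0, 3) = AVG3(D, E, F);
    DST(3, 1) = DST(2, 2) = DST(1, 3)             = AVG3(E, F, G);
    DST(3, 2) = DST(2, 3)                         = AVG3(F, G, H);
    DST(3, 3)                                     = AVG3(G, H, H);
  }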