/external/libxaac/decoder/armv8/ |
D | ixheaacd_sbr_imdct_using_fft.s | 106 LD2 {V0.S, V1.S}[0], [X5], X1 108 LD2 {V4.S, V5.S}[0], [X5], X1 110 LD2 {V2.S, V3.S}[0], [X5], X1 112 LD2 {V6.S, V7.S}[0], [X5], X1 117 LD2 {V0.S, V1.S}[1], [X6] , X1 119 LD2 {V4.S, V5.S}[1], [X6] , X1 121 LD2 {V2.S, V3.S}[1], [X6] , X1 123 LD2 {V6.S, V7.S}[1], [X6], X1 129 LD2 {V0.S, V1.S}[2], [X7] , X1 131 LD2 {V4.S, V5.S}[2], [X7] , X1 [all …]
|
D | ixheaacd_imdct_using_fft.s | 136 LD2 {v0.S, v1.S}[0], [X5], X1 138 LD2 {v4.S, v5.S}[0], [X5], X1 140 LD2 {v2.S, v3.S}[0], [X5], X1 142 LD2 {v6.S, v7.S}[0], [X5], X1 147 LD2 {v0.S, v1.S}[1], [X6] , X1 149 LD2 {v4.S, v5.S}[1], [X6] , X1 151 LD2 {v2.S, v3.S}[1], [X6] , X1 153 LD2 {v6.S, v7.S}[1], [X6], X1 159 LD2 {v0.S, v1.S}[2], [X7] , X1 161 LD2 {v4.S, v5.S}[2], [X7] , X1 [all …]
|
D | ixheaacd_sbr_qmf_analysis32_neon.s | 147 LD2 {v1.4h, v2.4h}, [x2], #16 164 LD2 {v3.4h, v4.4h}, [x2], #16 171 LD2 {v5.4h, v6.4h}, [x2], #16 178 LD2 {v7.4h, v8.4h}, [x2], #16 186 LD2 {v9.4h, v10.4h}, [x2], #16 196 LD2 {v11.4h, v12.4h}, [x3], #16 205 LD2 {v13.4h, v14.4h}, [x3], #16 209 LD2 {v15.4h, v16.4h}, [x3], #16 219 LD2 {v17.4h, v18.4h}, [x3], #16 226 LD2 {v19.4h, v20.4h}, [x3], #16 [all …]
|
D | ixheaacd_overlap_add2.s | 59 LD2 {V0.4H, V1.4H}, [X10], #16 68 LD2 {V6.4H, V7.4H}, [X7], X12 76 LD2 {V2.4H, V3.4H}, [X3], #16 80 LD2 {V8.4H, V9.4H}, [X10], #16 82 LD2 {V10.4H, V11.4H}, [X3], #16 85 LD2 {V14.4H, V15.4H}, [X7], X12 95 LD2 {V0.4H, V1.4H}, [X10], #16 97 LD2 {V2.4H, V3.4H}, [X3], #16 99 LD2 {V6.4H, V7.4H}, [X7], X12 116 LD2 {V8.4H, V9.4H}, [X10], #16 [all …]
|
D | ixheaacd_pre_twiddle.s | 158 LD2 {v8.h, v9.h}[0], [x3], x6 159 LD2 {v8.h, v9.h}[1], [x3], x6 160 LD2 {v8.h, v9.h}[2], [x3], x6 161 LD2 {v8.h, v9.h}[3], [x3], x6 211 LD2 {v8.h, v9.h}[0], [x3], x6 214 LD2 {v8.h, v9.h}[1], [x3], x6 217 LD2 {v8.h, v9.h}[2], [x3], x6 220 LD2 {v8.h, v9.h}[3], [x3], x6 282 LD2 {v8.h, v9.h}[0], [x3], x6 286 LD2 {v8.h, v9.h}[1], [x3], x6 [all …]
|
D | ixheaacd_post_twiddle.s | 142 LD2 {v8.h, v9.h}[0], [x2], x6 143 LD2 {v8.h, v9.h}[1], [x2], x6 144 LD2 {v8.h, v9.h}[2], [x2], x6 145 LD2 {v8.h, v9.h}[3], [x2], x6 200 LD2 {v8.h, v9.h}[0], [x2], x6 203 LD2 {v8.h, v9.h}[1], [x2], x6 207 LD2 {v8.h, v9.h}[2], [x2], x6 210 LD2 {v8.h, v9.h}[3], [x2], x6 330 LD2 {v8.h, v9.h}[0], [x2], x6 333 LD2 {v8.h, v9.h}[1], [x2], x6 [all …]
|
D | ixheaacd_post_twiddle_overlap.s | 314 LD2 { v0.4s, v1.4s}, [x1] 337 LD2 {v8.4h, v9.4h}, [x2] 340 LD2 { v4.4s, v5.4s}, [x8] 365 LD2 { v10.4s, v11.4s}, [x6] 549 LD2 { v28.4s, v29.4s}, [x4] 627 LD2 { v0.4s, v1.4s}, [x1] 657 LD2 { v10.4s, v11.4s}, [x6] 707 LD2 { v4.4s, v5.4s}, [x8] 732 LD2 {v8.4h, v9.4h}, [x2] 965 LD2 { v28.4s, v29.4s}, [x4] [all …]
|
D | ixheaacd_overlap_add1.s | 81 LD2 {V2.4H, V3.4H}, [X8], X12 140 LD2 {V2.4H, V3.4H}, [X8], X12 184 LD2 {V2.4H, V3.4H}, [X8], X12 257 LD2 {V2.4H, V3.4H}, [X8], X12
|
D | ixheaacd_cos_sin_mod_loop2.s | 71 LD2 {v0.h, v1.h}[0], [x1], #4 171 LD2 {v0.h, v1.h}[0], [x1], #4
|
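The libxaac .s files above hand-schedule AArch64 LD2 structure loads: a de-interleaving load that puts even-indexed elements in the first register and odd-indexed elements in the second, used here to split interleaved real/imaginary (or even/odd) FFT and QMF data. A minimal C sketch of the same operation with NEON intrinsics, assuming arm_neon.h; the function and buffer names are illustrative, not libxaac code:

```c
/* Sketch of an AArch64 LD2 structure load via NEON intrinsics.
 * src holds interleaved {re, im} int16 pairs; split them into planes. */
#include <arm_neon.h>
#include <stdint.h>

void deinterleave_pairs(const int16_t *src, int16_t *re, int16_t *im,
                        int n_pairs)
{
    for (int i = 0; i + 4 <= n_pairs; i += 4) {
        /* LD2 {v0.4h, v1.4h}, [x], #16 : load 8 halfwords, even ones
         * into val[0], odd ones into val[1], then post-increment.     */
        int16x4x2_t v = vld2_s16(src + 2 * i);
        vst1_s16(re + i, v.val[0]);
        vst1_s16(im + i, v.val[1]);
    }
}
```

The lane forms in the listing (LD2 {V0.S, V1.S}[0], [X5], X1) instead gather one element pair per row with a register stride; the intrinsic analogue is vld2_lane_s32.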
/external/llvm/test/CodeGen/X86/ |
D | merge-store-partially-alias-loads.ll | 21 ; DBGDAG-DAG: [[LD2:t[0-9]+]]: i16,ch = load<LD2[%tmp81](align=1)> [[ENTRYTOKEN]], [[BASEPTR]], und… 24 ; DBGDAG: [[LOADTOKEN:t[0-9]+]]: ch = TokenFactor [[LD2]]:1, [[LD1]]:1 26 ; DBGDAG-DAG: [[ST2:t[0-9]+]]: ch = store<ST2[%tmp10](align=1)> [[LOADTOKEN]], [[LD2]], t{{[0-9]+}}…
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/ |
D | multiple-uses-load-bitcast-select.ll | 9 ; CHECK-NEXT: [[LD2:%.*]] = load double, double* [[Z1]], align 8 10 ; CHECK-NEXT: [[TMP10:%.*]] = fcmp olt double [[LD1]], [[LD2]] 11 ; CHECK-NEXT: [[TMP121:%.*]] = select i1 [[TMP10]], double [[LD1]], double [[LD2]]
|
D | load-bitcast-select.ll | 89 ; CHECK-NEXT: [[LD2:%.*]] = load float, float* [[LOADADDR2:%.*]], align 4 90 ; CHECK-NEXT: [[COND:%.*]] = fcmp ogt float [[LD1]], [[LD2]] 91 ; CHECK-NEXT: [[LD3:%.*]] = select i1 [[COND]], float [[LD1]], float [[LD2]]
|
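The [[LD2:%.*]] tokens in these InstCombine tests are FileCheck captures: the first occurrence binds the matched value name, and later [[LD2]] uses must match that same name. The captured shape is two scalar FP loads feeding an fcmp and a select, i.e. a branchless min/max kept on the loaded values. A hedged C analogue of code that produces this IR shape (not the tests' actual source; names are illustrative):

```c
/* Two scalar loads feeding a compare and a select (a branchless min),
 * mirroring the load/fcmp/select chain the CHECK lines capture. */
double select_smaller(const double *p, const double *q)
{
    double a = *p;           /* first load ([[LD1]]-like)  */
    double b = *q;           /* second load ([[LD2]]-like) */
    return a < b ? a : b;    /* fcmp olt + select          */
}
```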
/external/libvpx/libvpx/vp9/common/mips/msa/ |
D | vp9_mfqe_msa.c | 31 LD2(src_ptr, src_stride, src0_d, src1_d); in filter_by_weight8x8_msa() 33 LD2(dst_ptr, dst_stride, dst0_d, dst1_d); in filter_by_weight8x8_msa() 37 LD2(src_ptr, src_stride, src0_d, src1_d); in filter_by_weight8x8_msa() 39 LD2((dst_ptr + 2 * dst_stride), dst_stride, dst0_d, dst1_d); in filter_by_weight8x8_msa()
|
/external/libvpx/libvpx/vp8/common/mips/msa/ |
D | mfqe_msa.c | 31 LD2(src_ptr, src_stride, src0_d, src1_d); in filter_by_weight8x8_msa() 33 LD2(dst_ptr, dst_stride, dst0_d, dst1_d); in filter_by_weight8x8_msa() 37 LD2(src_ptr, src_stride, src0_d, src1_d); in filter_by_weight8x8_msa() 39 LD2((dst_ptr + 2 * dst_stride), dst_stride, dst0_d, dst1_d); in filter_by_weight8x8_msa()
|
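In the two mfqe_msa.c copies above, LD2 is not an instruction but a libvpx MSA helper macro: two 64-bit loads a stride apart, whose results are later packed into MSA vectors. A portable C sketch of that behaviour, assuming the usual macros_msa.h shape; an approximation, not the exact header text:

```c
/* Approximate behaviour of libvpx's LD2(psrc, stride, out0, out1). */
#include <stdint.h>
#include <string.h>

uint64_t load_u64(const uint8_t *p)             /* stands in for LD()  */
{
    uint64_t v;
    memcpy(&v, p, sizeof(v));                   /* unaligned-safe load */
    return v;
}

#define LD2_SKETCH(psrc, stride, out0, out1)    \
    do {                                        \
        (out0) = load_u64(psrc);                \
        (out1) = load_u64((psrc) + (stride));   \
    } while (0)
```

The same macro shows up again in the vp8 temporal_filter_msa.c entry further down this listing.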
/external/libhevc/common/arm64/ |
D | ihevc_sao_band_offset_chroma.s | 309 LD2 {v5.8b, v6.8b},[x4] //vld1q_u8(pu1_src_cpy) 312 LD2 {v13.8b, v14.8b},[x5] //vld1q_u8(pu1_src_cpy) 315 LD2 {v17.8b, v18.8b},[x6] //vld1q_u8(pu1_src_cpy) 318 LD2 {v21.8b, v22.8b},[x7] //vld1q_u8(pu1_src_cpy) 363 LD2 {v5.8b, v6.8b},[x4] //vld1q_u8(pu1_src_cpy) 367 LD2 {v13.8b, v14.8b},[x5] //vld1q_u8(pu1_src_cpy) 370 LD2 {v17.8b, v18.8b},[x6] //vld1q_u8(pu1_src_cpy) 379 LD2 {v21.8b, v22.8b},[x7] //vld1q_u8(pu1_src_cpy)
|
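In the chroma SAO kernel above, LD2 {v5.8b, v6.8b},[x4] splits 16 interleaved Cb/Cr bytes into separate 8-byte Cb and Cr vectors so each plane can receive its own band offset; the trailing //vld1q_u8(...) comments appear to be carried over from an earlier intrinsics-style version. A hedged intrinsics sketch of that split; function and parameter names are illustrative:

```c
/* Split one row of interleaved CbCr (e.g. NV12 chroma) into two planes. */
#include <arm_neon.h>
#include <stdint.h>

void split_cbcr(const uint8_t *src_cbcr, uint8x8_t *cb, uint8x8_t *cr)
{
    uint8x8x2_t v = vld2_u8(src_cbcr);  /* LD2 {vA.8b, vB.8b}, [x] */
    *cb = v.val[0];                     /* even bytes: Cb          */
    *cr = v.val[1];                     /* odd bytes:  Cr          */
}
```

The 420sp-to-RGBA converter later in this listing uses the same two-register byte form, there to separate even and odd luma samples.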
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | merge-store-partially-alias-loads.ll | 21 ; DBGDAG-DAG: [[LD2:t[0-9]+]]: i16,ch = load<(load 2 from %ir.tmp81, align 1)> [[ENTRYTOKEN]], [[BA… 25 ; DBGDAG-DAG: [[LOADTOKEN:t[0-9]+]]: ch = TokenFactor [[LD2]]:1, [[LD1]]:1 26 ; DBGDAG-DAG: [[ST2:t[0-9]+]]: ch = store<(store 2 into %ir.tmp10, align 1)> [[LOADTOKEN]], [[LD2]]…
|
/external/llvm/test/CodeGen/Hexagon/vect/ |
D | vect-load-1.ll | 2 …= load 0x16c5890, 0x16f76e0, 0x16f76e0<LD2[undef](align=8), sext from v2i8>", 0x16c5890, 0x16f76e0…
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/ |
D | unal-altivec.ll | 38 ; CHECK-DAG: lvx [[LD2:[0-9]+]], [[B3]], 39 ; CHECK-DAG: vperm [[R1:[0-9]+]], [[LD1]], [[LD2]], [[MASK1]]
|
/external/llvm/test/CodeGen/PowerPC/ |
D | unal-altivec.ll | 41 ; CHECK-DAG: lvx [[LD2:[0-9]+]], [[B3]], [[C15]] 44 ; CHECK-DAG: vperm [[R1:[0-9]+]], [[LD1]], [[LD2]], [[MASK1]]
|
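Both copies of unal-altivec.ll check the classic Altivec unaligned-load idiom: two aligned lvx loads bracketing the address, spliced together by vperm with an lvsl-generated permute mask. A hedged C sketch using Altivec intrinsics (assuming altivec.h and a pre-POWER8-style lowering); illustrative only:

```c
/* Unaligned 16-byte load built from two aligned lvx loads plus vperm. */
#include <altivec.h>

vector float load_unaligned(const float *p)
{
    vector float lo = vec_ld(0, p);              /* aligned chunk covering p */
    vector float hi = vec_ld(15, p);             /* next aligned 16 bytes    */
    vector unsigned char mask = vec_lvsl(0, p);  /* shuffle control from p   */
    return vec_perm(lo, hi, mask);               /* splice the wanted bytes  */
}
```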
/external/libvpx/libvpx/vp8/encoder/mips/msa/ |
D | temporal_filter_msa.c | 148 LD2(frame1_ptr, stride, f0, f1); in temporal_filter_apply_8size_msa() 150 LD2(frame2_ptr, 8, f2, f3); in temporal_filter_apply_8size_msa() 152 LD2(frame1_ptr, stride, f4, f5); in temporal_filter_apply_8size_msa() 154 LD2(frame2_ptr, 8, f6, f7); in temporal_filter_apply_8size_msa()
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/vect/ |
D | vect-load-1.ll | 3 ; Used to fail with "Cannot select: v2i32,ch = load 0x16c5890, 0x16f76e0, 0x16f76e0<LD2[undef](alig…
|
/external/libhevc/decoder/arm64/ |
D | ihevcd_fmt_conv_420sp_to_rgba8888.s | 173 LD2 {v30.8b, v31.8b},[x0],#16 ////D0 - Y0,Y2,Y4,Y6,Y8,Y10,Y12,Y14 row 1 175 LD2 {v28.8b, v29.8b},[x7],#16 ////D0 - Y0,Y2,Y4,Y6,Y8,Y10,Y12,Y14 row2 292 LD2 {v30.8b, v31.8b},[x0],#16 ////D0 - Y0,Y2,Y4,Y6,Y8,Y10,Y12,Y14 row 1 294 LD2 {v28.8b, v29.8b},[x7],#16 ////D0 - Y0,Y2,Y4,Y6,Y8,Y10,Y12,Y14 row2
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/GlobalISel/ |
D | call-translator-ios.ll | 61 ; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.ptr + 8) 64 ; CHECK: [[INS2:%[0-9]+]]:_(s128) = G_INSERT [[INS1]], [[LD2]](s64), 64
|
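The GlobalISel checks above show a 128-bit value materialised as two 64-bit G_LOADs (from %ir.ptr and %ir.ptr + 8) that are then G_INSERTed into a single s128 virtual register. A hedged C analogue of source that can lead to this shape, not the test's actual input; the type and callee names are hypothetical:

```c
#include <stdint.h>

typedef struct { uint64_t lo, hi; } pair64;   /* hypothetical 16-byte type */

extern void consume(pair64 v);                /* hypothetical callee       */

void forward(const pair64 *ptr)
{
    /* Copying *ptr takes two 8-byte loads, at ptr and ptr + 8, which the
     * IR translator can stitch into one 128-bit value before the call.   */
    consume(*ptr);
}
```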
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/ |
D | uitofp.ll | 60 ; SSE-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i… 64 ; SSE-NEXT: [[CVT2:%.*]] = uitofp i64 [[LD2]] to double 75 ; AVX256-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64… 79 ; AVX256-NEXT: [[CVT2:%.*]] = uitofp i64 [[LD2]] to double 112 ; SSE-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i… 120 ; SSE-NEXT: [[CVT2:%.*]] = uitofp i64 [[LD2]] to double 139 ; AVX256-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64… 147 ; AVX256-NEXT: [[CVT2:%.*]] = uitofp i64 [[LD2]] to double 234 ; SSE-NEXT: [[LD2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32,… 238 ; SSE-NEXT: [[CVT2:%.*]] = uitofp i32 [[LD2]] to double [all …]
|
D | sitofp.ll | 60 ; SSE-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i… 64 ; SSE-NEXT: [[CVT2:%.*]] = sitofp i64 [[LD2]] to double 75 ; AVX256-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64… 79 ; AVX256-NEXT: [[CVT2:%.*]] = sitofp i64 [[LD2]] to double 112 ; SSE-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i… 120 ; SSE-NEXT: [[CVT2:%.*]] = sitofp i64 [[LD2]] to double 139 ; AVX256-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64… 147 ; AVX256-NEXT: [[CVT2:%.*]] = sitofp i64 [[LD2]] to double 219 ; SSE-NEXT: [[LD2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32,… 223 ; SSE-NEXT: [[CVT2:%.*]] = sitofp i32 [[LD2]] to double [all …]
|
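These SLP vectorizer tests load small fixed arrays of i64/i32 and convert element-wise to double; the SSE and AVX256 prefixes expect the i64 conversions to stay scalar (x86 gains a packed i64-to-f64 conversion only with AVX-512DQ), which matches the scalar [[LD2]]/[[CVT2]] chains captured above. A hedged C analogue of such a kernel; the source array echoes the tests' @src64 global, while the destination and function names are illustrative:

```c
/* Four independent load -> sitofp -> store chains; an SLP vectorization
 * candidate only where packed i64 -> f64 conversion is cheap. */
#include <stdint.h>

int64_t src64[8];     /* mirrors the tests' @src64 global */
double  dst[8];       /* illustrative destination         */

void convert_4xi64_to_f64(void)
{
    for (int i = 0; i < 4; ++i)
        dst[i] = (double)src64[i];
}
```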