/external/libhevc/common/arm64/
D | ihevc_intra_pred_chroma_mode2.s
    123  ld2 {v0.8b, v1.8b},[x0],x8
    129  ld2 {v2.8b, v3.8b},[x10],x8
    132  ld2 {v4.8b, v5.8b},[x0],x8
    133  ld2 {v6.8b, v7.8b},[x10],x8
    136  ld2 {v8.8b, v9.8b},[x0],x8
    137  ld2 {v10.8b, v11.8b},[x10],x8
    138  ld2 {v12.8b, v13.8b},[x0],x8
    141  ld2 {v14.8b, v15.8b},[x10],x8
    188  ld2 {v0.8b, v1.8b},[x0],x8
    191  ld2 {v2.8b, v3.8b},[x10],x8
    [all …]
D | ihevc_intra_pred_chroma_dc.s
    131  ld2 {v30.8b, v31.8b}, [x6], #16 //load from src[nt]
    137  ld2 {v26.8b, v27.8b}, [x8],#16 //load from src[2nt+1]
    159  ld2 {v30.8b, v31.8b}, [x6],#16 //load from src[nt]
    163  ld2 {v26.8b, v27.8b}, [x8],#16 //load from src[2nt+1]
    255  ld2 {v30.8b, v31.8b},[x6] //load from src[nt]
    258  ld2 {v26.8b, v27.8b},[x8] //load from src[2nt+1]
D | ihevc_intra_pred_chroma_ver.s
    117  ld2 {v20.8b, v21.8b}, [x6],#16 //16 loads (col 0:15)
    121  ld2 {v22.8b, v23.8b}, [x6] //16 loads (col 16:31)
    186  ld2 {v20.8b, v21.8b}, [x6],#16 //16 loads (col 0:15)
    190  ld2 {v22.8b, v23.8b}, [x6] //16 loads (col 16:31)
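Note: the three libhevc intra-prediction kernels above use ld2 as a de-interleaver for the semi-planar chroma buffer (Cb/Cr interleaved, NV12-style): each ld2 {vA.8b, vB.8b} reads 16 interleaved bytes and leaves the U samples in vA and the V samples in vB. A minimal C sketch of the same idea with NEON (ACLE) intrinsics; the function and buffer names are mine, not libhevc's API:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Split 8 interleaved UV pairs into separate U and V buffers.
     * vld2_u8 compiles to "ld2 {v0.8b, v1.8b}, [x0]": val[0] gets
     * the even (U) bytes, val[1] the odd (V) bytes. */
    static void split_uv8(const uint8_t *uv, uint8_t *u, uint8_t *v)
    {
        uint8x8x2_t pair = vld2_u8(uv);   /* one ld2, 16 bytes read */
        vst1_u8(u, pair.val[0]);
        vst1_u8(v, pair.val[1]);
    }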
/external/llvm/test/Transforms/SROA/
D | ppcf128-no-fold.ll
    5   %struct.ld2 = type { [2 x ppc_fp128] }
    11  %z = alloca %struct.ld2, align 16
    13  %dat = getelementptr inbounds %struct.ld2, %struct.ld2* %z, i32 0, i32 0
    16  %dat1 = getelementptr inbounds %struct.ld2, %struct.ld2* %z, i32 0, i32 0
    20  %coerce.dive = getelementptr %struct.ld2, %struct.ld2* %z, i32 0, i32 0
/external/libhevc/decoder/arm64/
D | ihevcd_itrans_recon_dc_chroma.s
    117  ld2 {v2.8b, v3.8b},[x7],x2
    118  ld2 {v4.8b, v5.8b},[x7],x2
    119  ld2 {v6.8b, v7.8b},[x7],x2
    120  ld2 {v8.8b, v9.8b},[x7],x2
    122  ld2 {v10.8b, v11.8b},[x7],x2
    123  ld2 {v12.8b, v13.8b},[x7],x2
    124  ld2 {v14.8b, v15.8b},[x7],x2
    125  ld2 {v16.8b, v17.8b},[x7]
    184  ld2 {v2.8b, v3.8b},[x0],x2
    185  ld2 {v4.8b, v5.8b},[x0],x2
    [all …]
D | ihevcd_fmt_conv_420sp_to_420p.s
    173  ld2 {v0.8b, v1.8b},[x1],#16
    189  ld2 {v0.8b, v1.8b}, [x1],#16
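Note: ihevcd_fmt_conv_420sp_to_420p.s applies that de-interleave across whole rows to turn 420SP (interleaved UV) into 420P (separate planes). A hedged C equivalent of its inner loop, assuming the row width is a multiple of 8 chroma pairs (names are mine, not the decoder's API):

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Convert one interleaved UV row (420SP/NV12) into planar U and V
     * rows (420P). Each iteration is the assembly's
     * "ld2 {v0.8b, v1.8b},[x1],#16" followed by two st1 stores. */
    static void uv_row_to_planar(const uint8_t *uv, uint8_t *u,
                                 uint8_t *v, size_t pairs)
    {
        for (size_t i = 0; i + 8 <= pairs; i += 8) {
            uint8x8x2_t p = vld2_u8(uv + 2 * i);
            vst1_u8(u + i, p.val[0]);
            vst1_u8(v + i, p.val[1]);
        }
    }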
/external/llvm/test/MC/AArch64/
D | neon-simd-ldst-multi-elem.s
    369  ld2 { v0.16b, v1.16b }, [x0]
    370  ld2 { v15.8h, v16.8h }, [x15]
    371  ld2 { v31.4s, v0.4s }, [sp]
    372  ld2 { v0.2d, v1.2d }, [x0]
    373  ld2 { v0.8b, v1.8b }, [x0]
    374  ld2 { v15.4h, v16.4h }, [x15]
    375  ld2 { v31.2s, v0.2s }, [sp]
    384  ld2 { v0.16b-v1.16b }, [x0]
    385  ld2 { v15.8h-v16.8h }, [x15]
    386  ld2 { v31.4s-v0.4s }, [sp]
    [all …]
D | arm64-simd-ldst.s
    187  ld2.8b {v4, v5}, [x19]
    188  ld2.16b {v4, v5}, [x19]
    189  ld2.4h {v4, v5}, [x19]
    190  ld2.8h {v4, v5}, [x19]
    191  ld2.2s {v4, v5}, [x19]
    192  ld2.4s {v4, v5}, [x19]
    193  ld2.2d {v4, v5}, [x19]
    205  ; CHECK: ld2.8b { v4, v5 }, [x19] ; encoding: [0x64,0x82,0x40,0x0c]
    206  ; CHECK: ld2.16b { v4, v5 }, [x19] ; encoding: [0x64,0x82,0x40,0x4c]
    207  ; CHECK: ld2.4h { v4, v5 }, [x19] ; encoding: [0x64,0x86,0x40,0x0c]
    [all …]
D | neon-simd-ldst-one-elem.s
    96   ld2 { v0.b, v1.b }[9], [x0]
    97   ld2 { v15.h, v16.h }[7], [x15]
    98   ld2 { v31.s, v0.s }[3], [sp]
    99   ld2 { v0.d, v1.d }[1], [x0]
    257  ld2 { v0.b, v1.b }[9], [x0], x3
    258  ld2 { v15.h, v16.h }[7], [x15], #4
    259  ld2 { v31.s, v0.s }[3], [sp], #8
    260  ld2 { v0.d, v1.d }[1], [x0], x0
D | neon-simd-post-ldst-multi-elem.s
    124  ld2 { v0.16b, v1.16b }, [x0], x1
    125  ld2 { v15.8h, v16.8h }, [x15], x2
    126  ld2 { v31.4s, v0.4s }, [sp], #32
    127  ld2 { v0.2d, v1.2d }, [x0], #32
    128  ld2 { v0.8b, v1.8b }, [x0], x2
    129  ld2 { v15.4h, v16.4h }, [x15], x3
    130  ld2 { v31.2s, v0.2s }, [sp], #16
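Note: the four MC tests above cover the three assembler shapes of ld2: whole-register multi-element (ld2 { v0.16b, v1.16b }, [x0], including the v0.16b-v1.16b range syntax), single-lane (ld2 { v0.b, v1.b }[9], [x0]), and the post-indexed variants of both ([x0], #32 or [x0], xN). A hedged sketch of C intrinsics whose codegen typically hits each shape; nothing below is part of the tests:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Whole-register: ld2 { v0.16b, v1.16b }, [x0] */
    uint8x16x2_t load_pairs(const uint8_t *p) { return vld2q_u8(p); }

    /* Single lane: ld2 { v0.s, v1.s }[3], [x0] replaces lane 3 only,
     * leaving the other lanes of acc untouched. */
    uint32x4x2_t load_lane3(const uint32_t *p, uint32x4x2_t acc)
    {
        return vld2q_lane_u32(p, acc, 3);
    }

    /* Post-indexed forms come from folding a loop's pointer bump
     * into the load: ld2 { v0.16b, v1.16b }, [x0], #32 */
    void sum_pairs(const uint8_t *p, uint8_t *dst, int n)
    {
        for (int i = 0; i + 16 <= n; i += 16, p += 32) {
            uint8x16x2_t uv = vld2q_u8(p);
            vst1q_u8(dst + i, vaddq_u8(uv.val[0], uv.val[1]));
        }
    }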
/external/llvm/test/CodeGen/X86/
D | atom-cmpb.ll
    17  %ld2 = load i8, i8* %incdec.ptr, align 1
    19  %x5 = xor i8 %ld2, -1
    20  %cmp34 = icmp ult i8 %ld2, %ld1
D | codegen-prepare-extload.ll
    261  ; a. This creates one sext of %ld2 and one of %zextld
    262  ; b. The sext of %ld2 can be combine with %ld2, so we remove one sext but
    264  ; => We have one zext of %zextld left and we created one sext of %ld2.
    313  %ld2 = load i32, i32* %addr2
    314  %add = add nsw i32 %ld2, %zextld
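Note: despite the name, %ld2 in this test is an ordinary i32 load; the test is about CodeGenPrepare promoting a sign extension past the add so that the resulting sext(%ld2) can fold into the load itself. A hedged C analogue, not the test's IR (the nsw flag corresponds to ordinary signed addition in C):

    #include <stdint.h>

    /* The sext of the loaded value can fold into the load (e.g. ldrsw
     * on AArch64), which is why hoisting the extension above the add
     * pays off even though it duplicates one ext. */
    int64_t promote_ext(const int32_t *addr2, int32_t zextld)
    {
        int32_t ld2 = *addr2;            /* %ld2 = load i32 */
        return (int64_t)(ld2 + zextld);  /* sext of %add gets promoted */
    }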
/external/llvm/test/Transforms/EarlyCSE/AArch64/
D | intrinsics.ll
    6    ; Check that @llvm.aarch64.neon.ld2 is optimized away by Early CSE.
    8    ; CHECK-NOT: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8
    27   %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %5)
    63   %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %5)
    76   ; Check that the first @llvm.aarch64.neon.ld2 is optimized away by Early CSE.
    78   ; CHECK: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8
    79   ; CHECK-NOT: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8
    92   %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %0)
    96   %vld22 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %1)
    110  ; Check that the store prevents @llvm.aarch64.neon.ld2 from being optimized
    [all …]
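Note: the EarlyCSE test models redundant de-interleaving loads: two identical @llvm.aarch64.neon.ld2 calls on the same pointer, where the second is removable unless a store may write that memory in between. A hedged C analogue (function names are mine):

    #include <arm_neon.h>
    #include <stdint.h>

    /* No write between the two loads: Early CSE may reuse the first
     * ld2 result and delete the second call. */
    uint32x4_t cse_friendly(const uint32_t *p)
    {
        uint32x4x2_t a = vld2q_u32(p);
        uint32x4x2_t b = vld2q_u32(p);   /* redundant */
        return vaddq_u32(a.val[0], b.val[1]);
    }

    /* A store that may alias p sits between the loads, so the second
     * ld2 must stay (the "store prevents" case at line 110 above). */
    uint32x4_t cse_blocked(uint32_t *p)
    {
        uint32x4x2_t a = vld2q_u32(p);
        p[0] = 0;                        /* clobbers memory in between */
        uint32x4x2_t b = vld2q_u32(p);
        return vaddq_u32(a.val[0], b.val[1]);
    }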
/external/llvm/test/CodeGen/AArch64/
D | arm64-indexed-vector-ldst.ll
    617  ;CHECK: ld2.16b { v0, v1 }, [x0], #32
    618  %ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
    621  ret { <16 x i8>, <16 x i8> } %ld2
    626  ;CHECK: ld2.16b { v0, v1 }, [x0], x{{[0-9]+}}
    627  %ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
    630  ret { <16 x i8>, <16 x i8> } %ld2
    633  declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8*)
    638  ;CHECK: ld2.8b { v0, v1 }, [x0], #16
    639  %ld2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A)
    642  ret { <8 x i8>, <8 x i8> } %ld2
    [all …]
D | arm64-neon-vector-list-spill.ll
    9    ; CHECK: ld2 { v{{[0-9]+}}.2s, v{{[0-9]+}}.2s }, [{{x[0-9]+|sp}}]
    13   %vld = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %arg1)
    69   ; CHECK: ld2 { v{{[0-9]+}}.4s, v{{[0-9]+}}.4s }, [{{x[0-9]+|sp}}]
    73   %vld = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %arg1)
    127  declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32*)
    130  declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32*)
D | arm64-ld1.ll
    11   ; CHECK: ld2.8b { v0, v1 }, [x0]
    13   %tmp2 = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A)
    35   declare %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2.v8i8.p0i8(i8*) nounwind readonly
    46   ; CHECK: ld2.16b { v0, v1 }, [x0]
    48   %tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
    70   declare %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2.v16i8.p0i8(i8*) nounwind readonly
    81   ; CHECK: ld2.4h { v0, v1 }, [x0]
    83   %tmp2 = call %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2.v4i16.p0i16(i16* %A)
    105  declare %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2.v4i16.p0i16(i16*) nounwind readonly
    116  ; CHECK: ld2.8h { v0, v1 }, [x0]
    [all …]
D | arm64-copy-tuple.ll
    16   %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
    33   %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
    50   %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
    67   %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
    138  declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>*)
D | arm64-misched-basic-A53.ll
    118  %ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
    121  ret { <16 x i8>, <16 x i8> } %ld2
    124  declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8*)
D | fp16-vector-load-store.ll
    102  declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2.v4f16.p0v4f16(<4 x half>*)
    108  declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0v8f16(<8 x half>*)
    118  ; CHECK: ld2 { v0.4h, v1.4h }, [x0]
    120  %0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2.v4f16.p0v4f16(<4 x half>* %a)
    172  ; CHECK: ld2 { v0.8h, v1.8h }, [x0]
    174  %0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0v8f16(<8 x half>* %a)
    303  ; CHECK: ld2 { v0.h, v1.h }[2], [x0]
    357  ; CHECK: ld2 { v0.h, v1.h }[2], [x0]
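Note: the f16 cases reuse the same ld2 { v0.4h, v1.4h } encodings as the i16 forms; only the IR element type differs. A hedged intrinsics sketch, assuming a toolchain that exposes ACLE float16_t and vld2_f16:

    #include <arm_neon.h>

    /* ld2 { v0.4h, v1.4h }, [x0] on a half-precision buffer. */
    float16x4x2_t load_half_pairs(const float16_t *p)
    {
        return vld2_f16(p);
    }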
D | arm64-neon-copyPhysReg-tuple.ll
    8  ; CHECK: ld2 { {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
D | arm64-codegen-prepare-extload.ll
    255  ; a. This creates one sext of %ld2 and one of %zextld
    256  ; b. The sext of %ld2 can be combine with %ld2, so we remove one sext but
    258  ; => We have one zext of %zextld left and we created one sext of %ld2.
    307  %ld2 = load i32, i32* %addr2
    308  %add = add nsw i32 %ld2, %zextld
/external/llvm/test/CodeGen/NVPTX/
D | generic-to-nvvm.ll
    18  %ld2 = load i32, i32* @myconst
    20  store i32 %ld2, i32* %b
/external/libavc/common/armv8/
D | ih264_deblk_chroma_av8.s
    93   ld2 {v6.8b, v7.8b}, [x0], x1 //D6 = p1u , D7 = p1v
    95   ld2 {v4.8b, v5.8b}, [x0], x1 //D4 = p0u , D5 = p0v
    99   ld2 {v0.8b, v1.8b}, [x0], x1 //D0 = q0u , D1 = q0v
    103  ld2 {v2.8b, v3.8b}, [x0] //D2 = q1u , D3 = q1v
    343  ld2 {v6.8b, v7.8b}, [x0], x1 //Q3=p1
    348  ld2 {v4.8b, v5.8b}, [x0], x1 //Q2=p0
    353  ld2 {v0.8b, v1.8b}, [x0], x1 //Q0=q0
    358  ld2 {v2.8b, v3.8b}, [x0] //Q1=q1
D | ih264_ihadamard_scaling_av8.s
    220  ld2 {v0.4h, v1.4h}, [x0] //load 8 dc coeffs
/external/llvm/test/MC/Disassembler/AArch64/
D | arm64-advsimd.txt
    946  # CHECK: ld2.16b { v5, v6 }, [x2]
    947  # CHECK: ld2.2s { v10, v11 }, [x0]
    980  # CHECK: ld2.b { v1, v2 }[2], [x3]
    981  # CHECK: ld2.d { v2, v3 }[1], [x4]
    982  # CHECK: ld2.h { v3, v4 }[2], [x6]
    983  # CHECK: ld2.s { v4, v5 }[3], [x7]
    990  # CHECK: ld2.b { v1, v2 }[2], [x3], #2
    991  # CHECK: ld2.d { v2, v3 }[1], [x4], #16
    992  # CHECK: ld2.h { v3, v4 }[3], [x5], #4
    993  # CHECK: ld2.s { v4, v5 }[2], [x6], #8
    [all …]