Searched refs:ld2 (Results 1 – 25 of 125) sorted by relevance

/external/libhevc/common/arm64/
ihevc_intra_pred_chroma_mode2.s
123 ld2 {v0.8b, v1.8b},[x0],x8
129 ld2 {v2.8b, v3.8b},[x10],x8
132 ld2 {v4.8b, v5.8b},[x0],x8
133 ld2 {v6.8b, v7.8b},[x10],x8
136 ld2 {v8.8b, v9.8b},[x0],x8
137 ld2 {v10.8b, v11.8b},[x10],x8
138 ld2 {v12.8b, v13.8b},[x0],x8
141 ld2 {v14.8b, v15.8b},[x10],x8
188 ld2 {v0.8b, v1.8b},[x0],x8
191 ld2 {v2.8b, v3.8b},[x10],x8
[all …]
ihevc_intra_pred_chroma_dc.s
131 ld2 {v30.8b, v31.8b}, [x6], #16 //load from src[nt]
137 ld2 {v26.8b, v27.8b}, [x8],#16 //load from src[2nt+1]
159 ld2 {v30.8b, v31.8b}, [x6],#16 //load from src[nt]
163 ld2 {v26.8b, v27.8b}, [x8],#16 //load from src[2nt+1]
255 ld2 {v30.8b, v31.8b},[x6] //load from src[nt]
258 ld2 {v26.8b, v27.8b},[x8] //load from src[2nt+1]
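
The two libhevc files above use ld2 {vA.8b, vB.8b} to pull 16 interleaved chroma bytes apart into one U vector and one V vector per instruction. As a rough illustration, the same de-interleaving step can be written in C with the corresponding NEON intrinsic vld2_u8; the function and buffer names below are hypothetical and are not taken from the libhevc sources.

#include <arm_neon.h>
#include <stdint.h>

/* De-interleave 8 U/V chroma byte pairs (16 bytes of UVUVUV...) in one step.
   vld2_u8 is the intrinsic form of the "ld2 {vA.8b, vB.8b}, [ptr]" loads above. */
static inline void load_uv_pairs(const uint8_t *src_uv, uint8x8_t *u, uint8x8_t *v)
{
    uint8x8x2_t uv = vld2_u8(src_uv);  /* uv.val[0] = U bytes, uv.val[1] = V bytes */
    *u = uv.val[0];
    *v = uv.val[1];
}
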
/external/llvm/test/Transforms/SROA/
ppcf128-no-fold.ll
5 %struct.ld2 = type { [2 x ppc_fp128] }
11 %z = alloca %struct.ld2, align 16
13 %dat = getelementptr inbounds %struct.ld2, %struct.ld2* %z, i32 0, i32 0
16 %dat1 = getelementptr inbounds %struct.ld2, %struct.ld2* %z, i32 0, i32 0
20 %coerce.dive = getelementptr %struct.ld2, %struct.ld2* %z, i32 0, i32 0
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SROA/
ppcf128-no-fold.ll
5 %struct.ld2 = type { [2 x ppc_fp128] }
11 %z = alloca %struct.ld2, align 16
13 %dat = getelementptr inbounds %struct.ld2, %struct.ld2* %z, i32 0, i32 0
16 %dat1 = getelementptr inbounds %struct.ld2, %struct.ld2* %z, i32 0, i32 0
20 %coerce.dive = getelementptr %struct.ld2, %struct.ld2* %z, i32 0, i32 0
/external/libhevc/decoder/arm64/
ihevcd_itrans_recon_dc_chroma.s
117 ld2 {v2.8b, v3.8b},[x7],x2
118 ld2 {v4.8b, v5.8b},[x7],x2
119 ld2 {v6.8b, v7.8b},[x7],x2
120 ld2 {v8.8b, v9.8b},[x7],x2
122 ld2 {v10.8b, v11.8b},[x7],x2
123 ld2 {v12.8b, v13.8b},[x7],x2
124 ld2 {v14.8b, v15.8b},[x7],x2
125 ld2 {v16.8b, v17.8b},[x7]
184 ld2 {v2.8b, v3.8b},[x0],x2
185 ld2 {v4.8b, v5.8b},[x0],x2
[all …]
/external/llvm/test/CodeGen/PowerPC/
ppcf128sf.ll
4 @ld2 = common global ppc_fp128 0xM00000000000000000000000000000000, align 16
15 %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
19 %3 = load ppc_fp128, ppc_fp128* @ld2, align 16
23 %5 = load ppc_fp128, ppc_fp128* @ld2, align 16
27 %7 = load ppc_fp128, ppc_fp128* @ld2, align 16
91 %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
104 %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
117 %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
130 %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
143 %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/
ppcf128sf.ll
4 @ld2 = common global ppc_fp128 0xM00000000000000000000000000000000, align 16
15 %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
19 %3 = load ppc_fp128, ppc_fp128* @ld2, align 16
23 %5 = load ppc_fp128, ppc_fp128* @ld2, align 16
27 %7 = load ppc_fp128, ppc_fp128* @ld2, align 16
91 %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
104 %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
117 %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
130 %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
143 %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/AArch64/
neon-simd-ldst-multi-elem.s
369 ld2 { v0.16b, v1.16b }, [x0]
370 ld2 { v15.8h, v16.8h }, [x15]
371 ld2 { v31.4s, v0.4s }, [sp]
372 ld2 { v0.2d, v1.2d }, [x0]
373 ld2 { v0.8b, v1.8b }, [x0]
374 ld2 { v15.4h, v16.4h }, [x15]
375 ld2 { v31.2s, v0.2s }, [sp]
384 ld2 { v0.16b-v1.16b }, [x0]
385 ld2 { v15.8h-v16.8h }, [x15]
386 ld2 { v31.4s-v0.4s }, [sp]
[all …]
arm64-simd-ldst.s
187 ld2.8b {v4, v5}, [x19]
188 ld2.16b {v4, v5}, [x19]
189 ld2.4h {v4, v5}, [x19]
190 ld2.8h {v4, v5}, [x19]
191 ld2.2s {v4, v5}, [x19]
192 ld2.4s {v4, v5}, [x19]
193 ld2.2d {v4, v5}, [x19]
205 ; CHECK: ld2.8b { v4, v5 }, [x19] ; encoding: [0x64,0x82,0x40,0x0c]
206 ; CHECK: ld2.16b { v4, v5 }, [x19] ; encoding: [0x64,0x82,0x40,0x4c]
207 ; CHECK: ld2.4h { v4, v5 }, [x19] ; encoding: [0x64,0x86,0x40,0x0c]
[all …]
neon-simd-ldst-one-elem.s
96 ld2 { v0.b, v1.b }[9], [x0]
97 ld2 { v15.h, v16.h }[7], [x15]
98 ld2 { v31.s, v0.s }[3], [sp]
99 ld2 { v0.d, v1.d }[1], [x0]
257 ld2 { v0.b, v1.b }[9], [x0], x3
258 ld2 { v15.h, v16.h }[7], [x15], #4
259 ld2 { v31.s, v0.s }[3], [sp], #8
260 ld2 { v0.d, v1.d }[1], [x0], x0
neon-simd-post-ldst-multi-elem.s
124 ld2 { v0.16b, v1.16b }, [x0], x1
125 ld2 { v15.8h, v16.8h }, [x15], x2
126 ld2 { v31.4s, v0.4s }, [sp], #32
127 ld2 { v0.2d, v1.2d }, [x0], #32
128 ld2 { v0.8b, v1.8b }, [x0], x2
129 ld2 { v15.4h, v16.4h }, [x15], x3
130 ld2 { v31.2s, v0.2s }, [sp], #16
/external/llvm/test/MC/AArch64/
neon-simd-ldst-multi-elem.s
369 ld2 { v0.16b, v1.16b }, [x0]
370 ld2 { v15.8h, v16.8h }, [x15]
371 ld2 { v31.4s, v0.4s }, [sp]
372 ld2 { v0.2d, v1.2d }, [x0]
373 ld2 { v0.8b, v1.8b }, [x0]
374 ld2 { v15.4h, v16.4h }, [x15]
375 ld2 { v31.2s, v0.2s }, [sp]
384 ld2 { v0.16b-v1.16b }, [x0]
385 ld2 { v15.8h-v16.8h }, [x15]
386 ld2 { v31.4s-v0.4s }, [sp]
[all …]
arm64-simd-ldst.s
187 ld2.8b {v4, v5}, [x19]
188 ld2.16b {v4, v5}, [x19]
189 ld2.4h {v4, v5}, [x19]
190 ld2.8h {v4, v5}, [x19]
191 ld2.2s {v4, v5}, [x19]
192 ld2.4s {v4, v5}, [x19]
193 ld2.2d {v4, v5}, [x19]
205 ; CHECK: ld2.8b { v4, v5 }, [x19] ; encoding: [0x64,0x82,0x40,0x0c]
206 ; CHECK: ld2.16b { v4, v5 }, [x19] ; encoding: [0x64,0x82,0x40,0x4c]
207 ; CHECK: ld2.4h { v4, v5 }, [x19] ; encoding: [0x64,0x86,0x40,0x0c]
[all …]
neon-simd-ldst-one-elem.s
96 ld2 { v0.b, v1.b }[9], [x0]
97 ld2 { v15.h, v16.h }[7], [x15]
98 ld2 { v31.s, v0.s }[3], [sp]
99 ld2 { v0.d, v1.d }[1], [x0]
257 ld2 { v0.b, v1.b }[9], [x0], x3
258 ld2 { v15.h, v16.h }[7], [x15], #4
259 ld2 { v31.s, v0.s }[3], [sp], #8
260 ld2 { v0.d, v1.d }[1], [x0], x0
neon-simd-post-ldst-multi-elem.s
124 ld2 { v0.16b, v1.16b }, [x0], x1
125 ld2 { v15.8h, v16.8h }, [x15], x2
126 ld2 { v31.4s, v0.4s }, [sp], #32
127 ld2 { v0.2d, v1.2d }, [x0], #32
128 ld2 { v0.8b, v1.8b }, [x0], x2
129 ld2 { v15.4h, v16.4h }, [x15], x3
130 ld2 { v31.2s, v0.2s }, [sp], #16
/external/capstone/suite/MC/AArch64/
neon-simd-ldst-multi-elem.s.cs
156 0x00,0x80,0x40,0x4c = ld2 {v0.16b, v1.16b}, [x0]
157 0xef,0x85,0x40,0x4c = ld2 {v15.8h, v16.8h}, [x15]
158 0xff,0x8b,0x40,0x4c = ld2 {v31.4s, v0.4s}, [sp]
159 0x00,0x8c,0x40,0x4c = ld2 {v0.2d, v1.2d}, [x0]
160 0x00,0x80,0x40,0x0c = ld2 {v0.8b, v1.8b}, [x0]
161 0xef,0x85,0x40,0x0c = ld2 {v15.4h, v16.4h}, [x15]
162 0xff,0x8b,0x40,0x0c = ld2 {v31.2s, v0.2s}, [sp]
163 0x00,0x80,0x40,0x4c = ld2 {v0.16b, v1.16b}, [x0]
164 0xef,0x85,0x40,0x4c = ld2 {v15.8h, v16.8h}, [x15]
165 0xff,0x8b,0x40,0x4c = ld2 {v31.4s, v0.4s}, [sp]
[all …]
neon-simd-ldst-one-elem.s.cs
38 0x00,0x04,0x60,0x4d = ld2 {v0.b, v1.b}[9], [x0]
39 0xef,0x59,0x60,0x4d = ld2 {v15.h, v16.h}[7], [x15]
40 0xff,0x93,0x60,0x4d = ld2 {v31.s, v0.s}[3], [sp]
41 0x00,0x84,0x60,0x4d = ld2 {v0.d, v1.d}[1], [x0]
102 0x00,0x04,0xe3,0x4d = ld2 {v0.b, v1.b}[9], [x0], x3
103 0xef,0x59,0xff,0x4d = ld2 {v15.h, v16.h}[7], [x15], #4
104 0xff,0x93,0xff,0x4d = ld2 {v31.s, v0.s}[3], [sp], #8
105 0x00,0x84,0xe0,0x4d = ld2 {v0.d, v1.d}[1], [x0], x0
neon-simd-post-ldst-multi-elem.s.cs
34 0x00,0x80,0xc1,0x4c = ld2 {v0.16b, v1.16b}, [x0], x1
35 0xef,0x85,0xc2,0x4c = ld2 {v15.8h, v16.8h}, [x15], x2
36 0xff,0x8b,0xdf,0x4c = ld2 {v31.4s, v0.4s}, [sp], #32
37 0x00,0x8c,0xdf,0x4c = ld2 {v0.2d, v1.2d}, [x0], #32
38 0x00,0x80,0xc2,0x0c = ld2 {v0.8b, v1.8b}, [x0], x2
39 0xef,0x85,0xc3,0x0c = ld2 {v15.4h, v16.4h}, [x15], x3
40 0xff,0x8b,0xdf,0x0c = ld2 {v31.2s, v0.2s}, [sp], #16
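
The assembler and disassembler listings above exercise the three ld2 addressing forms: whole-register (ld2 {..}, [xN]), single-lane (ld2 {..}[lane], [xN]) and post-indexed (ld2 {..}, [xN], #imm or xM). As a rough sketch of where the post-indexed form typically comes from, below is a C loop over interleaved data written with vld2q_u8; the function name is hypothetical and the lowering claim is an assumption about typical code generation, not something the listed tests verify.

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

/* Split an interleaved byte stream (ABABAB...) into two separate planes.
   Each vld2q_u8 consumes 32 interleaved bytes and de-interleaves them; the
   load plus pointer bump is the kind of access a compiler may lower to the
   post-indexed "ld2 {v0.16b, v1.16b}, [x0], #32" form listed above.
   Tail handling for counts that are not a multiple of 16 is omitted. */
void deinterleave_pairs(const uint8_t *in, uint8_t *a, uint8_t *b, size_t pairs)
{
    for (size_t i = 0; i + 16 <= pairs; i += 16) {
        uint8x16x2_t ab = vld2q_u8(in);
        in += 32;
        vst1q_u8(a + i, ab.val[0]);
        vst1q_u8(b + i, ab.val[1]);
    }
}
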
/external/llvm/test/CodeGen/AArch64/
aarch64-interleaved-accesses-extract-user.ll
4 ; CHECK: %ldN = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0v4i32
22 ; CHECK: %ldN = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0v4i32
42 ; CHECK-NOT: %ldN = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0v4i32
59 ; CHECK-NOT: %ldN = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0v4i32
69 ; CHECK-NOT: %ldN = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0v4i32
79 ; CHECK-NOT: %ldN = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0v4i32
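
The CodeGen test above checks when a wide vector load followed by de-interleaving shuffles is turned into the @llvm.aarch64.neon.ld2 intrinsic, and when an extra user prevents it. A hedged C sketch of the kind of source loop that can give rise to that pattern follows; the function name is made up and the vectorization outcome is an assumption, not something this test guarantees for any particular compiler invocation.

#include <stdint.h>

/* Strided access over interleaved pairs. When vectorized, a loop of this
   shape produces the wide-load-plus-deinterleaving-shuffle pattern that the
   checks above expect to become an @llvm.aarch64.neon.ld2.v4i32 call;
   whether that happens depends on the compiler's flags and cost model. */
void sum_even_odd(const int32_t *pairs, int n, int32_t *even_sum, int32_t *odd_sum)
{
    int32_t e = 0, o = 0;
    for (int i = 0; i < n; ++i) {
        e += pairs[2 * i];      /* first element of each pair  */
        o += pairs[2 * i + 1];  /* second element of each pair */
    }
    *even_sum = e;
    *odd_sum = o;
}
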
/external/llvm/test/CodeGen/AMDGPU/
promote-alloca-array-allocation.ll
19 %ld2 = load i32, i32* %arrayidx10, align 4
20 store i32 %ld2, i32 addrspace(1)* %out, align 4
41 %ld2 = load i32, i32* %arrayidx10, align 4
42 store i32 %ld2, i32 addrspace(1)* %out, align 4
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
atom-cmpb.ll
17 %ld2 = load i8, i8* %incdec.ptr, align 1
19 %x5 = xor i8 %ld2, -1
20 %cmp34 = icmp ult i8 %ld2, %ld1
/external/llvm/test/CodeGen/X86/
atom-cmpb.ll
17 %ld2 = load i8, i8* %incdec.ptr, align 1
19 %x5 = xor i8 %ld2, -1
20 %cmp34 = icmp ult i8 %ld2, %ld1
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
promote-alloca-array-allocation.ll
19 %ld2 = load i32, i32* %arrayidx10, align 4
20 store i32 %ld2, i32 addrspace(1)* %out, align 4
41 %ld2 = load i32, i32* %arrayidx10, align 4
42 store i32 %ld2, i32 addrspace(1)* %out, align 4
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/EarlyCSE/AArch64/
intrinsics.ll
8 ; Check that @llvm.aarch64.neon.ld2 is optimized away by Early CSE.
10 ; CHECK-NOT: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8
29 %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %5)
65 %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %5)
78 ; Check that the first @llvm.aarch64.neon.ld2 is optimized away by Early CSE.
80 ; CHECK: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8
81 ; CHECK-NOT: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8
94 %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %0)
98 %vld22 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %1)
112 ; Check that the store prevents @llvm.aarch64.neon.ld2 from being optimized
[all …]
/external/llvm/test/Transforms/EarlyCSE/AArch64/
intrinsics.ll
6 ; Check that @llvm.aarch64.neon.ld2 is optimized away by Early CSE.
8 ; CHECK-NOT: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8
27 %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %5)
63 %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %5)
76 ; Check that the first @llvm.aarch64.neon.ld2 is optimized away by Early CSE.
78 ; CHECK: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8
79 ; CHECK-NOT: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8
92 %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %0)
96 %vld22 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %1)
110 ; Check that the store prevents @llvm.aarch64.neon.ld2 from being optimized
[all …]
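
The EarlyCSE tests above state, in their comments, that a repeated @llvm.aarch64.neon.ld2 call is removed when nothing may have written the memory in between, and kept when a store intervenes. Below is a minimal C-level analogue of both cases, assuming the NEON vld2q_u32 intrinsic lowers to that ld2 intrinsic; the function names are made up, and the tests themselves only cover the EarlyCSE pass on the IR form.

#include <arm_neon.h>
#include <stdint.h>

/* Two identical de-interleaving loads from the same pointer. The first
   function's second load is a candidate for elimination; in the second
   function an intervening store may have changed the memory, so both loads
   must stay. This is only an analogue of the IR pattern checked above. */
uint32x4_t redundant_ld2(const uint32_t *p)
{
    uint32x4x2_t first  = vld2q_u32(p);
    uint32x4x2_t second = vld2q_u32(p);   /* candidate for elimination */
    return vaddq_u32(first.val[0], second.val[1]);
}

uint32x4_t blocked_ld2(uint32_t *p)
{
    uint32x4x2_t first = vld2q_u32(p);
    p[0] = 42;                            /* intervening store blocks the CSE */
    uint32x4x2_t second = vld2q_u32(p);
    return vaddq_u32(first.val[0], second.val[1]);
}
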
