Home
last modified time | relevance | path

Searched refs:ld3 (Results 1 – 25 of 60) sorted by relevance

123

/external/llvm/test/MC/AArch64/
arm64-simd-ldst.s:223 ld3.8b {v4, v5, v6}, [x19]
224 ld3.16b {v4, v5, v6}, [x19]
225 ld3.4h {v4, v5, v6}, [x19]
226 ld3.8h {v4, v5, v6}, [x19]
227 ld3.2s {v4, v5, v6}, [x19]
228 ld3.4s {v4, v5, v6}, [x19]
229 ld3.2d {v4, v5, v6}, [x19]
231 ld3.8b {v9, v10, v11}, [x9]
232 ld3.16b {v14, v15, v16}, [x19]
233 ld3.4h {v24, v25, v26}, [x29]
[all …]
neon-simd-ldst-multi-elem.s:402 ld3 { v0.16b, v1.16b, v2.16b }, [x0]
403 ld3 { v15.8h, v16.8h, v17.8h }, [x15]
404 ld3 { v31.4s, v0.4s, v1.4s }, [sp]
405 ld3 { v0.2d, v1.2d, v2.2d }, [x0]
406 ld3 { v0.8b, v1.8b, v2.8b }, [x0]
407 ld3 { v15.4h, v16.4h, v17.4h }, [x15]
408 ld3 { v31.2s, v0.2s, v1.2s }, [sp]
417 ld3 { v0.16b-v2.16b }, [x0]
418 ld3 { v15.8h-v17.8h }, [x15]
419 ld3 { v31.4s-v1.4s }, [sp]
[all …]
neon-simd-ldst-one-elem.s:105 ld3 { v0.b, v1.b, v2.b }[9], [x0]
106 ld3 { v15.h, v16.h, v17.h }[7], [x15]
107 ld3 { v31.s, v0.s, v1.s }[3], [sp]
108 ld3 { v0.d, v1.d, v2.d }[1], [x0]
266 ld3 { v0.b, v1.b, v2.b }[9], [x0], #3
267 ld3 { v15.h, v16.h, v17.h }[7], [x15], #6
268 ld3 { v31.s, v0.s, v1.s }[3], [sp], x3
269 ld3 { v0.d, v1.d, v2.d }[1], [x0], x6
neon-simd-post-ldst-multi-elem.s:150 ld3 { v0.16b, v1.16b, v2.16b }, [x0], x1
151 ld3 { v15.8h, v16.8h, v17.8h }, [x15], x2
152 ld3 { v31.4s, v0.4s, v1.4s }, [sp], #48
153 ld3 { v0.2d, v1.2d, v2.2d }, [x0], #48
154 ld3 { v0.8b, v1.8b, v2.8b }, [x0], x2
155 ld3 { v15.4h, v16.4h, v17.4h }, [x15], x3
156 ld3 { v31.2s, v0.2s, v1.2s }, [sp], #24
/external/capstone/suite/MC/AArch64/
neon-simd-ldst-multi-elem.s.cs:170 0x00,0x40,0x40,0x4c = ld3 {v0.16b, v1.16b, v2.16b}, [x0]
171 0xef,0x45,0x40,0x4c = ld3 {v15.8h, v16.8h, v17.8h}, [x15]
172 0xff,0x4b,0x40,0x4c = ld3 {v31.4s, v0.4s, v1.4s}, [sp]
173 0x00,0x4c,0x40,0x4c = ld3 {v0.2d, v1.2d, v2.2d}, [x0]
174 0x00,0x40,0x40,0x0c = ld3 {v0.8b, v1.8b, v2.8b}, [x0]
175 0xef,0x45,0x40,0x0c = ld3 {v15.4h, v16.4h, v17.4h}, [x15]
176 0xff,0x4b,0x40,0x0c = ld3 {v31.2s, v0.2s, v1.2s}, [sp]
177 0x00,0x40,0x40,0x4c = ld3 {v0.16b, v1.16b, v2.16b}, [x0]
178 0xef,0x45,0x40,0x4c = ld3 {v15.8h, v16.8h, v17.8h}, [x15]
179 0xff,0x4b,0x40,0x4c = ld3 {v31.4s, v0.4s, v1.4s}, [sp]
[all …]
neon-simd-ldst-one-elem.s.cs:42 0x00,0x24,0x40,0x4d = ld3 {v0.b, v1.b, v2.b}[9], [x0]
43 0xef,0x79,0x40,0x4d = ld3 {v15.h, v16.h, v17.h}[7], [x15]
44 0xff,0xb3,0x40,0x4d = ld3 {v31.s, v0.s, v1.s}[3], [sp]
45 0x00,0xa4,0x40,0x4d = ld3 {v0.d, v1.d, v2.d}[1], [x0]
106 0x00,0x24,0xdf,0x4d = ld3 {v0.b, v1.b, v2.b}[9], [x0], #3
107 0xef,0x79,0xdf,0x4d = ld3 {v15.h, v16.h, v17.h}[7], [x15], #6
108 0xff,0xb3,0xc3,0x4d = ld3 {v31.s, v0.s, v1.s}[3], [sp], x3
109 0x00,0xa4,0xc6,0x4d = ld3 {v0.d, v1.d, v2.d}[1], [x0], x6
neon-simd-post-ldst-multi-elem.s.cs:41 0x00,0x40,0xc1,0x4c = ld3 {v0.16b, v1.16b, v2.16b}, [x0], x1
42 0xef,0x45,0xc2,0x4c = ld3 {v15.8h, v16.8h, v17.8h}, [x15], x2
43 0xff,0x4b,0xdf,0x4c = ld3 {v31.4s, v0.4s, v1.4s}, [sp], #48
44 0x00,0x4c,0xdf,0x4c = ld3 {v0.2d, v1.2d, v2.2d}, [x0], #48
45 0x00,0x40,0xc2,0x0c = ld3 {v0.8b, v1.8b, v2.8b}, [x0], x2
46 0xef,0x45,0xc3,0x0c = ld3 {v15.4h, v16.4h, v17.4h}, [x15], x3
47 0xff,0x4b,0xdf,0x0c = ld3 {v31.2s, v0.2s, v1.2s}, [sp], #24
/external/llvm/test/CodeGen/AMDGPU/
promote-alloca-array-allocation.ll:22 %ld3 = load i32, i32* %arrayidx12
24 store i32 %ld3, i32 addrspace(1)* %arrayidx13
44 %ld3 = load i32, i32* %arrayidx12
46 store i32 %ld3, i32 addrspace(1)* %arrayidx13
/external/llvm/test/CodeGen/AArch64/
arm64-indexed-vector-ldst.ll:869 ;CHECK: ld3.16b { v0, v1, v2 }, [x0], #48
870 %ld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
873 ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
878 ;CHECK: ld3.16b { v0, v1, v2 }, [x0], x{{[0-9]+}}
879 %ld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
882 ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
885 declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8*)
890 ;CHECK: ld3.8b { v0, v1, v2 }, [x0], #24
891 %ld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
894 ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
[all …]
arm64-neon-vector-list-spill.ll:29 ; CHECK: ld3 { v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h }, [{{x[0-9]+|sp}}]
33 …%vld = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %arg1)
89 ; CHECK: ld3 { v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s }, [{{x[0-9]+|sp}}]
93 …%vld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(flo…
128 declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16*)
131 declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float*)
arm64-ld1.ll:20 ; CHECK: ld3.8b { v0, v1, v2 }, [x0]
22 %tmp2 = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
36 declare %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3.v8i8.p0i8(i8*) nounwind readonly
55 ; CHECK: ld3.16b { v0, v1, v2 }, [x0]
57 %tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
71 declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3.v16i8.p0i8(i8*) nounwind readonly
90 ; CHECK: ld3.4h { v0, v1, v2 }, [x0]
92 %tmp2 = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %A)
106 declare %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3.v4i16.p0i16(i16*) nounwind readonly
125 ; CHECK: ld3.8h { v0, v1, v2 }, [x0]
[all …]
aarch64-interleaved-accesses.ll:17 ; NEON: ld3 { v0.4s, v1.4s, v2.4s }, [x0]
19 ; NONEON-NOT: ld3
93 ; NEON: ld3 { v0.2d, v1.2d, v2.2d }, [x0]
95 ; NONEON-NOT: ld3
174 ; NEON: ld3 { v0.4s, v1.4s, v2.4s }, [x0]
176 ; NONEON-NOT: ld3
arm64-copy-tuple.ll:85 …%vec = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0v8i8(<8 x i8>* %ad…
105 …%vec = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0v16i8(<16 x i8…
139 declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0v8i8(<8 x i8>*)
140 declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0v16i8(<16 x i8>*)
fp16-vector-load-store.ll:103 declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3.v4f16.p0v4f16(<4 x half>*)
109 declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0v8f16(<8 x half>*)
127 ; CHECK: ld3 { v0.4h, v1.4h, v2.4h }, [x0]
129 …%0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3.v4f16.p0v4f16(<4 x ha…
181 ; CHECK: ld3 { v0.8h, v1.8h, v2.8h }, [x0]
183 …%0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0v8f16(<8 x ha…
312 ; CHECK: ld3 { v0.h, v1.h, v2.h }[2], [x0]
366 ; CHECK: ld3 { v0.h, v1.h, v2.h }[2], [x0]
/external/llvm/test/Transforms/SLPVectorizer/X86/
bswap.ll:69 %ld3 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 3), align 4
73 %bswap3 = call i64 @llvm.bswap.i64(i64 %ld3)
91 %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 4
95 %bswap3 = call i32 @llvm.bswap.i32(i32 %ld3)
122 %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 2
130 %bswap3 = call i32 @llvm.bswap.i32(i32 %ld3)
156 …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
164 %bswap3 = call i16 @llvm.bswap.i16(i16 %ld3)
199 …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), ali…
215 %bswap3 = call i16 @llvm.bswap.i16(i16 %ld3)
ctpop.ll:57 %ld3 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 3), align 4
61 %ctpop3 = call i64 @llvm.ctpop.i64(i64 %ld3)
79 %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 4
83 %ctpop3 = call i32 @llvm.ctpop.i32(i32 %ld3)
110 %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 2
118 %ctpop3 = call i32 @llvm.ctpop.i32(i32 %ld3)
144 …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
152 %ctpop3 = call i16 @llvm.ctpop.i16(i16 %ld3)
187 …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), ali…
203 %ctpop3 = call i16 @llvm.ctpop.i16(i16 %ld3)
[all …]
fround.ll:92 …%ld3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
96 %ceil3 = call double @llvm.ceil.f64(double %ld3)
174 …%ld3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
182 %ceil3 = call double @llvm.ceil.f64(double %ld3)
263 …%ld3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
267 %floor3 = call double @llvm.floor.f64(double %ld3)
345 …%ld3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
353 %floor3 = call double @llvm.floor.f64(double %ld3)
434 …%ld3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
438 %nearbyint3 = call double @llvm.nearbyint.f64(double %ld3)
[all …]
ctlz.ll:64 %ld3 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 3), align 4
68 %ctlz3 = call i64 @llvm.ctlz.i64(i64 %ld3, i1 0)
95 %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 4
99 %ctlz3 = call i32 @llvm.ctlz.i32(i32 %ld3, i1 0)
138 %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 2
146 %ctlz3 = call i32 @llvm.ctlz.i32(i32 %ld3, i1 0)
193 …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
201 %ctlz3 = call i16 @llvm.ctlz.i16(i16 %ld3, i1 0)
272 …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), ali…
288 %ctlz3 = call i16 @llvm.ctlz.i16(i16 %ld3, i1 0)
[all …]
cttz.ll:64 %ld3 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 3), align 4
68 %cttz3 = call i64 @llvm.cttz.i64(i64 %ld3, i1 0)
95 %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 4
99 %cttz3 = call i32 @llvm.cttz.i32(i32 %ld3, i1 0)
138 %ld3 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 2
146 %cttz3 = call i32 @llvm.cttz.i32(i32 %ld3, i1 0)
193 …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
201 %cttz3 = call i16 @llvm.cttz.i16(i16 %ld3, i1 0)
272 …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), ali…
288 %cttz3 = call i16 @llvm.cttz.i16(i16 %ld3, i1 0)
[all …]
/external/llvm/test/Transforms/EarlyCSE/AArch64/
intrinsics.ll:146 ; Check that @llvm.aarch64.neon.ld3 is not optimized away by Early CSE due
147 ; to mismatch between st2 and ld3.
149 ; CHECK: call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8
168 %vld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8(i8* %5)
205 %vld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8(i8* %5)
226 declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8(i8*)
/external/llvm/test/MC/Disassembler/AArch64/
arm64-advsimd.txt:1109 # CHECK: ld3.8b { v1, v2, v3 }, [x1]
1110 # CHECK: ld3.16b { v5, v6, v7 }, [x2]
1111 # CHECK: ld3.2s { v10, v11, v12 }, [x0]
1126 # CHECK: ld3.b { v1, v2, v3 }[2], [x3], x4
1127 # CHECK: ld3.d { v2, v3, v4 }[1], [x4], x5
1128 # CHECK: ld3.h { v3, v4, v5 }[3], [x5], x6
1129 # CHECK: ld3.s { v4, v5, v6 }[2], [x6], x7
1159 # CHECK: ld3.8b { v1, v2, v3 }, [x2], x3
1160 # CHECK: ld3.16b { v2, v3, v4 }, [x2], x4
1161 # CHECK: ld3.4h { v4, v5, v6 }, [x3], x5
[all …]
/external/llvm/test/CodeGen/ARM/
spill-q.ll:26 %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
70 %tmp1 = fadd <4 x float> %20, %ld3
/external/llvm/test/CodeGen/Thumb2/
thumb2-spill-q.ll:26 %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
70 %tmp1 = fadd <4 x float> %20, %ld3
/external/vixl/test/aarch64/
test-trace-aarch64.cc:1022 __ ld3(v20.V16B(), v21.V16B(), v22.V16B(), MemOperand(x0)); in GenerateTestSequenceNEON() local
1023 __ ld3(v28.V16B(), v29.V16B(), v30.V16B(), MemOperand(x1, x2, PostIndex)); in GenerateTestSequenceNEON() local
1024 __ ld3(v20.V16B(), v21.V16B(), v22.V16B(), MemOperand(x1, 48, PostIndex)); in GenerateTestSequenceNEON() local
1025 __ ld3(v21.V2D(), v22.V2D(), v23.V2D(), MemOperand(x0)); in GenerateTestSequenceNEON() local
1026 __ ld3(v18.V2D(), v19.V2D(), v20.V2D(), MemOperand(x1, x2, PostIndex)); in GenerateTestSequenceNEON() local
1027 __ ld3(v27.V2D(), v28.V2D(), v29.V2D(), MemOperand(x1, 48, PostIndex)); in GenerateTestSequenceNEON() local
1028 __ ld3(v7.V2S(), v8.V2S(), v9.V2S(), MemOperand(x0)); in GenerateTestSequenceNEON() local
1029 __ ld3(v20.V2S(), v21.V2S(), v22.V2S(), MemOperand(x1, x2, PostIndex)); in GenerateTestSequenceNEON() local
1030 __ ld3(v26.V2S(), v27.V2S(), v28.V2S(), MemOperand(x1, 24, PostIndex)); in GenerateTestSequenceNEON() local
1031 __ ld3(v27.V4H(), v28.V4H(), v29.V4H(), MemOperand(x0)); in GenerateTestSequenceNEON() local
[all …]
/external/cronet/buildtools/third_party/libc++/trunk/test/std/numerics/numbers/
specialize.pass.cpp:60 [[maybe_unused]] long double ld3{std::numbers::pi_v<long double>}; in tests() local

123