Searched refs:neon (Results 1 – 25 of 1116) sorted by relevance


/external/llvm/test/CodeGen/AArch64/
arm64-cvt.ll
1 ; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
10 %tmp3 = call i32 @llvm.aarch64.neon.fcvtas.i32.f32(float %A)
18 %tmp3 = call i64 @llvm.aarch64.neon.fcvtas.i64.f32(float %A)
26 %tmp3 = call i32 @llvm.aarch64.neon.fcvtas.i32.f64(double %A)
34 %tmp3 = call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double %A)
38 declare i32 @llvm.aarch64.neon.fcvtas.i32.f32(float) nounwind readnone
39 declare i64 @llvm.aarch64.neon.fcvtas.i64.f32(float) nounwind readnone
40 declare i32 @llvm.aarch64.neon.fcvtas.i32.f64(double) nounwind readnone
41 declare i64 @llvm.aarch64.neon.fcvtas.i64.f64(double) nounwind readnone
50 %tmp3 = call i32 @llvm.aarch64.neon.fcvtau.i32.f32(float %A)
[all …]
arm64-neon-across.ll
1 ; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
3 declare float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float>)
5 declare float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float>)
7 declare float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float>)
9 declare float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float>)
11 declare i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32>)
13 declare i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16>)
15 declare i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8>)
17 declare i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16>)
19 declare i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8>)
[all …]
arm64-vsqrt.ll
1 ; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
8 %tmp3 = call <2 x float> @llvm.aarch64.neon.frecps.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
17 %tmp3 = call <4 x float> @llvm.aarch64.neon.frecps.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
26 %tmp3 = call <2 x double> @llvm.aarch64.neon.frecps.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
30 declare <2 x float> @llvm.aarch64.neon.frecps.v2f32(<2 x float>, <2 x float>) nounwind readnone
31 declare <4 x float> @llvm.aarch64.neon.frecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
32 declare <2 x double> @llvm.aarch64.neon.frecps.v2f64(<2 x double>, <2 x double>) nounwind readnone
40 %tmp3 = call <2 x float> @llvm.aarch64.neon.frsqrts.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
49 %tmp3 = call <4 x float> @llvm.aarch64.neon.frsqrts.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
58 %tmp3 = call <2 x double> @llvm.aarch64.neon.frsqrts.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
[all …]
arm64-fminv.ll
6 %min = call float @llvm.aarch64.neon.fminv.f32.v2f32(<2 x float> %in)
13 %min = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %in)
20 %min = call double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double> %in)
24 declare float @llvm.aarch64.neon.fminv.f32.v2f32(<2 x float>)
25 declare float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float>)
26 declare double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double>)
31 %max = call float @llvm.aarch64.neon.fmaxv.f32.v2f32(<2 x float> %in)
38 %max = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> %in)
45 %max = call double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double> %in)
49 declare float @llvm.aarch64.neon.fmaxv.f32.v2f32(<2 x float>)
[all …]
arm64-vqadd.ll
1 ; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
8 %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
17 %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
26 %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
35 %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
44 %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
53 %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
62 %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
71 %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
80 %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
[all …]
arm64-vmax.ll
1 ; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
8 %tmp3 = call <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
17 %tmp3 = call <16 x i8> @llvm.aarch64.neon.smax.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
26 %tmp3 = call <4 x i16> @llvm.aarch64.neon.smax.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
35 %tmp3 = call <8 x i16> @llvm.aarch64.neon.smax.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
44 %tmp3 = call <2 x i32> @llvm.aarch64.neon.smax.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
53 %tmp3 = call <4 x i32> @llvm.aarch64.neon.smax.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
57 declare <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
58 declare <16 x i8> @llvm.aarch64.neon.smax.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
59 declare <4 x i16> @llvm.aarch64.neon.smax.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
[all …]
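
The fragments above come from a FileCheck codegen test: each intrinsic call sits inside a small function whose generated code is matched against the expected AArch64 instruction, with the declare lines collected at the end of the file. A minimal sketch of one such test, assuming a hypothetical function name, CHECK pattern, and typed-load syntax (none of these are copied from the results above):

define <8 x i8> @smax_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: smax_8b:
; CHECK: smax.8b
  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %tmp2 = load <8 x i8>, <8 x i8>* %B
  %tmp3 = call <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  ret <8 x i8> %tmp3
}
declare <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8>, <8 x i8>) nounwind readnone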
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/
arm64-cvt.ll
1 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
10 %tmp3 = call i32 @llvm.aarch64.neon.fcvtas.i32.f32(float %A)
18 %tmp3 = call i64 @llvm.aarch64.neon.fcvtas.i64.f32(float %A)
26 %tmp3 = call i32 @llvm.aarch64.neon.fcvtas.i32.f64(double %A)
34 %tmp3 = call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double %A)
38 declare i32 @llvm.aarch64.neon.fcvtas.i32.f32(float) nounwind readnone
39 declare i64 @llvm.aarch64.neon.fcvtas.i64.f32(float) nounwind readnone
40 declare i32 @llvm.aarch64.neon.fcvtas.i32.f64(double) nounwind readnone
41 declare i64 @llvm.aarch64.neon.fcvtas.i64.f64(double) nounwind readnone
50 %tmp3 = call i32 @llvm.aarch64.neon.fcvtau.i32.f32(float %A)
[all …]
arm64-neon-across.ll
1 ; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
3 declare float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float>)
5 declare float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float>)
7 declare float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float>)
9 declare float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float>)
11 declare i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32>)
13 declare i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16>)
15 declare i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8>)
17 declare i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16>)
19 declare i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8>)
[all …]
arm64-vsqrt.ll
1 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
8 %tmp3 = call <2 x float> @llvm.aarch64.neon.frecps.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
17 %tmp3 = call <4 x float> @llvm.aarch64.neon.frecps.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
26 %tmp3 = call <2 x double> @llvm.aarch64.neon.frecps.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
30 declare <2 x float> @llvm.aarch64.neon.frecps.v2f32(<2 x float>, <2 x float>) nounwind readnone
31 declare <4 x float> @llvm.aarch64.neon.frecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
32 declare <2 x double> @llvm.aarch64.neon.frecps.v2f64(<2 x double>, <2 x double>) nounwind readnone
40 %tmp3 = call <2 x float> @llvm.aarch64.neon.frsqrts.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
49 %tmp3 = call <4 x float> @llvm.aarch64.neon.frsqrts.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
58 %tmp3 = call <2 x double> @llvm.aarch64.neon.frsqrts.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
[all …]
arm64-fminv.ll
6 %min = call float @llvm.aarch64.neon.fminv.f32.v2f32(<2 x float> %in)
13 %min = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %in)
20 %min = call double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double> %in)
24 declare float @llvm.aarch64.neon.fminv.f32.v2f32(<2 x float>)
25 declare float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float>)
26 declare double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double>)
31 %max = call float @llvm.aarch64.neon.fmaxv.f32.v2f32(<2 x float> %in)
38 %max = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> %in)
45 %max = call double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double> %in)
49 declare float @llvm.aarch64.neon.fmaxv.f32.v2f32(<2 x float>)
[all …]
arm64-vqadd.ll
1 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
8 %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
17 %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
26 %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
35 %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
44 %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
53 %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
62 %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
71 %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
80 %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
[all …]
arm64-vmax.ll
1 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
8 %tmp3 = call <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
17 %tmp3 = call <16 x i8> @llvm.aarch64.neon.smax.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
26 %tmp3 = call <4 x i16> @llvm.aarch64.neon.smax.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
35 %tmp3 = call <8 x i16> @llvm.aarch64.neon.smax.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
44 %tmp3 = call <2 x i32> @llvm.aarch64.neon.smax.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
53 %tmp3 = call <4 x i32> @llvm.aarch64.neon.smax.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
57 declare <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
58 declare <16 x i8> @llvm.aarch64.neon.smax.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
59 declare <4 x i16> @llvm.aarch64.neon.smax.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/
misched-fusion-aes.ll
3 declare <16 x i8> @llvm.arm.neon.aese(<16 x i8> %d, <16 x i8> %k)
4 declare <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %d)
5 declare <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %d, <16 x i8> %k)
6 declare <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %d)
17 %e00 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %d0, <16 x i8> %k0)
18 %f00 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e00)
19 %e01 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %d1, <16 x i8> %k0)
20 %f01 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e01)
21 %e02 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %d2, <16 x i8> %k0)
22 %f02 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e02)
[all …]
intrinsics-crypto.ll
6 %tmp3 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %tmp, <16 x i8> %tmp2)
8 %tmp4 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %tmp3, <16 x i8> %tmp2)
10 %tmp5 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %tmp4)
12 %tmp6 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %tmp5)
22 %resscalar = call i32 @llvm.arm.neon.sha1h(i32 %scalar)
25 %res2 = call <4 x i32> @llvm.arm.neon.sha1c(<4 x i32> %tmp2, i32 %scalar, <4 x i32> %res1)
27 %res3 = call <4 x i32> @llvm.arm.neon.sha1m(<4 x i32> %res2, i32 %scalar, <4 x i32> %res1)
29 %res4 = call <4 x i32> @llvm.arm.neon.sha1p(<4 x i32> %res3, i32 %scalar, <4 x i32> %res1)
31 %res5 = call <4 x i32> @llvm.arm.neon.sha1su0(<4 x i32> %res4, <4 x i32> %tmp3, <4 x i32> %res1)
33 %res6 = call <4 x i32> @llvm.arm.neon.sha1su1(<4 x i32> %res5, <4 x i32> %res1)
[all …]
vcvt-v8.ll
1 ; RUN: llc < %s -mtriple=armv8 -mattr=+neon | FileCheck %s
6 %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float> %tmp1)
14 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtas.v2i32.v2f32(<2 x float> %tmp1)
22 %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtns.v4i32.v4f32(<4 x float> %tmp1)
30 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtns.v2i32.v2f32(<2 x float> %tmp1)
38 %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtps.v4i32.v4f32(<4 x float> %tmp1)
46 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtps.v2i32.v2f32(<2 x float> %tmp1)
54 %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtms.v4i32.v4f32(<4 x float> %tmp1)
62 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtms.v2i32.v2f32(<2 x float> %tmp1)
70 %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtau.v4i32.v4f32(<4 x float> %tmp1)
[all …]
arm-vld1.ll
36 declare %struct.uint16x4x2_t @llvm.arm.neon.vld1x2.v4i16.p0i16(i16*) nounwind readonly
37 declare %struct.uint16x4x3_t @llvm.arm.neon.vld1x3.v4i16.p0i16(i16*) nounwind readonly
38 declare %struct.uint16x4x4_t @llvm.arm.neon.vld1x4.v4i16.p0i16(i16*) nounwind readonly
40 declare %struct.uint32x2x2_t @llvm.arm.neon.vld1x2.v2i32.p0i32(i32*) nounwind readonly
41 declare %struct.uint32x2x3_t @llvm.arm.neon.vld1x3.v2i32.p0i32(i32*) nounwind readonly
42 declare %struct.uint32x2x4_t @llvm.arm.neon.vld1x4.v2i32.p0i32(i32*) nounwind readonly
44 declare %struct.uint64x1x2_t @llvm.arm.neon.vld1x2.v1i64.p0i64(i64*) nounwind readonly
45 declare %struct.uint64x1x3_t @llvm.arm.neon.vld1x3.v1i64.p0i64(i64*) nounwind readonly
46 declare %struct.uint64x1x4_t @llvm.arm.neon.vld1x4.v1i64.p0i64(i64*) nounwind readonly
48 declare %struct.uint8x8x2_t @llvm.arm.neon.vld1x2.v8i8.p0i8(i8*) nounwind readonly
[all …]
neon-v8.1a.ll
6 declare <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16>, <4 x i16>)
7 declare <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16>, <8 x i16>)
8 declare <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32>, <2 x i32>)
9 declare <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32>, <4 x i32>)
11 declare <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16>, <4 x i16>)
12 declare <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16>, <8 x i16>)
13 declare <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32>, <2 x i32>)
14 declare <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32>, <4 x i32>)
16 declare <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16>, <4 x i16>)
17 declare <8 x i16> @llvm.arm.neon.vqsubs.v8i16(<8 x i16>, <8 x i16>)
[all …]
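
In this test the vqrdmulh and vqadds declares are used back to back: a saturating rounding doubling multiply-high feeding a saturating add, the pairing that a v8.1a target can select as a single VQRDMLAH. A minimal sketch of one such pairing, with a hypothetical function name and the fused-instruction claim as assumptions (not copied from the results above):

define <4 x i16> @vqrdmlah_v4i16(<4 x i16> %acc, <4 x i16> %mhs, <4 x i16> %rhs) {
  %prod = call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %mhs, <4 x i16> %rhs)
  %sum = call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> %acc, <4 x i16> %prod)
  ret <4 x i16> %sum
}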
/external/llvm/test/CodeGen/ARM/
intrinsics-crypto.ll
6 %tmp3 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %tmp, <16 x i8> %tmp2)
8 %tmp4 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %tmp3, <16 x i8> %tmp2)
10 %tmp5 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %tmp4)
12 %tmp6 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %tmp5)
22 %resscalar = call i32 @llvm.arm.neon.sha1h(i32 %scalar)
25 %res2 = call <4 x i32> @llvm.arm.neon.sha1c(<4 x i32> %tmp2, i32 %scalar, <4 x i32> %res1)
27 %res3 = call <4 x i32> @llvm.arm.neon.sha1m(<4 x i32> %res2, i32 %scalar, <4 x i32> %res1)
29 %res4 = call <4 x i32> @llvm.arm.neon.sha1p(<4 x i32> %res3, i32 %scalar, <4 x i32> %res1)
31 %res5 = call <4 x i32> @llvm.arm.neon.sha1su0(<4 x i32> %res4, <4 x i32> %tmp3, <4 x i32> %res1)
33 %res6 = call <4 x i32> @llvm.arm.neon.sha1su1(<4 x i32> %res5, <4 x i32> %res1)
[all …]
vcvt-v8.ll
1 ; RUN: llc < %s -mtriple=armv8 -mattr=+neon | FileCheck %s
6 %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float> %tmp1)
14 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtas.v2i32.v2f32(<2 x float> %tmp1)
22 %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtns.v4i32.v4f32(<4 x float> %tmp1)
30 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtns.v2i32.v2f32(<2 x float> %tmp1)
38 %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtps.v4i32.v4f32(<4 x float> %tmp1)
46 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtps.v2i32.v2f32(<2 x float> %tmp1)
54 %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtms.v4i32.v4f32(<4 x float> %tmp1)
62 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtms.v2i32.v2f32(<2 x float> %tmp1)
70 %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtau.v4i32.v4f32(<4 x float> %tmp1)
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/BasicAA/
cs-cs-arm.ll
7 declare <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8*, i32) nounwind readonly
8 declare void @llvm.arm.neon.vst1.p0i8.v8i16(i8*, <8 x i16>, i32) nounwind
13 %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
14 call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
15 %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
22 ; CHECK: Just Ref (MustAlias): Ptr: i8* %p <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i…
23 ; CHECK: NoModRef: Ptr: i8* %q <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* …
24 ; CHECK: NoModRef: Ptr: i8* %p <-> call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i…
25 ; CHECK: Both ModRef (MustAlias): Ptr: i8* %q <-> call void @llvm.arm.neon.vst1.p0i8.v8i16(i8…
26 ; CHECK: Just Ref (MustAlias): Ptr: i8* %p <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i…
[all …]
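
The matched lines exercise BasicAA's modelling of the NEON load/store intrinsics: the vld1 calls only read through %p and the vst1 only writes through %q, so the printed ModRef results pair Ref or Mod with the matching pointer and NoModRef with the other one. A minimal sketch of the function around the excerpted calls, assuming noalias arguments and a hypothetical function name (neither is taken from the results above):

define <8 x i16> @test1(i8* noalias %p, i8* noalias %q, <8 x i16> %y) {
entry:
  %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
  call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
  %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
  %sum = add <8 x i16> %a, %b
  ret <8 x i16> %sum
}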
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/
vcnt.ll
1 ; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
7 %tmp2 = call <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8> %tmp1)
15 %tmp2 = call <16 x i8> @llvm.arm.neon.vcnt.v16i8(<16 x i8> %tmp1)
19 declare <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8>) nounwind readnone
20 declare <16 x i8> @llvm.arm.neon.vcnt.v16i8(<16 x i8>) nounwind readnone
26 %tmp2 = call <8 x i8> @llvm.arm.neon.vclz.v8i8(<8 x i8> %tmp1)
34 %tmp2 = call <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16> %tmp1)
42 %tmp2 = call <2 x i32> @llvm.arm.neon.vclz.v2i32(<2 x i32> %tmp1)
50 %tmp2 = call <16 x i8> @llvm.arm.neon.vclz.v16i8(<16 x i8> %tmp1)
58 %tmp2 = call <8 x i16> @llvm.arm.neon.vclz.v8i16(<8 x i16> %tmp1)
[all …]
vabs.ll
1 ; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
7 %tmp2 = call <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8> %tmp1)
15 %tmp2 = call <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16> %tmp1)
23 %tmp2 = call <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32> %tmp1)
31 %tmp2 = call <2 x float> @llvm.arm.neon.vabs.v2f32(<2 x float> %tmp1)
39 %tmp2 = call <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8> %tmp1)
47 %tmp2 = call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %tmp1)
55 %tmp2 = call <4 x i32> @llvm.arm.neon.vabs.v4i32(<4 x i32> %tmp1)
63 %tmp2 = call <4 x float> @llvm.arm.neon.vabs.v4f32(<4 x float> %tmp1)
67 declare <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8>) nounwind readnone
[all …]
vqshl.ll
1 ; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
8 %tmp3 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
17 %tmp3 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
26 %tmp3 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
35 %tmp3 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
44 %tmp3 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
53 %tmp3 = call <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
62 %tmp3 = call <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
71 %tmp3 = call <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
80 %tmp3 = call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
[all …]
vminmax.ll
1 ; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
8 %tmp3 = call <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
17 %tmp3 = call <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
26 %tmp3 = call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
35 %tmp3 = call <8 x i8> @llvm.arm.neon.vminu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
44 %tmp3 = call <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
53 %tmp3 = call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
62 %tmp3 = call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
71 %tmp3 = call <16 x i8> @llvm.arm.neon.vmins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
80 %tmp3 = call <8 x i16> @llvm.arm.neon.vmins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
[all …]
vhadd.ll
1 ; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
8 %tmp3 = call <8 x i8> @llvm.arm.neon.vhadds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
17 %tmp3 = call <4 x i16> @llvm.arm.neon.vhadds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
26 %tmp3 = call <2 x i32> @llvm.arm.neon.vhadds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
35 %tmp3 = call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
44 %tmp3 = call <4 x i16> @llvm.arm.neon.vhaddu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
53 %tmp3 = call <2 x i32> @llvm.arm.neon.vhaddu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
62 %tmp3 = call <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
71 %tmp3 = call <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
80 %tmp3 = call <4 x i32> @llvm.arm.neon.vhadds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
[all …]
