Searched refs:altivec (Results 1 – 25 of 269) sorted by relevance

/external/llvm-project/llvm/test/CodeGen/PowerPC/
p10-vector-mask-ops.ll
12 declare i32 @llvm.ppc.altivec.vextractbm(<16 x i8>)
13 declare i32 @llvm.ppc.altivec.vextracthm(<8 x i16>)
14 declare i32 @llvm.ppc.altivec.vextractwm(<4 x i32>)
15 declare i32 @llvm.ppc.altivec.vextractdm(<2 x i64>)
16 declare i32 @llvm.ppc.altivec.vextractqm(<1 x i128>)
24 %ext = tail call i32 @llvm.ppc.altivec.vextractbm(<16 x i8> %a)
34 %ext = tail call i32 @llvm.ppc.altivec.vextracthm(<8 x i16> %a)
44 %ext = tail call i32 @llvm.ppc.altivec.vextractwm(<4 x i32> %a)
54 %ext = tail call i32 @llvm.ppc.altivec.vextractdm(<2 x i64> %a)
64 %ext = tail call i32 @llvm.ppc.altivec.vextractqm(<1 x i128> %a)
[all …]
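
A minimal standalone sketch (not taken from the test above; the function name is illustrative and a Power10 target such as -mcpu=pwr10 is assumed) of driving the byte variant; the halfword, word, doubleword and quadword forms follow the same shape:

declare i32 @llvm.ppc.altivec.vextractbm(<16 x i8>)

define i32 @byte_mask(<16 x i8> %a) {
entry:
  ; gather one mask bit per byte element (its most significant bit)
  ; into the low bits of the scalar result
  %m = tail call i32 @llvm.ppc.altivec.vextractbm(<16 x i8> %a)
  ret i32 %m
}
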
2007-09-04-AltivecDST.ll
1 ; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64-- -mattr=+altivec | grep dst | count 4
5 tail call void @llvm.ppc.altivec.dst( i8* %image, i32 8, i32 0 )
6 tail call void @llvm.ppc.altivec.dstt( i8* %image, i32 8, i32 0 )
7 tail call void @llvm.ppc.altivec.dstst( i8* %image, i32 8, i32 0 )
8 tail call void @llvm.ppc.altivec.dststt( i8* %image, i32 8, i32 0 )
12 declare void @llvm.ppc.altivec.dst(i8*, i32, i32)
13 declare void @llvm.ppc.altivec.dstt(i8*, i32, i32)
14 declare void @llvm.ppc.altivec.dstst(i8*, i32, i32)
15 declare void @llvm.ppc.altivec.dststt(i8*, i32, i32)
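
The dst family are AltiVec data-stream-touch prefetch hints rather than value-producing operations. A hedged sketch of issuing one (function name and control-word value are illustrative):

declare void @llvm.ppc.altivec.dst(i8*, i32, i32)

define void @touch_stream(i8* %p) {
entry:
  ; the second operand packs the block size/count/stride control word,
  ; the third selects one of the four hardware stream tags
  tail call void @llvm.ppc.altivec.dst(i8* %p, i32 8, i32 0)
  ret void
}
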
p10-string-ops.ll
11 declare <16 x i8> @llvm.ppc.altivec.vclrlb(<16 x i8>, i32)
12 declare <16 x i8> @llvm.ppc.altivec.vclrrb(<16 x i8>, i32)
20 %tmp = tail call <16 x i8> @llvm.ppc.altivec.vclrlb(<16 x i8> %a, i32 %n)
30 %tmp = tail call <16 x i8> @llvm.ppc.altivec.vclrrb(<16 x i8> %a, i32 %n)
34 declare <16 x i8> @llvm.ppc.altivec.vstribr(<16 x i8>)
35 declare <16 x i8> @llvm.ppc.altivec.vstribl(<16 x i8>)
36 declare <8 x i16> @llvm.ppc.altivec.vstrihr(<8 x i16>)
37 declare <8 x i16> @llvm.ppc.altivec.vstrihl(<8 x i16>)
39 declare i32 @llvm.ppc.altivec.vstribr.p(i32, <16 x i8>)
40 declare i32 @llvm.ppc.altivec.vstribl.p(i32, <16 x i8>)
[all …]
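
vclrlb and vclrrb zero bytes at the left or right end of a vector under control of a scalar count, so the count is an ordinary runtime operand rather than an immarg immediate. A minimal sketch (illustrative names):

declare <16 x i8> @llvm.ppc.altivec.vclrlb(<16 x i8>, i32)

define <16 x i8> @clear_left(<16 x i8> %a, i32 %n) {
entry:
  ; %n comes from a register; see the ISA for the exact byte-selection rule
  %r = tail call <16 x i8> @llvm.ppc.altivec.vclrlb(<16 x i8> %a, i32 %n)
  ret <16 x i8> %r
}
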
builtins-ppc-p10permute.ll
14 %0 = tail call <16 x i8> @llvm.ppc.altivec.vsldbi(<16 x i8> %a, <16 x i8> %b, i32 1)
17 declare <16 x i8> @llvm.ppc.altivec.vsldbi(<16 x i8>, <16 x i8>, i32 immarg)
25 %0 = tail call <16 x i8> @llvm.ppc.altivec.vsrdbi(<16 x i8> %a, <16 x i8> %b, i32 1)
28 declare <16 x i8> @llvm.ppc.altivec.vsrdbi(<16 x i8>, <16 x i8>, i32 immarg)
91 %0 = tail call <16 x i8> @llvm.ppc.altivec.vinsblx(<16 x i8> %a, i32 %b, i32 %c)
94 declare <16 x i8> @llvm.ppc.altivec.vinsblx(<16 x i8>, i32, i32)
102 %0 = tail call <16 x i8> @llvm.ppc.altivec.vinsbrx(<16 x i8> %a, i32 %b, i32 %c)
105 declare <16 x i8> @llvm.ppc.altivec.vinsbrx(<16 x i8>, i32, i32)
113 %0 = tail call <8 x i16> @llvm.ppc.altivec.vinshlx(<8 x i16> %a, i32 %b, i32 %c)
116 declare <8 x i16> @llvm.ppc.altivec.vinshlx(<8 x i16>, i32, i32)
[all …]
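
vsldbi/vsrdbi shift the concatenation of two vectors by a small bit count, and that count is immarg, so it must be a literal constant in the IR. A minimal sketch:

declare <16 x i8> @llvm.ppc.altivec.vsldbi(<16 x i8>, <16 x i8>, i32 immarg)

define <16 x i8> @shift_pair_left(<16 x i8> %a, <16 x i8> %b) {
entry:
  ; passing a non-constant as the immarg operand would be invalid IR
  %r = tail call <16 x i8> @llvm.ppc.altivec.vsldbi(<16 x i8> %a, <16 x i8> %b, i32 3)
  ret <16 x i8> %r
}
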
p9-vector-compares-and-counts.ll
32 %0 = tail call <16 x i8> @llvm.ppc.altivec.vcmpneb(<16 x i8> %a, <16 x i8> %b)
39 declare <16 x i8> @llvm.ppc.altivec.vcmpneb(<16 x i8>, <16 x i8>)
44 %0 = tail call <16 x i8> @llvm.ppc.altivec.vcmpnezb(<16 x i8> %a, <16 x i8> %b)
51 declare <16 x i8> @llvm.ppc.altivec.vcmpnezb(<16 x i8>, <16 x i8>)
56 %0 = tail call <8 x i16> @llvm.ppc.altivec.vcmpneh(<8 x i16> %a, <8 x i16> %b)
63 declare <8 x i16> @llvm.ppc.altivec.vcmpneh(<8 x i16>, <8 x i16>)
68 %0 = tail call <8 x i16> @llvm.ppc.altivec.vcmpnezh(<8 x i16> %a, <8 x i16> %b)
75 declare <8 x i16> @llvm.ppc.altivec.vcmpnezh(<8 x i16>, <8 x i16>)
80 %0 = tail call <4 x i32> @llvm.ppc.altivec.vcmpnew(<4 x i32> %a, <4 x i32> %b)
87 declare <4 x i32> @llvm.ppc.altivec.vcmpnew(<4 x i32>, <4 x i32>)
[all …]
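
The vcmpne*/vcmpnez* intrinsics produce lane-wise all-ones/all-zeros masks; a minimal sketch of the byte form (illustrative function name):

declare <16 x i8> @llvm.ppc.altivec.vcmpneb(<16 x i8>, <16 x i8>)

define <16 x i8> @bytes_ne(<16 x i8> %a, <16 x i8> %b) {
entry:
  ; each lane is 0xFF where the inputs differ and 0x00 where they match
  %r = tail call <16 x i8> @llvm.ppc.altivec.vcmpneb(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %r
}
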
pr44239.ll
6 %0 = tail call <4 x float> @llvm.ppc.altivec.vcfsx(<4 x i32> %a, i32 1)
14 %0 = tail call <4 x float> @llvm.ppc.altivec.vcfux(<4 x i32> %a, i32 1)
22 %0 = tail call <4 x i32> @llvm.ppc.altivec.vctsxs(<4 x float> %a, i32 1)
30 %0 = tail call <4 x i32> @llvm.ppc.altivec.vctuxs(<4 x float> %a, i32 1)
36 declare <4 x float> @llvm.ppc.altivec.vcfsx(<4 x i32>, i32 immarg)
37 declare <4 x float> @llvm.ppc.altivec.vcfux(<4 x i32>, i32 immarg)
38 declare <4 x i32> @llvm.ppc.altivec.vctsxs(<4 x float>, i32 immarg)
39 declare <4 x i32> @llvm.ppc.altivec.vctuxs(<4 x float>, i32 immarg)
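
These fixed-point/float conversions carry a power-of-two scale in an immarg operand. A sketch of the signed to-float direction (a scale of 1 divides the result by 2; the function name is illustrative):

declare <4 x float> @llvm.ppc.altivec.vcfsx(<4 x i32>, i32 immarg)

define <4 x float> @fixed_to_float(<4 x i32> %a) {
entry:
  ; convert each signed word to float, scaled down by 2^1 here;
  ; the scale must be constant because the operand is immarg
  %f = tail call <4 x float> @llvm.ppc.altivec.vcfsx(<4 x i32> %a, i32 1)
  ret <4 x float> %f
}
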
p9-vector-sign-extend.ll
8 declare <4 x i32> @llvm.ppc.altivec.vextsb2w(<16 x i8>) nounwind readnone
9 declare <2 x i64> @llvm.ppc.altivec.vextsb2d(<16 x i8>) nounwind readnone
10 declare <4 x i32> @llvm.ppc.altivec.vextsh2w(<8 x i16>) nounwind readnone
11 declare <2 x i64> @llvm.ppc.altivec.vextsh2d(<8 x i16>) nounwind readnone
12 declare <2 x i64> @llvm.ppc.altivec.vextsw2d(<4 x i32>) nounwind readnone
19 %tmp = tail call <4 x i32> @llvm.ppc.altivec.vextsb2w(<16 x i8> %x)
28 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vextsb2d(<16 x i8> %x)
37 %tmp = tail call <4 x i32> @llvm.ppc.altivec.vextsh2w(<8 x i16> %x)
46 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vextsh2d(<8 x i16> %x)
55 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vextsw2d(<4 x i32> %x)
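
Each vexts*2* intrinsic sign-extends the least-significant narrow element within every wider lane. A minimal sketch of byte-to-word (illustrative name):

declare <4 x i32> @llvm.ppc.altivec.vextsb2w(<16 x i8>)

define <4 x i32> @sext_b2w(<16 x i8> %x) {
entry:
  ; sign-extend the low byte of each 32-bit lane to the full word
  %r = tail call <4 x i32> @llvm.ppc.altivec.vextsb2w(<16 x i8> %x)
  ret <4 x i32> %r
}
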
vec_mul_even_odd.ll
6 declare <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32>, <4 x i32>) nounwind readnone
7 declare <2 x i64> @llvm.ppc.altivec.vmulesw(<4 x i32>, <4 x i32>) nounwind readnone
8 declare <2 x i64> @llvm.ppc.altivec.vmulouw(<4 x i32>, <4 x i32>) nounwind readnone
9 declare <2 x i64> @llvm.ppc.altivec.vmulosw(<4 x i32>, <4 x i32>) nounwind readnone
10 declare <4 x i32> @llvm.ppc.altivec.vmuluwm(<4 x i32>, <4 x i32>) nounwind readnone
13 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32> %x, <4 x i32> %y)
19 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulesw(<4 x i32> %x, <4 x i32> %y)
25 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulouw(<4 x i32> %x, <4 x i32> %y)
31 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulosw(<4 x i32> %x, <4 x i32> %y)
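
The even/odd multiplies widen: they multiply the even- or odd-numbered word lanes and return full 64-bit products, so <4 x i32> inputs yield a <2 x i64> result. A sketch of the even unsigned form:

declare <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32>, <4 x i32>)

define <2 x i64> @mul_even_u(<4 x i32> %x, <4 x i32> %y) {
entry:
  ; two unsigned 32x32 -> 64 products from the even-numbered lanes
  %p = tail call <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32> %x, <4 x i32> %y)
  ret <2 x i64> %p
}
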
vec_minmax.ll
5 declare <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64>, <2 x i64>) nounwind readnone
6 declare <2 x i64> @llvm.ppc.altivec.vmaxud(<2 x i64>, <2 x i64>) nounwind readnone
7 declare <2 x i64> @llvm.ppc.altivec.vminsd(<2 x i64>, <2 x i64>) nounwind readnone
8 declare <2 x i64> @llvm.ppc.altivec.vminud(<2 x i64>, <2 x i64>) nounwind readnone
11 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64> %x, <2 x i64> %y)
17 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmaxud(<2 x i64> %x, <2 x i64> %y)
23 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vminsd(<2 x i64> %x, <2 x i64> %y)
29 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vminud(<2 x i64> %x, <2 x i64> %y)
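
A sketch of the signed doubleword maximum; the other three differ only in signedness and min versus max (the function name is illustrative):

declare <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64>, <2 x i64>)

define <2 x i64> @max_sd(<2 x i64> %x, <2 x i64> %y) {
entry:
  ; lane-wise signed maximum over the two 64-bit elements
  %m = tail call <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64> %x, <2 x i64> %y)
  ret <2 x i64> %m
}
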
p10-vector-multiply.ll
82 declare <4 x i32> @llvm.ppc.altivec.vmulhsw(<4 x i32>, <4 x i32>)
83 declare <4 x i32> @llvm.ppc.altivec.vmulhuw(<4 x i32>, <4 x i32>)
84 declare <2 x i64> @llvm.ppc.altivec.vmulhsd(<2 x i64>, <2 x i64>)
85 declare <2 x i64> @llvm.ppc.altivec.vmulhud(<2 x i64>, <2 x i64>)
93 %mulh = tail call <4 x i32> @llvm.ppc.altivec.vmulhsw(<4 x i32> %a, <4 x i32> %b)
103 %mulh = tail call <4 x i32> @llvm.ppc.altivec.vmulhuw(<4 x i32> %a, <4 x i32> %b)
113 %mulh = tail call <2 x i64> @llvm.ppc.altivec.vmulhsd(<2 x i64> %a, <2 x i64> %b)
123 %mulh = tail call <2 x i64> @llvm.ppc.altivec.vmulhud(<2 x i64> %a, <2 x i64> %b)
127 declare <1 x i128> @llvm.ppc.altivec.vmuleud(<2 x i64>, <2 x i64>) nounwind readnone
128 declare <1 x i128> @llvm.ppc.altivec.vmuloud(<2 x i64>, <2 x i64>) nounwind readnone
[all …]
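
The vmulh* intrinsics return the high half of the full-width lane product. A minimal sketch of the signed word form:

declare <4 x i32> @llvm.ppc.altivec.vmulhsw(<4 x i32>, <4 x i32>)

define <4 x i32> @mulh_sw(<4 x i32> %a, <4 x i32> %b) {
entry:
  ; keep the upper 32 bits of each signed 32x32 -> 64 product
  %h = tail call <4 x i32> @llvm.ppc.altivec.vmulhsw(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %h
}
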
crypto_bifs.ll
19 %2 = call <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8> %0, <16 x i8> %1)
25 declare <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8>, <16 x i8>) #1
36 %2 = call <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16> %0, <8 x i16> %1)
42 declare <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16>, <8 x i16>) #1
53 %2 = call <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32> %0, <4 x i32> %1)
59 declare <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32>, <4 x i32>) #1
70 %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64> %0, <2 x i64> %1)
76 declare <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64>, <2 x i64>) #1
84 %1 = call <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64> %0)
90 declare <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64>) #1
[all …]
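
The crypto.vpmsum* intrinsics are carry-less (GF(2)) multiply-sums, the building block of CRC- and GHASH-style kernels. A hedged sketch of the byte form:

declare <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8>, <16 x i8>)

define <16 x i8> @pmsum_b(<16 x i8> %a, <16 x i8> %b) {
entry:
  ; polynomial (carry-less) products of corresponding lanes,
  ; XOR-folded pairwise into the result
  %r = call <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %r
}
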
p10-bit-manip-ops.ll
8 declare <2 x i64> @llvm.ppc.altivec.vpdepd(<2 x i64>, <2 x i64>)
9 declare <2 x i64> @llvm.ppc.altivec.vpextd(<2 x i64>, <2 x i64>)
12 declare <2 x i64> @llvm.ppc.altivec.vcfuged(<2 x i64>, <2 x i64>)
14 declare i64 @llvm.ppc.altivec.vgnb(<1 x i128>, i32)
16 declare <2 x i64> @llvm.ppc.altivec.vclzdm(<2 x i64>, <2 x i64>)
17 declare <2 x i64> @llvm.ppc.altivec.vctzdm(<2 x i64>, <2 x i64>)
27 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vpdepd(<2 x i64> %a, <2 x i64> %b)
37 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vpextd(<2 x i64> %a, <2 x i64> %b)
67 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vcfuged(<2 x i64> %a, <2 x i64> %b)
87 %tmp = tail call i64 @llvm.ppc.altivec.vgnb(<1 x i128> %a, i32 2)
[all …]
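
vpdepd and vpextd behave like a per-doubleword PDEP/PEXT: the second operand is a bit mask that steers where bits of the first operand land. A minimal sketch of the deposit form:

declare <2 x i64> @llvm.ppc.altivec.vpdepd(<2 x i64>, <2 x i64>)

define <2 x i64> @bit_deposit(<2 x i64> %a, <2 x i64> %m) {
entry:
  ; scatter the low-order bits of each lane of %a into the
  ; positions selected by the set bits of the matching lane of %m
  %r = tail call <2 x i64> @llvm.ppc.altivec.vpdepd(<2 x i64> %a, <2 x i64> %m)
  ret <2 x i64> %r
}
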
p10-vector-divide.ll
55 declare <4 x i32> @llvm.ppc.altivec.vdivesw(<4 x i32>, <4 x i32>)
56 declare <4 x i32> @llvm.ppc.altivec.vdiveuw(<4 x i32>, <4 x i32>)
57 declare <2 x i64> @llvm.ppc.altivec.vdivesd(<2 x i64>, <2 x i64>)
58 declare <2 x i64> @llvm.ppc.altivec.vdiveud(<2 x i64>, <2 x i64>)
66 %div = tail call <4 x i32> @llvm.ppc.altivec.vdivesw(<4 x i32> %a, <4 x i32> %b)
76 %div = tail call <4 x i32> @llvm.ppc.altivec.vdiveuw(<4 x i32> %a, <4 x i32> %b)
104 %div = tail call <2 x i64> @llvm.ppc.altivec.vdivesd(<2 x i64> %a, <2 x i64> %b)
114 %div = tail call <2 x i64> @llvm.ppc.altivec.vdiveud(<2 x i64> %a, <2 x i64> %b)
118 declare <1 x i128> @llvm.ppc.altivec.vdivesq(<1 x i128>, <1 x i128>) nounwind readnone
119 declare <1 x i128> @llvm.ppc.altivec.vdiveuq(<1 x i128>, <1 x i128>) nounwind readnone
[all …]
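
The "extended" divides compute roughly (dividend << lane-width) / divisor per lane, the step used to assemble wide divisions from narrow hardware. A hedged sketch of the signed word form:

declare <4 x i32> @llvm.ppc.altivec.vdivesw(<4 x i32>, <4 x i32>)

define <4 x i32> @dive_sw(<4 x i32> %a, <4 x i32> %b) {
entry:
  ; each lane is approximately (a << 32) / b
  %q = tail call <4 x i32> @llvm.ppc.altivec.vdivesw(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %q
}
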
aix-lower-block-address.ll
1 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc-ibm-aix-xcoff \
5 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc-ibm-aix-xcoff \
9 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc64-ibm-aix-xcoff \
13 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc64-ibm-aix-xcoff \
17 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc-ibm-aix-xcoff \
20 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc-ibm-aix-xcoff \
23 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc64-ibm-aix-xcoff \
26 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc64-ibm-aix-xcoff \
vec_add_sub_quadword.ll
118 declare <1 x i128> @llvm.ppc.altivec.vaddeuqm(<1 x i128> %x, <1 x i128> %y, <1 x i128> %z) nounwind…
119 declare <1 x i128> @llvm.ppc.altivec.vaddcuq(<1 x i128> %x, <1 x i128> %y) nounwind readnone
120 declare <1 x i128> @llvm.ppc.altivec.vaddecuq(<1 x i128> %x, <1 x i128> %y, <1 x i128> %z) nounwind…
121 declare <1 x i128> @llvm.ppc.altivec.vsubeuqm(<1 x i128> %x, <1 x i128> %y, <1 x i128> %z) nounwind…
122 declare <1 x i128> @llvm.ppc.altivec.vsubcuq(<1 x i128> %x, <1 x i128> %y) nounwind readnone
123 declare <1 x i128> @llvm.ppc.altivec.vsubecuq(<1 x i128> %x, <1 x i128> %y, <1 x i128> %z) nounwind…
130 %tmp = tail call <1 x i128> @llvm.ppc.altivec.vaddeuqm(<1 x i128> %x,
141 %tmp = tail call <1 x i128> @llvm.ppc.altivec.vaddcuq(<1 x i128> %x,
151 %tmp = tail call <1 x i128> @llvm.ppc.altivec.vaddecuq(<1 x i128> %x,
162 %tmp = tail call <1 x i128> @llvm.ppc.altivec.vsubeuqm(<1 x i128> %x,
[all …]
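
The quadword add/sub intrinsics thread a carry through <1 x i128> values; the three-operand "extended" forms take the carry-in from the low bit of the last operand. A minimal sketch (illustrative name):

declare <1 x i128> @llvm.ppc.altivec.vaddeuqm(<1 x i128>, <1 x i128>, <1 x i128>)

define <1 x i128> @add_with_carry(<1 x i128> %x, <1 x i128> %y, <1 x i128> %z) {
entry:
  ; 128-bit add modulo 2^128 plus the carry-in held in %z's low bit
  %s = tail call <1 x i128> @llvm.ppc.altivec.vaddeuqm(<1 x i128> %x, <1 x i128> %y, <1 x i128> %z)
  ret <1 x i128> %s
}
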
vec_rotate_shift.ll
5 declare <2 x i64> @llvm.ppc.altivec.vrld(<2 x i64>, <2 x i64>) nounwind readnone
6 declare <2 x i64> @llvm.ppc.altivec.vsld(<2 x i64>, <2 x i64>) nounwind readnone
7 declare <2 x i64> @llvm.ppc.altivec.vsrd(<2 x i64>, <2 x i64>) nounwind readnone
8 declare <2 x i64> @llvm.ppc.altivec.vsrad(<2 x i64>, <2 x i64>) nounwind readnone
11 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vrld(<2 x i64> %x, <2 x i64> %y)
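
The rotate/shift doubleword intrinsics take their shift amounts per lane from the second vector. A minimal sketch of the rotate:

declare <2 x i64> @llvm.ppc.altivec.vrld(<2 x i64>, <2 x i64>)

define <2 x i64> @rotl_d(<2 x i64> %x, <2 x i64> %y) {
entry:
  ; rotate each 64-bit lane of %x left by the low bits of the
  ; corresponding lane of %y
  %r = tail call <2 x i64> @llvm.ppc.altivec.vrld(<2 x i64> %x, <2 x i64> %y)
  ret <2 x i64> %r
}
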
/external/llvm/test/CodeGen/PowerPC/
2007-09-04-AltivecDST.ll
1 ; RUN: llc < %s -march=ppc64 -mattr=+altivec | grep dst | count 4
5 tail call void @llvm.ppc.altivec.dst( i8* %image, i32 8, i32 0 )
6 tail call void @llvm.ppc.altivec.dstt( i8* %image, i32 8, i32 0 )
7 tail call void @llvm.ppc.altivec.dstst( i8* %image, i32 8, i32 0 )
8 tail call void @llvm.ppc.altivec.dststt( i8* %image, i32 8, i32 0 )
12 declare void @llvm.ppc.altivec.dst(i8*, i32, i32)
13 declare void @llvm.ppc.altivec.dstt(i8*, i32, i32)
14 declare void @llvm.ppc.altivec.dstst(i8*, i32, i32)
15 declare void @llvm.ppc.altivec.dststt(i8*, i32, i32)
vec_mul_even_odd.ll
6 declare <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32>, <4 x i32>) nounwind readnone
7 declare <2 x i64> @llvm.ppc.altivec.vmulesw(<4 x i32>, <4 x i32>) nounwind readnone
8 declare <2 x i64> @llvm.ppc.altivec.vmulouw(<4 x i32>, <4 x i32>) nounwind readnone
9 declare <2 x i64> @llvm.ppc.altivec.vmulosw(<4 x i32>, <4 x i32>) nounwind readnone
10 declare <4 x i32> @llvm.ppc.altivec.vmuluwm(<4 x i32>, <4 x i32>) nounwind readnone
13 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32> %x, <4 x i32> %y)
19 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulesw(<4 x i32> %x, <4 x i32> %y)
25 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulouw(<4 x i32> %x, <4 x i32> %y)
31 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulosw(<4 x i32> %x, <4 x i32> %y)
vec_minmax.ll
5 declare <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64>, <2 x i64>) nounwind readnone
6 declare <2 x i64> @llvm.ppc.altivec.vmaxud(<2 x i64>, <2 x i64>) nounwind readnone
7 declare <2 x i64> @llvm.ppc.altivec.vminsd(<2 x i64>, <2 x i64>) nounwind readnone
8 declare <2 x i64> @llvm.ppc.altivec.vminud(<2 x i64>, <2 x i64>) nounwind readnone
11 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64> %x, <2 x i64> %y)
17 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmaxud(<2 x i64> %x, <2 x i64> %y)
23 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vminsd(<2 x i64> %x, <2 x i64> %y)
29 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vminud(<2 x i64> %x, <2 x i64> %y)
crypto_bifs.ll
19 %2 = call <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8> %0, <16 x i8> %1)
25 declare <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8>, <16 x i8>) #1
36 %2 = call <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16> %0, <8 x i16> %1)
42 declare <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16>, <8 x i16>) #1
53 %2 = call <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32> %0, <4 x i32> %1)
59 declare <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32>, <4 x i32>) #1
70 %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64> %0, <2 x i64> %1)
76 declare <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64>, <2 x i64>) #1
84 %1 = call <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64> %0)
90 declare <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64>) #1
[all …]
vec_add_sub_quadword.ll
52 declare <1 x i128> @llvm.ppc.altivec.vaddeuqm(<1 x i128> %x,
55 declare <1 x i128> @llvm.ppc.altivec.vaddcuq(<1 x i128> %x,
57 declare <1 x i128> @llvm.ppc.altivec.vaddecuq(<1 x i128> %x,
60 declare <1 x i128> @llvm.ppc.altivec.vsubeuqm(<1 x i128> %x,
63 declare <1 x i128> @llvm.ppc.altivec.vsubcuq(<1 x i128> %x,
65 declare <1 x i128> @llvm.ppc.altivec.vsubecuq(<1 x i128> %x,
72 %tmp = tail call <1 x i128> @llvm.ppc.altivec.vaddeuqm(<1 x i128> %x,
82 %tmp = tail call <1 x i128> @llvm.ppc.altivec.vaddcuq(<1 x i128> %x,
92 %tmp = tail call <1 x i128> @llvm.ppc.altivec.vaddecuq(<1 x i128> %x,
103 %tmp = tail call <1 x i128> @llvm.ppc.altivec.vsubeuqm(<1 x i128> %x,
[all …]
vsx-spill-norwstore.ll
15 …%0 = tail call <8 x i16> @llvm.ppc.altivec.vupkhsb(<16 x i8> <i8 0, i8 -1, i8 -1, i8 0, i8 0, i8 0…
16 …%1 = tail call <8 x i16> @llvm.ppc.altivec.vupklsb(<16 x i8> <i8 0, i8 -1, i8 -1, i8 0, i8 0, i8 0…
30 …%2 = tail call i32 @llvm.ppc.altivec.vcmpequh.p(i32 2, <8 x i16> %0, <8 x i16> <i16 0, i16 -1, i16…
38 …%3 = tail call i32 @llvm.ppc.altivec.vcmpequh.p(i32 2, <8 x i16> %1, <8 x i16> <i16 -1, i16 0, i16…
50 declare <8 x i16> @llvm.ppc.altivec.vupkhsb(<16 x i8>) #1
53 declare <8 x i16> @llvm.ppc.altivec.vupklsb(<16 x i8>) #1
59 declare i32 @llvm.ppc.altivec.vcmpequh.p(i32, <8 x i16>, <8 x i16>) #1
vec_rotate_shift.ll
5 declare <2 x i64> @llvm.ppc.altivec.vrld(<2 x i64>, <2 x i64>) nounwind readnone
6 declare <2 x i64> @llvm.ppc.altivec.vsld(<2 x i64>, <2 x i64>) nounwind readnone
7 declare <2 x i64> @llvm.ppc.altivec.vsrd(<2 x i64>, <2 x i64>) nounwind readnone
8 declare <2 x i64> @llvm.ppc.altivec.vsrad(<2 x i64>, <2 x i64>) nounwind readnone
11 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vrld(<2 x i64> %x, <2 x i64> %y)
/external/llvm-project/llvm/test/Transforms/InstCombine/PowerPC/
aligned-altivec.ll
5 declare <4 x i32> @llvm.ppc.altivec.lvx(i8*) #1
11 %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
14 ; CHECK: @llvm.ppc.altivec.lvx
26 %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
29 ; CHECK-NOT: @llvm.ppc.altivec.lvx
37 declare void @llvm.ppc.altivec.stvx(<4 x i32>, i8*) #0
43 call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
49 ; CHECK: @llvm.ppc.altivec.stvx
57 call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
63 ; CHECK-NOT: @llvm.ppc.altivec.stvx
[all …]
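
lvx and stvx ignore the low four bits of the address, so InstCombine may fold these calls into ordinary vector memory operations only when 16-byte alignment is provable, which is what the CHECK/CHECK-NOT pairs above appear to distinguish. A sketch where the fold can fire (alignment asserted via a parameter attribute; names are illustrative):

declare <4 x i32> @llvm.ppc.altivec.lvx(i8*)

define <4 x i32> @load_known_aligned(<4 x i32>* align 16 %p) {
entry:
  ; with the pointer provably 16-byte aligned, instcombine may turn
  ; this intrinsic call into a plain aligned vector load
  %b = bitcast <4 x i32>* %p to i8*
  %v = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %b)
  ret <4 x i32> %v
}
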
/external/llvm/test/Transforms/InstCombine/
aligned-altivec.ll
5 declare <4 x i32> @llvm.ppc.altivec.lvx(i8*) #1
11 %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
14 ; CHECK: @llvm.ppc.altivec.lvx
26 %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
29 ; CHECK-NOT: @llvm.ppc.altivec.lvx
37 declare void @llvm.ppc.altivec.stvx(<4 x i32>, i8*) #0
43 call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
49 ; CHECK: @llvm.ppc.altivec.stvx
57 call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
63 ; CHECK-NOT: @llvm.ppc.altivec.stvx
[all …]