/external/llvm/test/CodeGen/PowerPC/

2007-09-04-AltivecDST.ll
   1: ; RUN: llc < %s -march=ppc64 -mattr=+altivec | grep dst | count 4
   5: tail call void @llvm.ppc.altivec.dst( i8* %image, i32 8, i32 0 )
   6: tail call void @llvm.ppc.altivec.dstt( i8* %image, i32 8, i32 0 )
   7: tail call void @llvm.ppc.altivec.dstst( i8* %image, i32 8, i32 0 )
   8: tail call void @llvm.ppc.altivec.dststt( i8* %image, i32 8, i32 0 )
  12: declare void @llvm.ppc.altivec.dst(i8*, i32, i32)
  13: declare void @llvm.ppc.altivec.dstt(i8*, i32, i32)
  14: declare void @llvm.ppc.altivec.dstst(i8*, i32, i32)
  15: declare void @llvm.ppc.altivec.dststt(i8*, i32, i32)
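The hits above show the RUN line, the four data-stream-touch intrinsic calls, and their declarations, but not the function that contains the calls. A minimal self-contained test of the same shape might look like the sketch below; the wrapper function name and its definition are assumptions, while the calls and declarations are copied from the hits.

    ; RUN: llc < %s -march=ppc64 -mattr=+altivec | grep dst | count 4

    ; Hypothetical wrapper; the real test's enclosing function is not shown above.
    define void @test_dst(i8* %image) {
    entry:
      tail call void @llvm.ppc.altivec.dst( i8* %image, i32 8, i32 0 )
      tail call void @llvm.ppc.altivec.dstt( i8* %image, i32 8, i32 0 )
      tail call void @llvm.ppc.altivec.dstst( i8* %image, i32 8, i32 0 )
      tail call void @llvm.ppc.altivec.dststt( i8* %image, i32 8, i32 0 )
      ret void
    }

    declare void @llvm.ppc.altivec.dst(i8*, i32, i32)
    declare void @llvm.ppc.altivec.dstt(i8*, i32, i32)
    declare void @llvm.ppc.altivec.dstst(i8*, i32, i32)
    declare void @llvm.ppc.altivec.dststt(i8*, i32, i32)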
vec_mul_even_odd.ll
   6: declare <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32>, <4 x i32>) nounwind readnone
   7: declare <2 x i64> @llvm.ppc.altivec.vmulesw(<4 x i32>, <4 x i32>) nounwind readnone
   8: declare <2 x i64> @llvm.ppc.altivec.vmulouw(<4 x i32>, <4 x i32>) nounwind readnone
   9: declare <2 x i64> @llvm.ppc.altivec.vmulosw(<4 x i32>, <4 x i32>) nounwind readnone
  10: declare <4 x i32> @llvm.ppc.altivec.vmuluwm(<4 x i32>, <4 x i32>) nounwind readnone
  13: %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32> %x, <4 x i32> %y)
  19: %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulesw(<4 x i32> %x, <4 x i32> %y)
  25: %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulouw(<4 x i32> %x, <4 x i32> %y)
  31: %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulosw(<4 x i32> %x, <4 x i32> %y)
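These even/odd multiplies widen: they take the even-numbered (vmule*) or odd-numbered (vmulo*) 32-bit lanes of the two <4 x i32> inputs, in the instruction's element numbering, and produce 64-bit products. A complete function in the shape the hits suggest might look like this; the wrapper name and define are assumptions.

    declare <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32>, <4 x i32>) nounwind readnone

    ; Hypothetical wrapper: multiply the even unsigned 32-bit lanes of %x and %y,
    ; yielding two 64-bit products.
    define <2 x i64> @test_vmuleuw(<4 x i32> %x, <4 x i32> %y) nounwind {
      %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32> %x, <4 x i32> %y)
      ret <2 x i64> %tmp
    }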
vec_minmax.ll
   5: declare <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64>, <2 x i64>) nounwind readnone
   6: declare <2 x i64> @llvm.ppc.altivec.vmaxud(<2 x i64>, <2 x i64>) nounwind readnone
   7: declare <2 x i64> @llvm.ppc.altivec.vminsd(<2 x i64>, <2 x i64>) nounwind readnone
   8: declare <2 x i64> @llvm.ppc.altivec.vminud(<2 x i64>, <2 x i64>) nounwind readnone
  11: %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64> %x, <2 x i64> %y)
  17: %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmaxud(<2 x i64> %x, <2 x i64> %y)
  23: %tmp = tail call <2 x i64> @llvm.ppc.altivec.vminsd(<2 x i64> %x, <2 x i64> %y)
  29: %tmp = tail call <2 x i64> @llvm.ppc.altivec.vminud(<2 x i64> %x, <2 x i64> %y)
crypto_bifs.ll
  18: %2 = call <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8> %0, <16 x i8> %1)
  24: declare <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8>, <16 x i8>) #1
  35: %2 = call <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16> %0, <8 x i16> %1)
  41: declare <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16>, <8 x i16>) #1
  52: %2 = call <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32> %0, <4 x i32> %1)
  58: declare <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32>, <4 x i32>) #1
  69: %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64> %0, <2 x i64> %1)
  75: declare <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64>, <2 x i64>) #1
  83: %1 = call <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64> %0)
  89: declare <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64>) #1
  [all …]
vec_add_sub_quadword.ll
  52: declare <1 x i128> @llvm.ppc.altivec.vaddeuqm(<1 x i128> %x,
  55: declare <1 x i128> @llvm.ppc.altivec.vaddcuq(<1 x i128> %x,
  57: declare <1 x i128> @llvm.ppc.altivec.vaddecuq(<1 x i128> %x,
  60: declare <1 x i128> @llvm.ppc.altivec.vsubeuqm(<1 x i128> %x,
  63: declare <1 x i128> @llvm.ppc.altivec.vsubcuq(<1 x i128> %x,
  65: declare <1 x i128> @llvm.ppc.altivec.vsubecuq(<1 x i128> %x,
  72: %tmp = tail call <1 x i128> @llvm.ppc.altivec.vaddeuqm(<1 x i128> %x,
  82: %tmp = tail call <1 x i128> @llvm.ppc.altivec.vaddcuq(<1 x i128> %x,
  92: %tmp = tail call <1 x i128> @llvm.ppc.altivec.vaddecuq(<1 x i128> %x,
 103: %tmp = tail call <1 x i128> @llvm.ppc.altivec.vsubeuqm(<1 x i128> %x,
  [all …]
vsx-spill-norwstore.ll
  15: …%0 = tail call <8 x i16> @llvm.ppc.altivec.vupkhsb(<16 x i8> <i8 0, i8 -1, i8 -1, i8 0, i8 0, i8 0…
  16: …%1 = tail call <8 x i16> @llvm.ppc.altivec.vupklsb(<16 x i8> <i8 0, i8 -1, i8 -1, i8 0, i8 0, i8 0…
  30: …%2 = tail call i32 @llvm.ppc.altivec.vcmpequh.p(i32 2, <8 x i16> %0, <8 x i16> <i16 0, i16 -1, i16…
  38: …%3 = tail call i32 @llvm.ppc.altivec.vcmpequh.p(i32 2, <8 x i16> %1, <8 x i16> <i16 -1, i16 0, i16…
  50: declare <8 x i16> @llvm.ppc.altivec.vupkhsb(<16 x i8>) #1
  53: declare <8 x i16> @llvm.ppc.altivec.vupklsb(<16 x i8>) #1
  59: declare i32 @llvm.ppc.altivec.vcmpequh.p(i32, <8 x i16>, <8 x i16>) #1
vec_rotate_shift.ll
   5: declare <2 x i64> @llvm.ppc.altivec.vrld(<2 x i64>, <2 x i64>) nounwind readnone
   6: declare <2 x i64> @llvm.ppc.altivec.vsld(<2 x i64>, <2 x i64>) nounwind readnone
   7: declare <2 x i64> @llvm.ppc.altivec.vsrd(<2 x i64>, <2 x i64>) nounwind readnone
   8: declare <2 x i64> @llvm.ppc.altivec.vsrad(<2 x i64>, <2 x i64>) nounwind readnone
  11: %tmp = tail call <2 x i64> @llvm.ppc.altivec.vrld(<2 x i64> %x, <2 x i64> %y)
vec_mul.ll
   1: …mtriple=powerpc-unknown-linux-gnu -march=ppc32 -mattr=+altivec -mattr=-vsx -mattr=-power8-altivec …
   2: ; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=-vsx -mcpu…
   3: …owerpc64le-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=-vsx -mcpu=pwr8 -mattr=-power8-al…
   4: ; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=+vsx -mcpu…
   5: …owerpc64le-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=+vsx -mcpu=pwr8 -mattr=-power8-al…
vcmp-fold.ll
  10: …%tmp.upgrd.1 = call i32 @llvm.ppc.altivec.vcmpbfp.p( i32 1, <4 x float> %tmp, <4 x float> %tmp2 ) …
  13: …%tmp.upgrd.2 = call <4 x i32> @llvm.ppc.altivec.vcmpbfp( <4 x float> %tmp4, <4 x float> %tmp6 ) ;…
  20: declare i32 @llvm.ppc.altivec.vcmpbfp.p(i32, <4 x float>, <4 x float>)
  22: declare <4 x i32> @llvm.ppc.altivec.vcmpbfp(<4 x float>, <4 x float>)
vec_cmpd.ll
 207: declare <2 x i64> @llvm.ppc.altivec.vcmpequd(<2 x i64>, <2 x i64>) nounwind readnone
 208: declare i32 @llvm.ppc.altivec.vcmpequd.p(i32, <2 x i64>, <2 x i64>) nounwind readnone
 209: declare <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64>, <2 x i64>) nounwind readnone
 210: declare i32 @llvm.ppc.altivec.vcmpgtsd.p(i32, <2 x i64>, <2 x i64>) nounwind readnone
 211: declare <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64>, <2 x i64>) nounwind readnone
 212: declare i32 @llvm.ppc.altivec.vcmpgtud.p(i32, <2 x i64>, <2 x i64>) nounwind readnone
 215: %tmp = tail call <2 x i64> @llvm.ppc.altivec.vcmpequd(<2 x i64> %x, <2 x i64> %y)
 222: %tmp = tail call i32 @llvm.ppc.altivec.vcmpequd.p(i32 2, <2 x i64> %x, <2 x i64> %y)
 229: %tmp = tail call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %x, <2 x i64> %y)
 236: %tmp = tail call i32 @llvm.ppc.altivec.vcmpgtsd.p(i32 2, <2 x i64> %x, <2 x i64> %y)
  [all …]
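Each comparison comes in two forms: the plain form returns a mask vector, while the .p (predicate) form returns an i32 derived from condition register field CR6; its leading i32 operand (2 in the hits above) selects how the condition is interpreted. A sketch of the predicate form, with the wrapper function assumed:

    declare i32 @llvm.ppc.altivec.vcmpequd.p(i32, <2 x i64>, <2 x i64>) nounwind readnone

    ; Hypothetical wrapper: returns a scalar derived from CR6 rather than a mask;
    ; the leading i32 (2, as in the hits) picks the predicate to test.
    define i32 @test_vcmpequd_p(<2 x i64> %x, <2 x i64> %y) nounwind {
      %tmp = tail call i32 @llvm.ppc.altivec.vcmpequd.p(i32 2, <2 x i64> %x, <2 x i64> %y)
      ret i32 %tmp
    }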
unal-altivec-wint.ll
   5: declare <4 x i32> @llvm.ppc.altivec.lvx(i8*) #1
  11: %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
  26: declare void @llvm.ppc.altivec.stvx(<4 x i32>, i8*) #0
  32: call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
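The lvx/stvx intrinsics load and store one 16-byte vector; the underlying instructions ignore the low four bits of the effective address, which is what makes unaligned-pointer tests like this one interesting. A minimal round trip, with the function and pointer names assumed:

    declare <4 x i32> @llvm.ppc.altivec.lvx(i8*)
    declare void @llvm.ppc.altivec.stvx(<4 x i32>, i8*)

    ; Hypothetical round trip: load 16 bytes through lvx and store them back
    ; through stvx.
    define void @copy_vector(i8* %src, i8* %dst) {
    entry:
      %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %src)
      call void @llvm.ppc.altivec.stvx(<4 x i32> %vl, i8* %dst)
      ret void
    }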
builtins-ppc-p8vector.ll
  24: %4 = call <2 x i64> @llvm.ppc.altivec.vbpermq(<16 x i8> %2, <16 x i8> %3)
  45: %4 = call <2 x i64> @llvm.ppc.altivec.vbpermq(<16 x i8> %2, <16 x i8> %3)
  62: %2 = call <16 x i8> @llvm.ppc.altivec.vgbbd(<16 x i8> %1)
  78: %2 = call <16 x i8> @llvm.ppc.altivec.vgbbd(<16 x i8> %1)
  88: declare <2 x i64> @llvm.ppc.altivec.vbpermq(<16 x i8>, <16 x i8>)
  91: declare <16 x i8> @llvm.ppc.altivec.vgbbd(<16 x i8>)
vrspill.ll
   1: ; RUN: llc -O0 -mtriple=powerpc-unknown-linux-gnu -mattr=+altivec -mattr=-vsx -verify-machineinstrs…
   2: ; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec -mattr=-vsx -verify-machineinst…
   3: ; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec -mattr=+vsx -verify-machineinst…
unal-vec-negarith.ll
  12: ; CHECK-NOT: v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<LD31[%p+4294967281](align=1)>
  13: ; CHECK: v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<LD31[%p+-15](align=1)>
2012-10-12-bitcast.ll
   1: ; RUN: llc -mattr=-vsx -mattr=+altivec -mcpu=pwr7 < %s | FileCheck %s
   2: ; RUN: llc -mattr=+vsx -mattr=+altivec -mcpu=pwr7 < %s | FileCheck -check-prefix=CHECK-VSX %s
vec_br_cmp.ll
  10: …%tmp.upgrd.1 = tail call i32 @llvm.ppc.altivec.vcmpeqfp.p( i32 1, <4 x float> %tmp, <4 x float> %t…
  22: declare i32 @llvm.ppc.altivec.vcmpeqfp.p(i32, <4 x float>, <4 x float>)
vperm-instcombine.ll
   9: …%1 = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> <i32 50462976, i32 117835012, i32 185207048,…
  17: declare <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32>, <4 x i32>, <16 x i8>)
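vperm builds its result by selecting bytes from the 32-byte concatenation of its two vector operands, one mask byte per result byte; with all-constant operands, as in the truncated hit above, InstCombine can presumably fold the whole call to a constant. A generic sketch, with function and value names assumed:

    declare <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32>, <4 x i32>, <16 x i8>)

    ; Hypothetical use: each byte of %mask indexes into the 32 bytes of
    ; %a concatenated with %b to produce one result byte.
    define <4 x i32> @test_vperm(<4 x i32> %a, <4 x i32> %b, <16 x i8> %mask) {
    entry:
      %r = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %a, <4 x i32> %b, <16 x i8> %mask)
      ret <4 x i32> %r
    }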
mc-instrlat.ll
  22: …"true" "no-nans-fp-math"="true" "target-cpu"="ppc64" "target-features"="+altivec,-bpermd,-crypto,-…
  23: …tack-protector-buffer-size"="8" "target-cpu"="ppc64" "target-features"="+altivec,-bpermd,-crypto,-…
vec_conv.ll
   1: ; RUN: llc -mattr=+altivec < %s | FileCheck %s
   3: ; Check vector float/int conversion using altivec.
/external/llvm/test/Transforms/InstCombine/

aligned-altivec.ll
   5: declare <4 x i32> @llvm.ppc.altivec.lvx(i8*) #1
  11: %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
  14: ; CHECK: @llvm.ppc.altivec.lvx
  26: %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
  29: ; CHECK-NOT: @llvm.ppc.altivec.lvx
  37: declare void @llvm.ppc.altivec.stvx(<4 x i32>, i8*) #0
  43: call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
  49: ; CHECK: @llvm.ppc.altivec.stvx
  57: call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
  63: ; CHECK-NOT: @llvm.ppc.altivec.stvx
  [all …]
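The alternating CHECK / CHECK-NOT pairs suggest what this test verifies: the intrinsic call survives InstCombine when the pointer's alignment is unknown or below 16 bytes, and disappears when 16-byte alignment is provable. A sketch of the two shapes; the function names and the rewritten form are assumptions, not the test's actual contents:

    declare <4 x i32> @llvm.ppc.altivec.lvx(i8*)

    ; Underaligned case (assumed): the call survives, matching the CHECK lines.
    define <4 x i32> @load_maybe_unaligned(i8* %hv) {
    entry:
      %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
      ret <4 x i32> %vl
    }

    ; Provably 16-byte-aligned case (assumed post-InstCombine shape, matching
    ; the CHECK-NOT lines): the call is replaced by an ordinary aligned load.
    define <4 x i32> @load_aligned(<4 x i32>* %p) {
    entry:
      %vl = load <4 x i32>, <4 x i32>* %p, align 16
      ret <4 x i32> %vl
    }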
/external/clang/lib/Headers/

module.modulemap
   2: explicit module altivec {
   3: requires altivec
   4: header "altivec.h"
/external/valgrind/none/tests/ppc32/

testVMX.vgtest
   1: prereq: ../../../tests/check_ppc64_auxv_cap altivec
jm-vmx.vgtest
   1: prereq: ../../../tests/check_ppc64_auxv_cap altivec
bug129390-ppc32.vgtest
   1: prereq: ../../../tests/check_ppc64_auxv_cap altivec
/external/valgrind/none/tests/ppc64/

jm-vmx.vgtest
   1: prereq: ../../../tests/check_ppc64_auxv_cap altivec