
Searched refs:avx (Results 1 – 25 of 1120) sorted by relevance

/external/llvm/test/CodeGen/X86/
vector-shuffle-combining-avx.ll
2 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefi…
8 declare <4 x float> @llvm.x86.avx.vpermil.ps(<4 x float>, i8)
9 declare <8 x float> @llvm.x86.avx.vpermil.ps.256(<8 x float>, i8)
10 declare <2 x double> @llvm.x86.avx.vpermil.pd(<2 x double>, i8)
11 declare <4 x double> @llvm.x86.avx.vpermil.pd.256(<4 x double>, i8)
13 declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>)
14 declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>)
15 declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>)
16 declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>)
18 declare <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32>, <8 x i32>, i8)
[all …]
pr18846.ll
1 ; RUN: llc -O3 -disable-peephole -mcpu=corei7-avx -mattr=+avx < %s | FileCheck %s
6 ; pr18846 - needless avx spill/reload
58 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4577) #1
70 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4547) #1
71 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4545) #1
72 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4543) #1
80 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4439) #1
81 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4437) #1
86 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> zeroinitializer) #1
87 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4399) #1
[all …]
combine-avx-intrinsics.ll
1 ; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s
5 %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a0, i32 7)
14 %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a0, i32 7)
23 %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 0)
32 %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 0)
41 %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 -1)
50 %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 -1)
58 declare <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double>, <4 x double>, i32)
59 declare <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float>, <8 x float>, i32)
avx-intrinsics-x86-upgrade.ll
2 ; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=avx | FileCheck %s
11 …%res = call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> %a0, <2 x double> %a1, i8 1)
14 declare <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double>, <2 x double>, i8) nounwind read…
21 %res = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %a0, <4 x float> %a1, i8 1)
24 declare <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float>, <4 x float>, i8) nounwind readnone
31 %res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 1)
44 %res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 2)
47 declare <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32>, <4 x i32>, i8) nounwind readnone
57 %res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 1)
60 declare <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double>, i8) nounwind readnone
[all …]
x86-upgrade-avx-vbroadcast.ll
2 ; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9.0 -mattr=+avx | FileCheck %s
17 %1 = tail call <4 x float> @llvm.x86.avx.vbroadcast.ss(i8* %0)
20 declare <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8*)
29 %1 = tail call <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8* %0)
32 declare <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8*)
41 %1 = tail call <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8* %0)
44 declare <4 x float> @llvm.x86.avx.vbroadcast.ss(i8*)
avx-vextractf128.ll
2 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s
31 %0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a, i8 1)
44 %0 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a, i8 1)
58 %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 1)
71 %0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a, i8 0)
84 %0 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a, i8 0)
98 %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 0)
112 %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 0)
136 declare <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double>, i8) nounwind readnone
137 declare <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float>, i8) nounwind readnone
[all …]
fold-vex.ll
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx | FileCheck %s
6 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-avx | FileCheck %s --check…
7 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx -mattr=-avx | FileCheck %s --check…
8 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2 -mattr=-avx | FileCheck %s --check…
avx-brcond.ll
1 ; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=corei7-avx -mattr=+avx | FileCheck %s
3 declare i32 @llvm.x86.avx.ptestz.256(<4 x i64> %p1, <4 x i64> %p2) nounwind
4 declare i32 @llvm.x86.avx.ptestc.256(<4 x i64> %p1, <4 x i64> %p2) nounwind
13 %res = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %a, <4 x i64> %a) nounwind
37 %res = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %a, <4 x i64> %a) nounwind
61 %res = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %a, <4 x i64> %a) nounwind
85 %res = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %a, <4 x i64> %a) nounwind
109 %res = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %a, <4 x i64> %a) nounwind
133 %res = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %a, <4 x i64> %a) nounwind
maskmovdqu.ll
1 ; RUN: llc < %s -march=x86 -mattr=+sse2,-avx | grep -i EDI
2 ; RUN: llc < %s -march=x86-64 -mattr=+sse2,-avx | grep -i RDI
3 ; RUN: llc < %s -march=x86 -mattr=+avx | grep -i EDI
4 ; RUN: llc < %s -march=x86-64 -mattr=+avx | grep -i RDI
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
vector-shuffle-combining-avx.ll
2 ; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32 --check-prefix=…
5 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64 --check-prefi…
11 declare <4 x float> @llvm.x86.avx.vpermil.ps(<4 x float>, i8)
12 declare <8 x float> @llvm.x86.avx.vpermil.ps.256(<8 x float>, i8)
13 declare <2 x double> @llvm.x86.avx.vpermil.pd(<2 x double>, i8)
14 declare <4 x double> @llvm.x86.avx.vpermil.pd.256(<4 x double>, i8)
16 declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>)
17 declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>)
18 declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>)
19 declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>)
[all …]
avx-intrinsics-x86.ll
2 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+pclmul,+avx -show-mc-encoding | FileCheck %s …
4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+pclmul,+avx -show-mc-encoding | FileCheck %…
12 …%res = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x d…
15 declare <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double>, <4 x double>) nounwind readnone
23 …%res = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x floa…
26 declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwind readnone
34 …%res = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x doub…
37 declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind…
45 …%res = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> …
48 declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind rea…
[all …]
pr18846.ll
1 ; RUN: llc -O3 -disable-peephole -mcpu=corei7-avx -mattr=+avx < %s | FileCheck %s
6 ; pr18846 - needless avx spill/reload
58 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4577) #1
70 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4547) #1
71 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4545) #1
72 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4543) #1
80 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4439) #1
81 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4437) #1
86 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> zeroinitializer) #1
87 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4399) #1
[all …]
combine-avx-intrinsics.ll
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s
9 %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a0, i32 7)
17 %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a0, i32 7)
25 %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 0)
33 %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 0)
42 %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 -1)
51 %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 -1)
55 declare <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double>, <4 x double>, i32)
56 declare <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float>, <8 x float>, i32)
x86-upgrade-avx-vbroadcast.ll
2 ; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9.0 -mattr=+avx | FileCheck %s
17 %1 = tail call <4 x float> @llvm.x86.avx.vbroadcast.ss(i8* %0)
20 declare <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8*)
29 %1 = tail call <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8* %0)
32 declare <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8*)
41 %1 = tail call <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8* %0)
44 declare <4 x float> @llvm.x86.avx.vbroadcast.ss(i8*)
avx-vextractf128.ll
2 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s
31 %0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a, i8 1)
44 %0 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a, i8 1)
58 %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 1)
71 %0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a, i8 0)
84 %0 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a, i8 0)
98 %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 0)
112 %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 0)
136 declare <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double>, i8) nounwind readnone
137 declare <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float>, i8) nounwind readnone
[all …]
avx-cvttp2si.ll
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=avx | FileCheck %s --check-prefixes=AVX,AVX1
8 declare <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float>)
9 declare <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double>)
18 %fptosi = tail call <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float> %x)
29 %fptosi = tail call <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float> %x)
41 %fptosi = tail call <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double> %x)
52 %fptosi = tail call <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double> %x)
fold-vex.ll
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx | FileCheck %s
6 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-avx | FileCheck %s --check…
7 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx -mattr=-avx | FileCheck %s --check…
8 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2 -mattr=-avx | FileCheck %s --check…
/external/llvm/test/Transforms/InstCombine/
x86-avx.ll
11 …%a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> <i32 0, i32 1, i3…
19 …%a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> <i32 0, i32 1…
27 %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i64> <i64 0, i64 2>)
35 …%a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i64> <i64 0, i64…
47 %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> zeroinitializer)
56 …%a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> zeroinitializ…
65 …%a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i64> zeroinitializer)
74 …%a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i64> zeroinitial…
85 …%a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> <i32 3, i32 2, i3…
94 …%a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> <i32 7, i32 6…
[all …]
x86-vperm2.ll
6 …%res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 %b…
10 ; CHECK-NEXT: call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1…
18 …%res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 13…
26 %res = call <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float> %a0, <8 x float> %a1, i8 136)
34 %res = call <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32> %a0, <8 x i32> %a1, i8 136)
53 …%res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 25…
65 %res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 0)
74 %res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 1)
83 %res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 2)
92 %res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 3)
[all …]
x86-masked-memops.ll
8 %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> %mask)
12 ; CHECK-NEXT: %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> %mask)
19 %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> zeroinitializer)
29 …%ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 1, i32 2, i32 3, i32 …
39 …%ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 -1, i32 -2, i32 -3, i…
51 …%ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 …
63 %ld = tail call <2 x double> @llvm.x86.avx.maskload.pd(i8* %f, <2 x i64> <i64 -1, i64 0>)
75 …%ld = tail call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, …
85 …%ld = tail call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0…
142 tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> %mask, <4 x float> %v)
[all …]
/external/flac/libFLAC/
cpu.c
135 info->ia32.avx = (flags_ecx & FLAC__CPUINFO_IA32_CPUID_AVX ) ? true : false; in ia32_cpu_info()
152 dfprintf(stderr, " AVX ........ %c\n", info->ia32.avx ? 'Y' : 'n'); in ia32_cpu_info()
160 if (!FLAC__HAS_X86INTRIN || !info->ia32.avx || !ia32_osxsave || (cpu_xgetbv_x86() & 0x6) != 0x6) { in ia32_cpu_info()
162 info->ia32.avx = false; in ia32_cpu_info()
168 dfprintf(stderr, " AVX OS sup . %c\n", info->ia32.avx ? 'Y' : 'n'); in ia32_cpu_info()
196 info->x86.avx = (flags_ecx & FLAC__CPUINFO_IA32_CPUID_AVX ) ? true : false; in x86_64_cpu_info()
209 dfprintf(stderr, " AVX ........ %c\n", info->x86.avx ? 'Y' : 'n'); in x86_64_cpu_info()
217 if (!info->x86.avx || !x86_osxsave || (cpu_xgetbv_x86() & 0x6) != 0x6) { in x86_64_cpu_info()
219 info->x86.avx = false; in x86_64_cpu_info()
225 dfprintf(stderr, " AVX OS sup . %c\n", info->x86.avx ? 'Y' : 'n'); in x86_64_cpu_info()
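The cpu.c hits above are FLAC's run-time AVX detection: the CPUID AVX bit alone is not enough, the OS must also advertise OSXSAVE and enable XMM/YMM state saving in XCR0, which is what the (cpu_xgetbv_x86() & 0x6) != 0x6 test checks. Below is a minimal standalone sketch of that check, assuming GCC/Clang on x86; the helper names xgetbv0 and has_avx are illustrative and not part of FLAC.

#include <cpuid.h>
#include <stdbool.h>
#include <stdint.h>

/* Read XCR0 via xgetbv with ECX = 0; the .byte encoding avoids requiring assembler support. */
static uint64_t xgetbv0(void)
{
    uint32_t lo, hi;
    __asm__ volatile(".byte 0x0f, 0x01, 0xd0" : "=a"(lo), "=d"(hi) : "c"(0));
    return ((uint64_t)hi << 32) | lo;
}

static bool has_avx(void)
{
    unsigned eax, ebx, ecx, edx;
    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return false;
    const unsigned avx_bit     = 1u << 28;  /* CPUID.1:ECX.AVX */
    const unsigned osxsave_bit = 1u << 27;  /* CPUID.1:ECX.OSXSAVE */
    if (!(ecx & avx_bit) || !(ecx & osxsave_bit))
        return false;
    /* XCR0 bits 1 and 2: OS saves/restores SSE and AVX (YMM) register state. */
    return (xgetbv0() & 0x6) == 0x6;
}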
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/
avx-intrinsics-x86.ll
1 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86 -mcpu=corei7 -mattr=avx | FileCheck %s
1836 …%res = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x d…
1839 declare <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double>, <4 x double>) nounwind readnone
1844 …%res = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x floa…
1847 declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwind readnone
1852 …%res = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 7) ; <…
1855 declare <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double>, <4 x double>, i32) nounwind readnone
1860 …%res = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 7) ; <<8 …
1863 declare <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float>, <8 x float>, i32) nounwind readnone
1868 …%res = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x doub…
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/X86/
x86-avx.ll
4 declare <8 x float> @llvm.x86.avx.round.ps.256(<8 x float>, i32)
5 declare <4 x double> @llvm.x86.avx.round.pd.256(<4 x double>, i32)
12 %1 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a, i32 1)
21 %1 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a, i32 2)
30 %1 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a, i32 1)
39 %1 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a, i32 2)
x86-masked-memops.ll
10 ; CHECK-NEXT: [[LD:%.*]] = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> %ma…
13 %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> %mask)
24 %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> zeroinitializer)
35 …%ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 1, i32 2, i32 3, i32 …
48 …%ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 -1, i32 -2, i32 -3, i…
61 …%ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 …
74 %ld = tail call <2 x double> @llvm.x86.avx.maskload.pd(i8* %f, <2 x i64> <i64 -1, i64 0>)
87 …%ld = tail call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, …
98 …%ld = tail call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0…
156 ; CHECK-NEXT: tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> %mask, <4 x float> %v)
[all …]
x86-vpermil.ll
11 …%a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> <i32 0, i32 1, i3…
19 …%a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> <i32 0, i32 1…
35 %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i64> <i64 0, i64 2>)
43 …%a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i64> <i64 0, i64…
63 %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> zeroinitializer)
72 …%a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> zeroinitializ…
90 …%a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i64> zeroinitializer)
99 …%a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i64> zeroinitial…
119 …%a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> <i32 3, i32 2, i3…
128 …%a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> <i32 7, i32 6…
[all …]
