; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.6.6 -mattr=+sse4.1 | FileCheck %s
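;
; Tests that extracting the sign bit of scalar and vector FP values lowers to
; movmskps/movmskpd (and pmovmskb) plus a mask, and that the known-zero upper
; bits of the movmsk results are used to remove redundant masks and extensions.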

%0 = type { double }
%union.anon = type { float }

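; The next four functions model signbit() via a union store in the IR; codegen
; should extract the sign bit with movmskpd/movmskps followed by 'andl $1'
; instead of shifting the value through a GPR.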
define i32 @double_signbit(double %d1) nounwind uwtable readnone ssp {
; CHECK-LABEL: double_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskpd %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca double, align 8
  %__u.i = alloca %0, align 8
  %0 = bitcast double* %__x.addr.i to i8*
  %1 = bitcast %0* %__u.i to i8*
  store double %d1, double* %__x.addr.i, align 8
  %__f.i = getelementptr inbounds %0, %0* %__u.i, i64 0, i32 0
  store double %d1, double* %__f.i, align 8
  %tmp = bitcast double %d1 to i64
  %tmp1 = lshr i64 %tmp, 63
  %shr.i = trunc i64 %tmp1 to i32
  ret i32 %shr.i
}

define i32 @double_add_signbit(double %d1, double %d2) nounwind uwtable readnone ssp {
; CHECK-LABEL: double_add_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    addsd %xmm1, %xmm0
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskpd %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca double, align 8
  %__u.i = alloca %0, align 8
  %add = fadd double %d1, %d2
  %0 = bitcast double* %__x.addr.i to i8*
  %1 = bitcast %0* %__u.i to i8*
  store double %add, double* %__x.addr.i, align 8
  %__f.i = getelementptr inbounds %0, %0* %__u.i, i64 0, i32 0
  store double %add, double* %__f.i, align 8
  %tmp = bitcast double %add to i64
  %tmp1 = lshr i64 %tmp, 63
  %shr.i = trunc i64 %tmp1 to i32
  ret i32 %shr.i
}

define i32 @float_signbit(float %f1) nounwind uwtable readnone ssp {
; CHECK-LABEL: float_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca float, align 4
  %__u.i = alloca %union.anon, align 4
  %0 = bitcast float* %__x.addr.i to i8*
  %1 = bitcast %union.anon* %__u.i to i8*
  store float %f1, float* %__x.addr.i, align 4
  %__f.i = getelementptr inbounds %union.anon, %union.anon* %__u.i, i64 0, i32 0
  store float %f1, float* %__f.i, align 4
  %2 = bitcast float %f1 to i32
  %shr.i = lshr i32 %2, 31
  ret i32 %shr.i
}

define i32 @float_add_signbit(float %f1, float %f2) nounwind uwtable readnone ssp {
; CHECK-LABEL: float_add_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    addss %xmm1, %xmm0
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca float, align 4
  %__u.i = alloca %union.anon, align 4
  %add = fadd float %f1, %f2
  %0 = bitcast float* %__x.addr.i to i8*
  %1 = bitcast %union.anon* %__u.i to i8*
  store float %add, float* %__x.addr.i, align 4
  %__f.i = getelementptr inbounds %union.anon, %union.anon* %__u.i, i64 0, i32 0
  store float %add, float* %__f.i, align 4
  %2 = bitcast float %add to i32
  %shr.i = lshr i32 %2, 31
  ret i32 %shr.i
}

; PR11570
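; The sign-bit test (icmp slt i64, 0) on a bitcast double should become
; movmskpd + andl, feeding the zeroext i1 argument of the tail call directly.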
define void @float_call_signbit(double %n) {
; CHECK-LABEL: float_call_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movmskpd %xmm0, %edi
; CHECK-NEXT:    andl $1, %edi
; CHECK-NEXT:    jmp _float_call_signbit_callee ## TAILCALL
entry:
  %t0 = bitcast double %n to i64
  %tobool.i.i.i.i = icmp slt i64 %t0, 0
  tail call void @float_call_signbit_callee(i1 zeroext %tobool.i.i.i.i)
  ret void
}
declare void @float_call_signbit_callee(i1 zeroext)

; Known zeros: movmskpd sets only the low 2 bits of its result, so the
; 'and i32 %1, 3' below is a no-op and is folded away.
define i32 @knownbits_v2f64(<2 x double> %x) {
; CHECK-LABEL: knownbits_v2f64:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movmskpd %xmm0, %eax
; CHECK-NEXT:    retq
  %1 = tail call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %x)
  %2 = and i32 %1, 3
  ret i32 %2
}

; Don't demand any movmsk sign bits -> zero: pmovmskb sets only bits 0-15, so
; masking the result with 65536 (bit 16) is known to be zero.
define i32 @demandedbits_v16i8(<16 x i8> %x) {
; CHECK-LABEL: demandedbits_v16i8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %1 = tail call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %x)
  %2 = and i32 %1, 65536
  ret i32 %2
}

; Simplify demanded vector elts: only bit 0 of the movmskps result is used, so
; only element 0 of the splat shuffle is demanded and the shuffle is dropped.
define i32 @demandedelts_v4f32(<4 x float> %x) {
; CHECK-LABEL: demandedelts_v4f32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
  %1 = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> zeroinitializer
  %2 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %1)
  %3 = and i32 %2, 1
  ret i32 %3
}

; rdar://10247336
; movmskp{s|d} only set the low 4/2 bits; the high bits are known zero.
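; Because of this, the sign-extension of the movmsk result below is a no-op and
; no extra masking is needed before using it as an index.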

define i32 @t1(<4 x float> %x, i32* nocapture %indexTable) nounwind uwtable readonly ssp {
; CHECK-LABEL: t1:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    movl (%rdi,%rax,4), %eax
; CHECK-NEXT:    retq
entry:
  %0 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %x) nounwind
  %idxprom = sext i32 %0 to i64
  %arrayidx = getelementptr inbounds i32, i32* %indexTable, i64 %idxprom
  %1 = load i32, i32* %arrayidx, align 4
  ret i32 %1
}

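; Same as t1, but through a bitcast to <2 x double> and movmskpd.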
define i32 @t2(<4 x float> %x, i32* nocapture %indexTable) nounwind uwtable readonly ssp {
; CHECK-LABEL: t2:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movmskpd %xmm0, %eax
; CHECK-NEXT:    movl (%rdi,%rax,4), %eax
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <4 x float> %x to <2 x double>
  %1 = tail call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %0) nounwind
  %idxprom = sext i32 %1 to i64
  %arrayidx = getelementptr inbounds i32, i32* %indexTable, i64 %idxprom
  %2 = load i32, i32* %arrayidx, align 4
  ret i32 %2
}

declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone