; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse2 | FileCheck %s
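; Bitcasts between x86_mmx and 64-bit scalar/vector types, mixed with MMX intrinsic calls.

; t0: load an x86_mmx value, add it to itself with @llvm.x86.mmx.padd.q, and
; return the result bitcast to i64. Expects paddq and a movd into %rax.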
define i64 @t0(x86_mmx* %p) {
; CHECK-LABEL: t0:
; CHECK:       ## BB#0:
; CHECK-NEXT:    movq
; CHECK-NEXT:    paddq %mm0, %mm0
; CHECK-NEXT:    movd %mm0, %rax
; CHECK-NEXT:    retq
  %t = load x86_mmx, x86_mmx* %p
  %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
  %s = bitcast x86_mmx %u to i64
  ret i64 %s
}
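; t1: same pattern as t0 with @llvm.x86.mmx.padd.d. Expects paddd.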
define i64 @t1(x86_mmx* %p) {
; CHECK-LABEL: t1:
; CHECK:       ## BB#0:
; CHECK-NEXT:    movq
; CHECK-NEXT:    paddd %mm0, %mm0
; CHECK-NEXT:    movd %mm0, %rax
; CHECK-NEXT:    retq
  %t = load x86_mmx, x86_mmx* %p
  %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %t)
  %s = bitcast x86_mmx %u to i64
  ret i64 %s
}
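; t2: same pattern as t0 with @llvm.x86.mmx.padd.w. Expects paddw.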
define i64 @t2(x86_mmx* %p) {
; CHECK-LABEL: t2:
; CHECK:       ## BB#0:
; CHECK-NEXT:    movq
; CHECK-NEXT:    paddw %mm0, %mm0
; CHECK-NEXT:    movd %mm0, %rax
; CHECK-NEXT:    retq
  %t = load x86_mmx, x86_mmx* %p
  %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %t)
  %s = bitcast x86_mmx %u to i64
  ret i64 %s
}
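; t3: same pattern as t0 with @llvm.x86.mmx.padd.b. Expects paddb.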
define i64 @t3(x86_mmx* %p) {
; CHECK-LABEL: t3:
; CHECK:       ## BB#0:
; CHECK-NEXT:    movq
; CHECK-NEXT:    paddb %mm0, %mm0
; CHECK-NEXT:    movd %mm0, %rax
; CHECK-NEXT:    retq
  %t = load x86_mmx, x86_mmx* %p
  %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %t)
  %s = bitcast x86_mmx %u to i64
  ret i64 %s
}

@R = external global x86_mmx
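; t4: bitcast two <1 x i64> arguments to x86_mmx, add them with
; @llvm.x86.mmx.paddus.w, store the result to @R, and clear the MMX state
; with @llvm.x86.mmx.emms.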
define void @t4(<1 x i64> %A, <1 x i64> %B) {
; CHECK-LABEL: t4:
; CHECK:       ## BB#0: ## %entry
; CHECK-NEXT:    movd
; CHECK-NEXT:    movd
; CHECK:    retq
entry:
  %tmp2 = bitcast <1 x i64> %A to x86_mmx
  %tmp3 = bitcast <1 x i64> %B to x86_mmx
  %tmp7 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %tmp2, x86_mmx %tmp3)
  store x86_mmx %tmp7, x86_mmx* @R
  tail call void @llvm.x86.mmx.emms()
  ret void
}
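; t5: build a <2 x i32> from two i32 arguments and bitcast it to i64. No MMX
; intrinsic is involved, so the CHECK lines expect SSE code (punpckldq on xmm
; registers) rather than MMX.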
define i64 @t5(i32 %a, i32 %b) nounwind readnone {
; CHECK-LABEL: t5:
; CHECK:       ## BB#0:
; CHECK-NEXT:    movd
; CHECK-NEXT:    movd
; CHECK-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    movd %xmm1, %rax
; CHECK-NEXT:    retq
  %v0 = insertelement <2 x i32> undef, i32 %a, i32 0
  %v1 = insertelement <2 x i32> %v0, i32 %b, i32 1
  %conv = bitcast <2 x i32> %v1 to i64
  ret i64 %conv
}

declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)
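; t6: insert an i64 into a <1 x i64>, bitcast it to x86_mmx, shift left with
; @llvm.x86.mmx.pslli.q, and return the result as <1 x i64>. Expects psllq $48.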
define <1 x i64> @t6(i64 %t) {
; CHECK-LABEL: t6:
; CHECK:       ## BB#0:
; CHECK-NEXT:    movd
; CHECK-NEXT:    psllq $48, %mm0
; CHECK-NEXT:    movd %mm0, %rax
; CHECK-NEXT:    retq
  %t1 = insertelement <1 x i64> undef, i64 %t, i32 0
  %t0 = bitcast <1 x i64> %t1 to x86_mmx
  %t2 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %t0, i32 48)
  %t3 = bitcast x86_mmx %t2 to <1 x i64>
  ret <1 x i64> %t3
}
declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
declare void @llvm.x86.mmx.emms()