; RUN: llc -mtriple=x86_64-apple-darwin -mcpu skx < %s | FileCheck %s
; This test complements the .c test under clang/test/CodeGen/. We check
; that the inline asm constraints are respected in the generated code.
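;
; In the IR constraint strings below, '=' marks an output operand, '^' escapes
; a multi-letter target constraint code (e.g. "^Ym"), and "~{reg}" entries are
; clobbers.
;
; As a rough, illustrative sketch only (not the actual clang test), the C-level
; GCC extended asm that lowers to the "=^Ym" form used below would look like:
;   __asm__ volatile("movq %0, %%mm1\n\t" : "=Ym"(m));
; where 'm' is a hypothetical __m64 local.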

; Function Attrs: nounwind
define void @f_Ym(i64 %m.coerce) {
; Any MMX register constraint
; CHECK-LABEL: f_Ym:
; CHECK:         ## InlineAsm Start
; CHECK-NEXT:    movq %mm{{[0-9]+}}, %mm1
; CHECK:         ## InlineAsm End

entry:
  %0 = tail call x86_mmx asm sideeffect "movq $0, %mm1\0A\09", "=^Ym,~{dirflag},~{fpsr},~{flags}"()
  ret void
}

; Function Attrs: nounwind
define void @f_Yi(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
; Any SSE register when SSE2 is enabled (GCC also requires inter-unit moves enabled)
; CHECK-LABEL: f_Yi:
; CHECK:         ## InlineAsm Start
; CHECK-NEXT:    vpaddq %xmm{{[0-9]+}}, %xmm{{[0-9]+}}, %xmm{{[0-9]+}}
; CHECK:         ## InlineAsm End

entry:
  %0 = tail call <4 x float> asm sideeffect "vpaddq $0, $1, $2\0A\09", "=^Yi,^Yi,^Yi,~{dirflag},~{fpsr},~{flags}"(<4 x float> %y, <4 x float> %z)
  ret void
}

; Function Attrs: nounwind
define void @f_Yt(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
; Any SSE register when SSE2 is enabled
; CHECK-LABEL: f_Yt:
; CHECK:         ## InlineAsm Start
; CHECK-NEXT:    vpaddq %xmm{{[0-9]+}}, %xmm{{[0-9]+}}, %xmm{{[0-9]+}}
; CHECK:         ## InlineAsm End

entry:
  %0 = tail call <4 x float> asm sideeffect "vpaddq $0, $1, $2\0A\09", "=^Yt,^Yt,^Yt,~{dirflag},~{fpsr},~{flags}"(<4 x float> %y, <4 x float> %z)
  ret void
}

; Function Attrs: nounwind
define void @f_Y2(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
; Any SSE register when SSE2 is enabled
; CHECK-LABEL: f_Y2:
; CHECK:         ## InlineAsm Start
; CHECK-NEXT:    vpaddq %xmm{{[0-9]+}}, %xmm{{[0-9]+}}, %xmm{{[0-9]+}}
; CHECK:         ## InlineAsm End

entry:
  %0 = tail call <4 x float> asm sideeffect "vpaddq $0, $1, $2\0A\09", "=^Y2,^Y2,^Y2,~{dirflag},~{fpsr},~{flags}"(<4 x float> %y, <4 x float> %z)
  ret void
}

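; The two functions below exercise constraints that pin one operand to xmm0:
; each asm has two outputs ("=^Yi" plus "=^Yz" or "=^Y0"), which is why the
; call returns a { <4 x float>, <4 x float> } struct, and the "0" in the
; constraint string ties the final input to output operand 0.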
; Function Attrs: nounwind
define void @f_Yz(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
; xmm0 SSE register (GCC)
; CHECK-LABEL: f_Yz:
; CHECK:         ## InlineAsm Start
; CHECK-NEXT:    vpaddq %xmm{{[0-9]+}}, %xmm{{[0-9]+}}, %xmm0
; CHECK-NEXT:    vpaddq %xmm0, %xmm{{[0-9]+}}, %xmm{{[0-9]+}}
; CHECK:         ## InlineAsm End
entry:
  %0 = tail call { <4 x float>, <4 x float> } asm sideeffect "vpaddq $0,$2,$1\0A\09vpaddq $1,$0,$2\0A\09", "=^Yi,=^Yz,^Yi,0,~{dirflag},~{fpsr},~{flags}"(<4 x float> %y, <4 x float> %z)
  ret void
}

; Function Attrs: nounwind
define void @f_Y0(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
; xmm0 SSE register
; CHECK-LABEL: f_Y0:
; CHECK:         ## InlineAsm Start
; CHECK-NEXT:    vpaddq %xmm{{[0-9]+}}, %xmm{{[0-9]+}}, %xmm0
; CHECK-NEXT:    vpaddq %xmm0, %xmm{{[0-9]+}}, %xmm{{[0-9]+}}
; CHECK:         ## InlineAsm End

entry:
  %0 = tail call { <4 x float>, <4 x float> } asm sideeffect "vpaddq $0,$2,$1\0A\09vpaddq $1,$0,$2\0A\09", "=^Yi,=^Y0,^Yi,0,~{dirflag},~{fpsr},~{flags}"(<4 x float> %y, <4 x float> %z)
  ret void
}