; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-linux-gnu                                  -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx                      -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f                  -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefixes=CHECK,AVX
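; Check GlobalISel instruction selection of integer and floating-point
; add/sub on scalar and 128-bit vector types, plus GPR<->XMM bitcasts,
; across the SSE, AVX, and AVX-512 run lines above.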

define i64 @test_sub_i64(i64 %arg1, i64 %arg2) {
; CHECK-LABEL: test_sub_i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq %rdi, %rax
; CHECK-NEXT:    subq %rsi, %rax
; CHECK-NEXT:    retq
  %ret = sub i64 %arg1, %arg2
  ret i64 %ret
}

define i32 @test_sub_i32(i32 %arg1, i32 %arg2) {
; CHECK-LABEL: test_sub_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    subl %esi, %eax
; CHECK-NEXT:    retq
  %ret = sub i32 %arg1, %arg2
  ret i32 %ret
}

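; Scalar floating-point add/sub: SSE two-operand forms vs. the AVX
; three-operand v-prefixed forms.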
define float @test_add_float(float %arg1, float %arg2) {
; SSE-LABEL: test_add_float:
; SSE:       # %bb.0:
; SSE-NEXT:    addss %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_add_float:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %ret = fadd float %arg1, %arg2
  ret float %ret
}

define double @test_add_double(double %arg1, double %arg2) {
; SSE-LABEL: test_add_double:
; SSE:       # %bb.0:
; SSE-NEXT:    addsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_add_double:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %ret = fadd double %arg1, %arg2
  ret double %ret
}

define float @test_sub_float(float %arg1, float %arg2) {
; SSE-LABEL: test_sub_float:
; SSE:       # %bb.0:
; SSE-NEXT:    subss %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_sub_float:
; AVX:       # %bb.0:
; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %ret = fsub float %arg1, %arg2
  ret float %ret
}

define double @test_sub_double(double %arg1, double %arg2) {
; SSE-LABEL: test_sub_double:
; SSE:       # %bb.0:
; SSE-NEXT:    subsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_sub_double:
; AVX:       # %bb.0:
; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %ret = fsub double %arg1, %arg2
  ret double %ret
}

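; 128-bit vector integer and floating-point add/sub.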
define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
; SSE-LABEL: test_add_v4i32:
; SSE:       # %bb.0:
; SSE-NEXT:    paddd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_add_v4i32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %ret = add <4 x i32> %arg1, %arg2
  ret <4 x i32> %ret
}

define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
; SSE-LABEL: test_sub_v4i32:
; SSE:       # %bb.0:
; SSE-NEXT:    psubd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_sub_v4i32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %ret = sub <4 x i32> %arg1, %arg2
  ret <4 x i32> %ret
}

define <4 x float> @test_add_v4f32(<4 x float> %arg1, <4 x float> %arg2) {
; SSE-LABEL: test_add_v4f32:
; SSE:       # %bb.0:
; SSE-NEXT:    addps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_add_v4f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %ret = fadd <4 x float> %arg1, %arg2
  ret <4 x float> %ret
}

define <4 x float> @test_sub_v4f32(<4 x float> %arg1, <4 x float> %arg2) {
; SSE-LABEL: test_sub_v4f32:
; SSE:       # %bb.0:
; SSE-NEXT:    subps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_sub_v4f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %ret = fsub <4 x float> %arg1, %arg2
  ret <4 x float> %ret
}

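; Bitcasts between 32-bit GPRs and XMM registers select to movd/vmovd.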
define i32 @test_copy_float(float %val) {
; SSE-LABEL: test_copy_float:
; SSE:       # %bb.0:
; SSE-NEXT:    movd %xmm0, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: test_copy_float:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovd %xmm0, %eax
; AVX-NEXT:    retq
  %r = bitcast float %val to i32
  ret i32 %r
}

define float @test_copy_i32(i32 %val) {
; SSE-LABEL: test_copy_i32:
; SSE:       # %bb.0:
; SSE-NEXT:    movd %edi, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_copy_i32:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovd %edi, %xmm0
; AVX-NEXT:    retq
  %r = bitcast i32 %val to float
  ret float %r
}