; (removed: web code-viewer navigation chrome that was pasted into this file)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-linux-gnu                                  -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE
; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx                      -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX
; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f                  -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512F
; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512VL

; i64 subtraction: all targets select subq + mov to the return register.
define i64 @test_sub_i64(i64 %arg1, i64 %arg2) {
; ALL-LABEL: test_sub_i64:
; ALL:       # %bb.0:
; ALL-NEXT:    subq %rsi, %rdi
; ALL-NEXT:    movq %rdi, %rax
; ALL-NEXT:    retq
  %ret = sub i64 %arg1, %arg2
  ret i64 %ret
}

; i32 subtraction: all targets select subl + mov to the return register.
define i32 @test_sub_i32(i32 %arg1, i32 %arg2) {
; ALL-LABEL: test_sub_i32:
; ALL:       # %bb.0:
; ALL-NEXT:    subl %esi, %edi
; ALL-NEXT:    movl %edi, %eax
; ALL-NEXT:    retq
  %ret = sub i32 %arg1, %arg2
  ret i32 %ret
}

; float add: SSE selects addss; AVX/AVX-512 select the 3-operand vaddss.
define float @test_add_float(float %arg1, float %arg2) {
; SSE-LABEL: test_add_float:
; SSE:       # %bb.0:
; SSE-NEXT:    addss %xmm1, %xmm0
; SSE-NEXT:    retq
;
; ALL_AVX-LABEL: test_add_float:
; ALL_AVX:       # %bb.0:
; ALL_AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT:    retq
  %ret = fadd float %arg1, %arg2
  ret float %ret
}

; double add: SSE selects addsd; AVX/AVX-512 select the 3-operand vaddsd.
define double @test_add_double(double %arg1, double %arg2) {
; SSE-LABEL: test_add_double:
; SSE:       # %bb.0:
; SSE-NEXT:    addsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; ALL_AVX-LABEL: test_add_double:
; ALL_AVX:       # %bb.0:
; ALL_AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT:    retq
  %ret = fadd double %arg1, %arg2
  ret double %ret
}

; float sub: SSE selects subss; AVX/AVX-512 select the 3-operand vsubss.
define float @test_sub_float(float %arg1, float %arg2) {
; SSE-LABEL: test_sub_float:
; SSE:       # %bb.0:
; SSE-NEXT:    subss %xmm1, %xmm0
; SSE-NEXT:    retq
;
; ALL_AVX-LABEL: test_sub_float:
; ALL_AVX:       # %bb.0:
; ALL_AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT:    retq
  %ret = fsub float %arg1, %arg2
  ret float %ret
}

; double sub: SSE selects subsd; AVX/AVX-512 select the 3-operand vsubsd.
define double @test_sub_double(double %arg1, double %arg2) {
; SSE-LABEL: test_sub_double:
; SSE:       # %bb.0:
; SSE-NEXT:    subsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; ALL_AVX-LABEL: test_sub_double:
; ALL_AVX:       # %bb.0:
; ALL_AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT:    retq
  %ret = fsub double %arg1, %arg2
  ret double %ret
}

; <4 x i32> add: SSE selects paddd; AVX/AVX-512 select the 3-operand vpaddd.
define <4 x i32>  @test_add_v4i32(<4 x i32> %arg1, <4 x i32>  %arg2) {
; SSE-LABEL: test_add_v4i32:
; SSE:       # %bb.0:
; SSE-NEXT:    paddd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; ALL_AVX-LABEL: test_add_v4i32:
; ALL_AVX:       # %bb.0:
; ALL_AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT:    retq
  %ret = add <4 x i32>  %arg1, %arg2
  ret <4 x i32>  %ret
}

; <4 x i32> sub: SSE selects psubd; AVX/AVX-512 select the 3-operand vpsubd.
define <4 x i32>  @test_sub_v4i32(<4 x i32> %arg1, <4 x i32>  %arg2) {
; SSE-LABEL: test_sub_v4i32:
; SSE:       # %bb.0:
; SSE-NEXT:    psubd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; ALL_AVX-LABEL: test_sub_v4i32:
; ALL_AVX:       # %bb.0:
; ALL_AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT:    retq
  %ret = sub <4 x i32>  %arg1, %arg2
  ret <4 x i32>  %ret
}

; <4 x float> add: SSE selects addps; AVX/AVX-512 select the 3-operand vaddps.
define <4 x float>  @test_add_v4f32(<4 x float> %arg1, <4 x float>  %arg2) {
; SSE-LABEL: test_add_v4f32:
; SSE:       # %bb.0:
; SSE-NEXT:    addps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; ALL_AVX-LABEL: test_add_v4f32:
; ALL_AVX:       # %bb.0:
; ALL_AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT:    retq
  %ret = fadd <4 x float>  %arg1, %arg2
  ret <4 x float>  %ret
}

; <4 x float> sub: SSE selects subps; AVX/AVX-512 select the 3-operand vsubps.
define <4 x float>  @test_sub_v4f32(<4 x float> %arg1, <4 x float>  %arg2) {
; SSE-LABEL: test_sub_v4f32:
; SSE:       # %bb.0:
; SSE-NEXT:    subps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; ALL_AVX-LABEL: test_sub_v4f32:
; ALL_AVX:       # %bb.0:
; ALL_AVX-NEXT:    vsubps %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT:    retq
  %ret = fsub <4 x float>  %arg1, %arg2
  ret <4 x float>  %ret
}

; bitcast float->i32 (FP reg to GPR copy): SSE movd; AVX/AVX-512 vmovd.
define i32  @test_copy_float(float %val) {
; SSE-LABEL: test_copy_float:
; SSE:       # %bb.0:
; SSE-NEXT:    movd %xmm0, %eax
; SSE-NEXT:    retq
;
; ALL_AVX-LABEL: test_copy_float:
; ALL_AVX:       # %bb.0:
; ALL_AVX-NEXT:    vmovd %xmm0, %eax
; ALL_AVX-NEXT:    retq
  %r = bitcast float %val to i32
  ret i32 %r
}

; bitcast i32->float (GPR to FP reg copy): SSE movd; AVX/AVX-512 vmovd.
define float  @test_copy_i32(i32 %val) {
; SSE-LABEL: test_copy_i32:
; SSE:       # %bb.0:
; SSE-NEXT:    movd %edi, %xmm0
; SSE-NEXT:    retq
;
; ALL_AVX-LABEL: test_copy_i32:
; ALL_AVX:       # %bb.0:
; ALL_AVX-NEXT:    vmovd %edi, %xmm0
; ALL_AVX-NEXT:    retq
  %r = bitcast i32 %val to float
  ret float %r
}
