# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu                                  -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx                      -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f                  -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL
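#
# Checks that scalar G_FDIV selects DIVSSrr/DIVSDrr under SSE,
# VDIVSSrr/VDIVSDrr under AVX, and the EVEX-encoded VDIVSSZrr/VDIVSDZrr
# forms (with the vr128x/fr32x/fr64x register classes) under AVX512F
# and AVX512VL.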
--- |

  define float @test_fdiv_float(float %arg1, float %arg2) {
    %ret = fdiv float %arg1, %arg2
    ret float %ret
  }

  define double @test_fdiv_double(double %arg1, double %arg2) {
    %ret = fdiv double %arg1, %arg2
    ret double %ret
  }

...
---
name:            test_fdiv_float
alignment:       4
legalized:       true
regBankSelected: true
#
registers:
  - { id: 0, class: vecr, preferred-register: '' }
  - { id: 1, class: vecr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: vecr, preferred-register: '' }
  - { id: 4, class: vecr, preferred-register: '' }
  - { id: 5, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
constants:
#
#
body:             |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; SSE-LABEL: name: test_fdiv_float
    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
    ; SSE: [[DIVSSrr:%[0-9]+]]:fr32 = DIVSSrr [[COPY1]], [[COPY3]]
    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[DIVSSrr]]
    ; SSE: $xmm0 = COPY [[COPY4]]
    ; SSE: RET 0, implicit $xmm0
    ; AVX-LABEL: name: test_fdiv_float
    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
    ; AVX: [[VDIVSSrr:%[0-9]+]]:fr32 = VDIVSSrr [[COPY1]], [[COPY3]]
    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VDIVSSrr]]
    ; AVX: $xmm0 = COPY [[COPY4]]
    ; AVX: RET 0, implicit $xmm0
    ; AVX512F-LABEL: name: test_fdiv_float
    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
    ; AVX512F: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY1]], [[COPY3]]
    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSSZrr]]
    ; AVX512F: $xmm0 = COPY [[COPY4]]
    ; AVX512F: RET 0, implicit $xmm0
    ; AVX512VL-LABEL: name: test_fdiv_float
    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
    ; AVX512VL: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY1]], [[COPY3]]
    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSSZrr]]
    ; AVX512VL: $xmm0 = COPY [[COPY4]]
    ; AVX512VL: RET 0, implicit $xmm0
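    ; Input below is legalized, register-bank-selected generic MIR: the
    ; s32 scalar is extracted from the 128-bit vector register with
    ; G_TRUNC and widened back with G_ANYEXT for the return in $xmm0.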
    %2:vecr(s128) = COPY $xmm0
    %0:vecr(s32) = G_TRUNC %2(s128)
    %3:vecr(s128) = COPY $xmm1
    %1:vecr(s32) = G_TRUNC %3(s128)
    %4:vecr(s32) = G_FDIV %0, %1
    %5:vecr(s128) = G_ANYEXT %4(s32)
    $xmm0 = COPY %5(s128)
    RET 0, implicit $xmm0

...
---
name:            test_fdiv_double
alignment:       4
legalized:       true
regBankSelected: true
#
registers:
  - { id: 0, class: vecr, preferred-register: '' }
  - { id: 1, class: vecr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: vecr, preferred-register: '' }
  - { id: 4, class: vecr, preferred-register: '' }
  - { id: 5, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
constants:
#
#
body:             |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; SSE-LABEL: name: test_fdiv_double
    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
    ; SSE: [[DIVSDrr:%[0-9]+]]:fr64 = DIVSDrr [[COPY1]], [[COPY3]]
    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[DIVSDrr]]
    ; SSE: $xmm0 = COPY [[COPY4]]
    ; SSE: RET 0, implicit $xmm0
    ; AVX-LABEL: name: test_fdiv_double
    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
    ; AVX: [[VDIVSDrr:%[0-9]+]]:fr64 = VDIVSDrr [[COPY1]], [[COPY3]]
    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VDIVSDrr]]
    ; AVX: $xmm0 = COPY [[COPY4]]
    ; AVX: RET 0, implicit $xmm0
    ; AVX512F-LABEL: name: test_fdiv_double
    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
    ; AVX512F: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY1]], [[COPY3]]
    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSDZrr]]
    ; AVX512F: $xmm0 = COPY [[COPY4]]
    ; AVX512F: RET 0, implicit $xmm0
    ; AVX512VL-LABEL: name: test_fdiv_double
    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
    ; AVX512VL: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY1]], [[COPY3]]
    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSDZrr]]
    ; AVX512VL: $xmm0 = COPY [[COPY4]]
    ; AVX512VL: RET 0, implicit $xmm0
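    ; Same G_TRUNC / G_FDIV / G_ANYEXT pattern as above, at s64.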
    %2:vecr(s128) = COPY $xmm0
    %0:vecr(s64) = G_TRUNC %2(s128)
    %3:vecr(s128) = COPY $xmm1
    %1:vecr(s64) = G_TRUNC %3(s128)
    %4:vecr(s64) = G_FDIV %0, %1
    %5:vecr(s128) = G_ANYEXT %4(s64)
    $xmm0 = COPY %5(s128)
    RET 0, implicit $xmm0

...