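; Check lowering of 'select' with f64 operands, both on an i1 condition and on
; conditions produced by fcmp, across MIPS ISA generations: MIPS II/III fall
; back to a branch, pre-R6 CMOV-capable targets (including microMIPS32R3) use
; the movn/movt/movf conditional moves, and MIPS32R6/MIPS64R6 use sel.d.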
; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
; RUN:    -check-prefixes=ALL,M2,M2-M3
; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-32,CMOV-32R1
; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-32,CMOV-32R2-R5
; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-32,CMOV-32R2-R5
; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-32,CMOV-32R2-R5
; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
; RUN:    -check-prefixes=ALL,SEL-32,32R6
; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
; RUN:    -check-prefixes=ALL,M3,M2-M3
; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-64
; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-64
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-64
; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-64
; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-64
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
; RUN:    -check-prefixes=ALL,SEL-64,64R6
; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips | FileCheck %s \
; RUN:    -check-prefixes=ALL,MM32R3
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
; RUN:    -check-prefixes=ALL,MM32R6,SEL-32

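; Select between the two double arguments using an i1 condition passed in $4:
; MIPS II/III branch on the condition, pre-R6 CMOV targets use movn.d, and R6
; targets use sel.d. On the 32-bit targets, %y arrives on the stack at 16($sp).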
define double @tst_select_i1_double(i1 signext %s, double %x, double %y) {
entry:
  ; ALL-LABEL: tst_select_i1_double:

  ; M2:         andi    $[[T0:[0-9]+]], $4, 1
  ; M2:         bnez    $[[T0]], $[[BB0:BB[0-9_]+]]
  ; M2:         nop
  ; M2:         ldc1    $f0, 16($sp)
  ; M2:         jr      $ra
  ; M2:         nop
  ; M2:         $[[BB0]]:
  ; M2:         mtc1    $7, $f0
  ; M2:         jr      $ra
  ; M2:         mtc1    $6, $f1

  ; CMOV-32:      mtc1    $7, $[[F0:f[0-9]+]]
  ; CMOV-32R1:    mtc1    $6, $f{{[0-9]+}}
  ; CMOV-32R2-R5: mthc1   $6, $[[F0]]
  ; CMOV-32:      andi    $[[T0:[0-9]+]], $4, 1
  ; CMOV-32:      ldc1    $f0, 16($sp)
  ; CMOV-32:      movn.d  $f0, $[[F0]], $[[T0]]

  ; SEL-32:     mtc1    $7, $[[F0:f[0-9]+]]
  ; SEL-32:     mthc1   $6, $[[F0]]
  ; SEL-32:     ldc1    $[[F1:f[0-9]+]], 16($sp)
  ; SEL-32:     mtc1    $4, $f0
  ; SEL-32:     sel.d   $f0, $[[F1]], $[[F0]]

  ; M3:         andi    $[[T0:[0-9]+]], $4, 1
  ; M3:         bnez    $[[T0]], $[[BB0:BB[0-9_]+]]
  ; M3:         nop
  ; M3:         mov.d   $f13, $f14
  ; M3:         $[[BB0]]:
  ; M3:         jr      $ra
  ; M3:         mov.d   $f0, $f13

  ; CMOV-64:    andi    $[[T0:[0-9]+]], $4, 1
  ; CMOV-64:    movn.d  $f14, $f13, $[[T0]]
  ; CMOV-64:    mov.d   $f0, $f14

  ; SEL-64:     mtc1    $4, $f0
  ; SEL-64:     sel.d   $f0, $f14, $f13

  ; MM32R3:     mtc1    $7, $[[F0:f[0-9]+]]
  ; MM32R3:     mthc1   $6, $[[F0]]
  ; MM32R3:     andi16  $[[T0:[0-9]+]], $4, 1
  ; MM32R3:     ldc1    $f0, 16($sp)
  ; MM32R3:     movn.d  $f0, $[[F0]], $[[T0]]

  %r = select i1 %s, double %x, double %y
  ret double %r
}

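; Same as above, but the i1 condition is the last argument, so the 32-bit
; targets must load it from the stack at 16($sp).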
define double @tst_select_i1_double_reordered(double %x, double %y,
                                              i1 signext %s) {
entry:
  ; ALL-LABEL: tst_select_i1_double_reordered:

  ; M2:         lw      $[[T0:[0-9]+]], 16($sp)
  ; M2:         andi    $[[T1:[0-9]+]], $[[T0]], 1
  ; M2:         bnez    $[[T1]], $[[BB0:BB[0-9_]+]]
  ; M2:         nop
  ; M2:         mov.d   $f12, $f14
  ; M2:         $[[BB0]]:
  ; M2:         jr      $ra
  ; M2:         mov.d   $f0, $f12

  ; CMOV-32:    lw      $[[T0:[0-9]+]], 16($sp)
  ; CMOV-32:    andi    $[[T1:[0-9]+]], $[[T0]], 1
  ; CMOV-32:    movn.d  $f14, $f12, $[[T1]]
  ; CMOV-32:    mov.d   $f0, $f14

  ; SEL-32:     lw      $[[T0:[0-9]+]], 16($sp)
  ; SEL-32:     mtc1    $[[T0]], $f0
  ; SEL-32:     sel.d   $f0, $f14, $f12

  ; M3:         andi    $[[T0:[0-9]+]], $6, 1
  ; M3:         bnez    $[[T0]], $[[BB0:BB[0-9_]+]]
  ; M3:         nop
  ; M3:         mov.d   $f12, $f13
  ; M3:         $[[BB0]]:
  ; M3:         jr      $ra
  ; M3:         mov.d   $f0, $f12

  ; CMOV-64:    andi    $[[T0:[0-9]+]], $6, 1
  ; CMOV-64:    movn.d  $f13, $f12, $[[T0]]
  ; CMOV-64:    mov.d   $f0, $f13

  ; SEL-64:     mtc1    $6, $f0
  ; SEL-64:     sel.d   $f0, $f13, $f12

  ; MM32R3:     lw      $[[T0:[0-9]+]], 16($sp)
  ; MM32R3:     andi16  $[[T1:[0-9]+]], $[[T0:[0-9]+]], 1
  ; MM32R3:     movn.d  $f14, $f12, $[[T1]]
  ; MM32R3:     mov.d   $f0, $f14

  %r = select i1 %s, double %x, double %y
  ret double %r
}

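; Select on (fcmp olt): pre-R6 targets compare with c.olt.d and use
; bc1t/movt.d (or a branch on MIPS II/III), while R6 targets materialize the
; mask with cmp.lt.d and use sel.d.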
define double @tst_select_fcmp_olt_double(double %x, double %y) {
entry:
  ; ALL-LABEL: tst_select_fcmp_olt_double:

  ; M2:         c.olt.d   $f12, $f14
  ; M3:         c.olt.d   $f12, $f13
  ; M2-M3:      bc1t      $[[BB0:BB[0-9_]+]]
  ; M2-M3:      nop
  ; M2:         mov.d     $f12, $f14
  ; M3:         mov.d     $f12, $f13
  ; M2-M3:      $[[BB0]]:
  ; M2-M3:      jr        $ra
  ; M2-M3:      mov.d     $f0, $f12

  ; CMOV-32:    c.olt.d   $f12, $f14
  ; CMOV-32:    movt.d    $f14, $f12, $fcc0
  ; CMOV-32:    mov.d     $f0, $f14

  ; SEL-32:     cmp.lt.d  $f0, $f12, $f14
  ; SEL-32:     sel.d     $f0, $f14, $f12

  ; CMOV-64:    c.olt.d   $f12, $f13
  ; CMOV-64:    movt.d    $f13, $f12, $fcc0
  ; CMOV-64:    mov.d     $f0, $f13

  ; SEL-64:     cmp.lt.d  $f0, $f12, $f13
  ; SEL-64:     sel.d     $f0, $f13, $f12

  ; MM32R3:     c.olt.d   $f12, $f14
  ; MM32R3:     movt.d    $f14, $f12, $fcc0
  ; MM32R3:     mov.d     $f0, $f14

  %s = fcmp olt double %x, %y
  %r = select i1 %s, double %x, double %y
  ret double %r
}

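; Select on (fcmp ole): c.ole.d with bc1t/movt.d on pre-R6, cmp.le.d with
; sel.d on R6.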
define double @tst_select_fcmp_ole_double(double %x, double %y) {
entry:
  ; ALL-LABEL: tst_select_fcmp_ole_double:

  ; M2:         c.ole.d   $f12, $f14
  ; M3:         c.ole.d   $f12, $f13
  ; M2-M3:      bc1t      $[[BB0:BB[0-9_]+]]
  ; M2-M3:      nop
  ; M2:         mov.d     $f12, $f14
  ; M3:         mov.d     $f12, $f13
  ; M2-M3:      $[[BB0]]:
  ; M2-M3:      jr        $ra
  ; M2-M3:      mov.d     $f0, $f12

  ; CMOV-32:    c.ole.d   $f12, $f14
  ; CMOV-32:    movt.d    $f14, $f12, $fcc0
  ; CMOV-32:    mov.d     $f0, $f14

  ; SEL-32:     cmp.le.d  $f0, $f12, $f14
  ; SEL-32:     sel.d     $f0, $f14, $f12

  ; CMOV-64:    c.ole.d   $f12, $f13
  ; CMOV-64:    movt.d    $f13, $f12, $fcc0
  ; CMOV-64:    mov.d     $f0, $f13

  ; SEL-64:     cmp.le.d  $f0, $f12, $f13
  ; SEL-64:     sel.d     $f0, $f13, $f12

  ; MM32R3:     c.ole.d   $f12, $f14
  ; MM32R3:     movt.d    $f14, $f12, $fcc0
  ; MM32R3:     mov.d     $f0, $f14

  %s = fcmp ole double %x, %y
  %r = select i1 %s, double %x, double %y
  ret double %r
}

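; Select on (fcmp ogt): pre-R6 targets use the inverted condition c.ule.d with
; bc1f/movf.d, while R6 targets swap the operands of cmp.lt.d instead.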
define double @tst_select_fcmp_ogt_double(double %x, double %y) {
entry:
  ; ALL-LABEL: tst_select_fcmp_ogt_double:

  ; M2:         c.ule.d   $f12, $f14
  ; M3:         c.ule.d   $f12, $f13
  ; M2-M3:      bc1f      $[[BB0:BB[0-9_]+]]
  ; M2-M3:      nop
  ; M2:         mov.d     $f12, $f14
  ; M3:         mov.d     $f12, $f13
  ; M2-M3:      $[[BB0]]:
  ; M2-M3:      jr        $ra
  ; M2-M3:      mov.d     $f0, $f12

  ; CMOV-32:    c.ule.d   $f12, $f14
  ; CMOV-32:    movf.d    $f14, $f12, $fcc0
  ; CMOV-32:    mov.d     $f0, $f14

  ; SEL-32:     cmp.lt.d  $f0, $f14, $f12
  ; SEL-32:     sel.d     $f0, $f14, $f12

  ; CMOV-64:    c.ule.d   $f12, $f13
  ; CMOV-64:    movf.d    $f13, $f12, $fcc0
  ; CMOV-64:    mov.d     $f0, $f13

  ; SEL-64:     cmp.lt.d  $f0, $f13, $f12
  ; SEL-64:     sel.d     $f0, $f13, $f12

  ; MM32R3:     c.ule.d   $f12, $f14
  ; MM32R3:     movf.d    $f14, $f12, $fcc0
  ; MM32R3:     mov.d     $f0, $f14

  %s = fcmp ogt double %x, %y
  %r = select i1 %s, double %x, double %y
  ret double %r
}

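; Select on (fcmp oge): the inverted condition c.ult.d with bc1f/movf.d on
; pre-R6, cmp.le.d with swapped operands on R6.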
define double @tst_select_fcmp_oge_double(double %x, double %y) {
entry:
  ; ALL-LABEL: tst_select_fcmp_oge_double:

  ; M2:         c.ult.d   $f12, $f14
  ; M3:         c.ult.d   $f12, $f13
  ; M2-M3:      bc1f      $[[BB0:BB[0-9_]+]]
  ; M2-M3:      nop
  ; M2:         mov.d     $f12, $f14
  ; M3:         mov.d     $f12, $f13
  ; M2-M3:      $[[BB0]]:
  ; M2-M3:      jr        $ra
  ; M2-M3:      mov.d     $f0, $f12

  ; CMOV-32:    c.ult.d   $f12, $f14
  ; CMOV-32:    movf.d    $f14, $f12, $fcc0
  ; CMOV-32:    mov.d     $f0, $f14

  ; SEL-32:     cmp.le.d  $f0, $f14, $f12
  ; SEL-32:     sel.d     $f0, $f14, $f12

  ; CMOV-64:    c.ult.d   $f12, $f13
  ; CMOV-64:    movf.d    $f13, $f12, $fcc0
  ; CMOV-64:    mov.d     $f0, $f13

  ; SEL-64:     cmp.le.d  $f0, $f13, $f12
  ; SEL-64:     sel.d     $f0, $f13, $f12

  ; MM32R3:     c.ult.d   $f12, $f14
  ; MM32R3:     movf.d    $f14, $f12, $fcc0
  ; MM32R3:     mov.d     $f0, $f14

  %s = fcmp oge double %x, %y
  %r = select i1 %s, double %x, double %y
  ret double %r
}

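; Select on (fcmp oeq): c.eq.d with bc1t/movt.d on pre-R6, cmp.eq.d with sel.d
; on R6.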
define double @tst_select_fcmp_oeq_double(double %x, double %y) {
entry:
  ; ALL-LABEL: tst_select_fcmp_oeq_double:

  ; M2:         c.eq.d    $f12, $f14
  ; M3:         c.eq.d    $f12, $f13
  ; M2-M3:      bc1t      $[[BB0:BB[0-9_]+]]
  ; M2-M3:      nop
  ; M2:         mov.d     $f12, $f14
  ; M3:         mov.d     $f12, $f13
  ; M2-M3:      $[[BB0]]:
  ; M2-M3:      jr        $ra
  ; M2-M3:      mov.d     $f0, $f12

  ; CMOV-32:    c.eq.d    $f12, $f14
  ; CMOV-32:    movt.d    $f14, $f12, $fcc0
  ; CMOV-32:    mov.d     $f0, $f14

  ; SEL-32:     cmp.eq.d  $f0, $f12, $f14
  ; SEL-32:     sel.d     $f0, $f14, $f12

  ; CMOV-64:    c.eq.d    $f12, $f13
  ; CMOV-64:    movt.d    $f13, $f12, $fcc0
  ; CMOV-64:    mov.d     $f0, $f13

  ; SEL-64:     cmp.eq.d  $f0, $f12, $f13
  ; SEL-64:     sel.d     $f0, $f13, $f12

  ; MM32R3:     c.eq.d    $f12, $f14
  ; MM32R3:     movt.d    $f14, $f12, $fcc0
  ; MM32R3:     mov.d     $f0, $f14

  %s = fcmp oeq double %x, %y
  %r = select i1 %s, double %x, double %y
  ret double %r
}

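; Select on (fcmp one): pre-R6 targets use the inverted condition c.ueq.d with
; bc1f/movf.d; R6 targets compute cmp.ueq.d and invert the mask with
; mfc1/not/mtc1 before sel.d.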
define double @tst_select_fcmp_one_double(double %x, double %y) {
entry:
  ; ALL-LABEL: tst_select_fcmp_one_double:

  ; M2:         c.ueq.d   $f12, $f14
  ; M3:         c.ueq.d   $f12, $f13
  ; M2-M3:      bc1f      $[[BB0:BB[0-9_]+]]
  ; M2-M3:      nop
  ; M2:         mov.d     $f12, $f14
  ; M3:         mov.d     $f12, $f13
  ; M2-M3:      $[[BB0]]:
  ; M2-M3:      jr        $ra
  ; M2-M3:      mov.d     $f0, $f12

  ; CMOV-32:    c.ueq.d   $f12, $f14
  ; CMOV-32:    movf.d    $f14, $f12, $fcc0
  ; CMOV-32:    mov.d     $f0, $f14

  ; SEL-32:     cmp.ueq.d $f0, $f12, $f14
  ; SEL-32:     mfc1      $[[T0:[0-9]+]], $f0
  ; SEL-32:     not       $[[T0]], $[[T0]]
  ; SEL-32:     mtc1      $[[T0:[0-9]+]], $f0
  ; SEL-32:     sel.d     $f0, $f14, $f12

  ; CMOV-64:    c.ueq.d   $f12, $f13
  ; CMOV-64:    movf.d    $f13, $f12, $fcc0
  ; CMOV-64:    mov.d     $f0, $f13

  ; SEL-64:     cmp.ueq.d $f0, $f12, $f13
  ; SEL-64:     mfc1      $[[T0:[0-9]+]], $f0
  ; SEL-64:     not       $[[T0]], $[[T0]]
  ; SEL-64:     mtc1      $[[T0:[0-9]+]], $f0
  ; SEL-64:     sel.d     $f0, $f13, $f12

  ; MM32R3:     c.ueq.d   $f12, $f14
  ; MM32R3:     movf.d    $f14, $f12, $fcc0
  ; MM32R3:     mov.d     $f0, $f14

  %s = fcmp one double %x, %y
  %r = select i1 %s, double %x, double %y
  ret double %r
}