; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
; RUN:    -check-prefixes=NOT-R2-R6,GP32,GP32-NOT-MM,NOT-MM,PRE4
; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
; RUN:    -check-prefixes=NOT-R2-R6,GP32,GP32-NOT-MM,NOT-MM
; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
; RUN:    -check-prefixes=R2-R6,GP32,GP32-NOT-MM,NOT-MM
; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
; RUN:    -check-prefixes=R2-R6,GP32,GP32-NOT-MM,NOT-MM
; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
; RUN:    -check-prefixes=R2-R6,GP32,GP32-NOT-MM,NOT-MM
; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
; RUN:    -check-prefixes=R2-R6,GP32,GP32-NOT-MM,NOT-MM
; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -verify-machineinstrs | FileCheck %s \
; RUN:    -check-prefixes=GP32-MM,GP32,MM32,MMR3
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
; RUN:    -check-prefixes=GP32-MM,GP32,MM32,MMR6
; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
; RUN:    -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
; RUN:    -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
; RUN:    -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
; RUN:    -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
; RUN:    -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
; RUN:    -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
; RUN:    -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2

; i1 subtraction: the i1 difference is masked to 1 bit and then sign-extended
; (negu / subu16-from-zero) to satisfy the signext return attribute.
define signext i1 @sub_i1(i1 signext %a, i1 signext %b) {
entry:
; ALL-LABEL: sub_i1:

  ; NOT-MM:         subu    $[[T0:[0-9]+]], $4, $5
  ; NOT-MM:         andi    $[[T0]], $[[T0]], 1
  ; NOT-MM:         negu    $2, $[[T0]]

  ; MM:             subu16  $[[T0:[0-9]+]], $4, $5
  ; MM:             andi16  $[[T0]], $[[T0]], 1
  ; MM:             li16    $[[T1:[0-9]+]], 0
  ; MM:             subu16  $2, $[[T1]], $[[T0]]

  %r = sub i1 %a, %b
  ret i1 %r
}

; i8 subtraction: pre-R2 cores sign-extend the result with sll/sra by 24;
; R2+ (and microMIPS) use the dedicated seb instruction.
define signext i8 @sub_i8(i8 signext %a, i8 signext %b) {
entry:
; ALL-LABEL: sub_i8:

  ; NOT-R2-R6:      subu    $[[T0:[0-9]+]], $4, $5
  ; NOT-R2-R6:      sll     $[[T0]], $[[T0]], 24
  ; NOT-R2-R6:      sra     $2, $[[T0]], 24

  ; R2-R6:          subu    $[[T0:[0-9]+]], $4, $5
  ; R2-R6:          seb     $2, $[[T0:[0-9]+]]

  ; MM:             subu16  $[[T0:[0-9]+]], $4, $5
  ; MM:             seb     $[[T0]], $[[T0]]

  %r = sub i8 %a, %b
  ret i8 %r
}

; i16 subtraction: pre-R2 cores sign-extend the result with sll/sra by 16;
; R2+ (and microMIPS) use the dedicated seh instruction.
define signext i16 @sub_i16(i16 signext %a, i16 signext %b) {
entry:
; ALL-LABEL: sub_i16:

  ; NOT-R2-R6:      subu    $[[T0:[0-9]+]], $4, $5
  ; NOT-R2-R6:      sll     $[[T0]], $[[T0]], 16
  ; NOT-R2-R6:      sra     $2, $[[T0]], 16

  ; R2-R6:          subu    $[[T0:[0-9]+]], $4, $5
  ; R2-R6:          seh     $2, $[[T0:[0-9]+]]

  ; MM:             subu16  $[[T0:[0-9]+]], $4, $5
  ; MM:             seh     $[[T0]], $[[T0]]

  %r = sub i16 %a, %b
  ret i16 %r
}

; i32 subtraction: a single subu (subu16 on microMIPS); no extension needed
; since i32 is the native GPR width on MIPS32 and is kept sign-extended on MIPS64.
define signext i32 @sub_i32(i32 signext %a, i32 signext %b) {
entry:
; ALL-LABEL: sub_i32:

  ; NOT-MM:         subu    $2, $4, $5

  ; MM:             subu16  $2, $4, $5

  %r = sub i32 %a, %b
  ret i32 %r
}

; i64 subtraction: on 32-bit targets this is a two-register subtract with
; borrow (sltu computes the borrow out of the low halves); on 64-bit targets
; it is a single dsubu.
define signext i64 @sub_i64(i64 signext %a, i64 signext %b) {
entry:
; ALL-LABEL: sub_i64:

  ; GP32-NOT-MM:    sltu    $[[T0:[0-9]+]], $5, $7
  ; GP32-NOT-MM:    subu    $2, $4, $6
  ; GP32-NOT-MM:    subu    $2, $2, $[[T0]]
  ; GP32-NOT-MM:    subu    $3, $5, $7

  ; MM32:           sltu    $[[T0:[0-9]+]], $5, $7
  ; MM32:           subu16    $3, $4, $6
  ; MM32:           subu16    $2, $3, $[[T0]]
  ; MM32:           subu16    $3, $5, $7

  ; GP64:           dsubu   $2, $4, $5

  %r = sub i64 %a, %b
  ret i64 %r
}

; i128 subtraction: legalized to a four-register (GP32) or two-register (GP64)
; subtract-with-borrow chain; borrows are materialized with sltu and, on R6/
; microMIPSR6, combined with seleqz/selnez instead of movz.
define signext i128 @sub_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: sub_i128:

; PRE4: lw     $[[T0:[0-9]+]], 24($sp)
; PRE4: lw     $[[T1:[0-9]+]], 28($sp)
; PRE4: sltu   $[[T2:[0-9]+]], $7, $[[T1]]
; PRE4: xor    $[[T3:[0-9]+]], $6, $[[T0]]
; PRE4: sltiu  $[[T4:[0-9]+]], $[[T3]], 1
; PRE4: bnez   $[[T4]]
; PRE4: move   $[[T5:[0-9]+]], $[[T2]]
; PRE4: sltu   $[[T5]], $6, $[[T0]]

; PRE4: lw     $[[T6:[0-9]+]], 20($sp)
; PRE4: subu   $[[T7:[0-9]+]], $5, $[[T6]]
; PRE4: subu   $[[T8:[0-9]+]], $[[T7]], $[[T5]]
; PRE4: sltu   $[[T9:[0-9]+]], $[[T7]], $[[T5]]
; PRE4: sltu   $[[T10:[0-9]+]], $5, $[[T6]]
; PRE4: lw     $[[T11:[0-9]+]], 16($sp)
; PRE4: subu   $[[T12:[0-9]+]], $4, $[[T11]]
; PRE4: subu   $[[T13:[0-9]+]], $[[T12]], $[[T10]]
; PRE4: subu   $[[T14:[0-9]+]], $[[T13]], $[[T9]]
; PRE4: subu   $[[T15:[0-9]+]], $6, $[[T0]]
; PRE4: subu   $[[T16:[0-9]+]], $[[T15]], $[[T2]]
; PRE4: subu   $5, $7, $[[T1]]

; MMR3: lw       $[[T1:[0-9]+]], 48($sp)
; MMR3: sltu     $[[T2:[0-9]+]], $6, $[[T1]]
; MMR3: xor      $[[T3:[0-9]+]], $6, $[[T1]]
; MMR3: lw       $[[T4:[0-9]+]], 52($sp)
; MMR3: sltu     $[[T5:[0-9]+]], $7, $[[T4]]
; MMR3: movz     $[[T6:[0-9]+]], $[[T5]], $[[T3]]
; MMR3: lw       $[[T7:[0-9]+]], 44($sp)
; MMR3: subu16   $[[T8:[0-9]+]], $5, $[[T7]]
; MMR3: subu16   $[[T9:[0-9]+]], $[[T8]], $[[T6]]
; MMR3: sltu     $[[T10:[0-9]+]], $[[T8]], $[[T2]]
; MMR3: sltu     $[[T11:[0-9]+]], $5, $[[T7]]
; MMR3: lw       $[[T12:[0-9]+]], 40($sp)
; MMR3: lw       $[[T13:[0-9]+]], 12($sp)
; MMR3: subu16   $[[T14:[0-9]+]], $[[T13]], $[[T12]]
; MMR3: subu16   $[[T15:[0-9]+]], $[[T14]], $[[T11]]
; MMR3: subu16   $[[T16:[0-9]+]], $[[T15]], $[[T10]]
; MMR3: subu16   $[[T17:[0-9]+]], $6, $[[T1]]
; MMR3: subu16   $[[T18:[0-9]+]], $[[T17]], $7
; MMR3: lw       $[[T19:[0-9]+]], 8($sp)
; MMR3: lw       $[[T20:[0-9]+]], 0($sp)
; MMR3: subu16   $5, $[[T19]], $[[T20]]

; MMR6: move     $[[T0:[0-9]+]], $7
; MMR6: sw       $7, 8($sp)
; MMR6: move     $[[T1:[0-9]+]], $5
; MMR6: sw       $4, 12($sp)
; MMR6: lw       $[[T2:[0-9]+]], 48($sp)
; MMR6: sltu     $[[T3:[0-9]+]], $6, $[[T2]]
; MMR6: xor      $[[T4:[0-9]+]], $6, $[[T2]]
; MMR6: sltiu    $[[T5:[0-9]+]], $[[T4]], 1
; MMR6: seleqz   $[[T6:[0-9]+]], $[[T3]], $[[T5]]
; MMR6: lw       $[[T7:[0-9]+]], 52($sp)
; MMR6: sltu     $[[T8:[0-9]+]], $[[T0]], $[[T7]]
; MMR6: selnez   $[[T9:[0-9]+]], $[[T8]], $[[T5]]
; MMR6: or       $[[T10:[0-9]+]], $[[T9]], $[[T6]]
; MMR6: lw       $[[T11:[0-9]+]], 44($sp)
; MMR6: subu16   $[[T12:[0-9]+]], $[[T1]], $[[T11]]
; MMR6: subu16   $[[T13:[0-9]+]], $[[T12]], $[[T7]]
; MMR6: sltu     $[[T16:[0-9]+]], $[[T12]], $[[T7]]
; MMR6: sltu     $[[T17:[0-9]+]], $[[T1]], $[[T11]]
; MMR6: lw       $[[T18:[0-9]+]], 40($sp)
; MMR6: lw       $[[T19:[0-9]+]], 12($sp)
; MMR6: subu16   $[[T20:[0-9]+]], $[[T19]], $[[T18]]
; MMR6: subu16   $[[T21:[0-9]+]], $[[T20]], $[[T17]]
; MMR6: subu16   $[[T22:[0-9]+]], $[[T21]], $[[T16]]
; MMR6: subu16   $[[T23:[0-9]+]], $6, $[[T2]]
; MMR6: subu16   $4, $[[T23]], $5
; MMR6: lw       $[[T24:[0-9]+]], 8($sp)
; MMR6: lw       $[[T25:[0-9]+]], 0($sp)
; MMR6: subu16   $5, $[[T24]], $[[T25]]
; MMR6: lw       $3, 4($sp)

; FIXME: The sltu, dsll, dsrl pattern here occurs when an i32 is zero
;        extended to 64 bits. Fortunately slt(i)(u) actually gives an i1.
;        These should be combined away.

; GP64-NOT-R2: dsubu     $1, $4, $6
; GP64-NOT-R2: sltu      $[[T0:[0-9]+]], $5, $7
; GP64-NOT-R2: dsll      $[[T1:[0-9]+]], $[[T0]], 32
; GP64-NOT-R2: dsrl      $[[T2:[0-9]+]], $[[T1]], 32
; GP64-NOT-R2: dsubu     $2, $1, $[[T2]]
; GP64-NOT-R2: dsubu     $3, $5, $7

; FIXME: Likewise for the sltu, dext here.

; GP64-R2:     dsubu     $1, $4, $6
; GP64-R2:     sltu      $[[T0:[0-9]+]], $5, $7
; GP64-R2:     dext      $[[T1:[0-9]+]], $[[T0]], 0, 32
; GP64-R2:     dsubu     $2, $1, $[[T1]]
; GP64-R2:     dsubu     $3, $5, $7

  %r = sub i128 %a, %b
  ret i128 %r
}
