; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -mattr=+experimental-b -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64IB
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64IBT

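; Basic tests for the experimental ternary bitmanip (Zbt) instructions. Each
; function is compiled for plain RV64I as well as with the full B extension and
; with Zbt alone; the latter two should select a single ternary instruction.

; The masked-mix pattern (a & b) | (c & ~b) should select to cmix.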
define signext i32 @cmix_i32(i32 signext %a, i32 signext %b, i32 signext %c) nounwind {
; RV64I-LABEL: cmix_i32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    and a0, a1, a0
; RV64I-NEXT:    not a1, a1
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: cmix_i32:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    cmix a0, a1, a0, a2
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: cmix_i32:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    cmix a0, a1, a0, a2
; RV64IBT-NEXT:    ret
  %and = and i32 %b, %a
  %neg = xor i32 %b, -1
  %and1 = and i32 %neg, %c
  %or = or i32 %and1, %and
  ret i32 %or
}

define i64 @cmix_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmix_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    and a0, a1, a0
; RV64I-NEXT:    not a1, a1
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: cmix_i64:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    cmix a0, a1, a0, a2
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: cmix_i64:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    cmix a0, a1, a0, a2
; RV64IBT-NEXT:    ret
  %and = and i64 %b, %a
  %neg = xor i64 %b, -1
  %and1 = and i64 %neg, %c
  %or = or i64 %and1, %and
  ret i64 %or
}

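; Selecting between %a and %c based on whether %b is nonzero should become cmov.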
define signext i32 @cmov_i32(i32 signext %a, i32 signext %b, i32 signext %c) nounwind {
; RV64I-LABEL: cmov_i32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    beqz a1, .LBB2_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a2, a0
; RV64I-NEXT:  .LBB2_2:
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: cmov_i32:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    cmov a0, a1, a0, a2
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: cmov_i32:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    cmov a0, a1, a0, a2
; RV64IBT-NEXT:    ret
  %tobool.not = icmp eq i32 %b, 0
  %cond = select i1 %tobool.not, i32 %c, i32 %a
  ret i32 %cond
}

define i64 @cmov_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    beqz a1, .LBB3_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a2, a0
; RV64I-NEXT:  .LBB3_2:
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: cmov_i64:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    cmov a0, a1, a0, a2
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: cmov_i64:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    cmov a0, a1, a0, a2
; RV64IBT-NEXT:    ret
  %tobool.not = icmp eq i64 %b, 0
  %cond = select i1 %tobool.not, i64 %c, i64 %a
  ret i64 %cond
}

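; Funnel-shift-left intrinsics: with B or Zbt the i32 case selects fslw and the
; i64 case selects fsl, after masking the shift amount.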
declare i32 @llvm.fshl.i32(i32, i32, i32)

define signext i32 @fshl_i32(i32 signext %a, i32 signext %b, i32 signext %c) nounwind {
; RV64I-LABEL: fshl_i32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    srli a1, a1, 32
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    andi a1, a2, 31
; RV64I-NEXT:    sll a0, a0, a1
; RV64I-NEXT:    srai a0, a0, 32
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: fshl_i32:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    andi a2, a2, 31
; RV64IB-NEXT:    fslw a0, a0, a1, a2
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: fshl_i32:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    andi a2, a2, 31
; RV64IBT-NEXT:    fslw a0, a0, a1, a2
; RV64IBT-NEXT:    ret
  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
  ret i32 %1
}

; Similar to fshl_i32 but result is not sign extended.
define void @fshl_i32_nosext(i32 signext %a, i32 signext %b, i32 signext %c, i32* %x) nounwind {
; RV64I-LABEL: fshl_i32_nosext:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    srli a1, a1, 32
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    andi a1, a2, 31
; RV64I-NEXT:    sll a0, a0, a1
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    sw a0, 0(a3)
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: fshl_i32_nosext:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    andi a2, a2, 31
; RV64IB-NEXT:    fslw a0, a0, a1, a2
; RV64IB-NEXT:    sw a0, 0(a3)
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: fshl_i32_nosext:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    andi a2, a2, 31
; RV64IBT-NEXT:    fslw a0, a0, a1, a2
; RV64IBT-NEXT:    sw a0, 0(a3)
; RV64IBT-NEXT:    ret
  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
  store i32 %1, i32* %x
  ret void
}

declare i64 @llvm.fshl.i64(i64, i64, i64)

define i64 @fshl_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: fshl_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sll a0, a0, a2
; RV64I-NEXT:    not a2, a2
; RV64I-NEXT:    srli a1, a1, 1
; RV64I-NEXT:    srl a1, a1, a2
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: fshl_i64:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    andi a2, a2, 63
; RV64IB-NEXT:    fsl a0, a0, a1, a2
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: fshl_i64:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    andi a2, a2, 63
; RV64IBT-NEXT:    fsl a0, a0, a1, a2
; RV64IBT-NEXT:    ret
  %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %c)
  ret i64 %1
}

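; Funnel-shift-right intrinsics select fsrw/fsr in the same way.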
declare i32 @llvm.fshr.i32(i32, i32, i32)

define signext i32 @fshr_i32(i32 signext %a, i32 signext %b, i32 signext %c) nounwind {
; RV64I-LABEL: fshr_i32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    srli a1, a1, 32
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    andi a1, a2, 31
; RV64I-NEXT:    srl a0, a0, a1
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: fshr_i32:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    andi a2, a2, 31
; RV64IB-NEXT:    fsrw a0, a1, a0, a2
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: fshr_i32:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    andi a2, a2, 31
; RV64IBT-NEXT:    fsrw a0, a1, a0, a2
; RV64IBT-NEXT:    ret
  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 %c)
  ret i32 %1
}

; Similar to fshr_i32 but result is not sign extended.
define void @fshr_i32_nosext(i32 signext %a, i32 signext %b, i32 signext %c, i32* %x) nounwind {
; RV64I-LABEL: fshr_i32_nosext:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    srli a1, a1, 32
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    andi a1, a2, 31
; RV64I-NEXT:    srl a0, a0, a1
; RV64I-NEXT:    sw a0, 0(a3)
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: fshr_i32_nosext:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    andi a2, a2, 31
; RV64IB-NEXT:    fsrw a0, a1, a0, a2
; RV64IB-NEXT:    sw a0, 0(a3)
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: fshr_i32_nosext:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    andi a2, a2, 31
; RV64IBT-NEXT:    fsrw a0, a1, a0, a2
; RV64IBT-NEXT:    sw a0, 0(a3)
; RV64IBT-NEXT:    ret
  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 %c)
  store i32 %1, i32* %x
  ret void
}

declare i64 @llvm.fshr.i64(i64, i64, i64)

define i64 @fshr_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: fshr_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srl a1, a1, a2
; RV64I-NEXT:    not a2, a2
; RV64I-NEXT:    slli a0, a0, 1
; RV64I-NEXT:    sll a0, a0, a2
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: fshr_i64:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    andi a2, a2, 63
; RV64IB-NEXT:    fsr a0, a1, a0, a2
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: fshr_i64:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    andi a2, a2, 63
; RV64IBT-NEXT:    fsr a0, a1, a0, a2
; RV64IBT-NEXT:    ret
  %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %c)
  ret i64 %1
}

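; Funnel shifts by a constant amount should use the immediate forms fsriw/fsri.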
define signext i32 @fshri_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: fshri_i32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 5
; RV64I-NEXT:    slli a0, a0, 27
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: fshri_i32:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    fsriw a0, a1, a0, 5
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: fshri_i32:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    fsriw a0, a1, a0, 5
; RV64IBT-NEXT:    ret
  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 5)
  ret i32 %1
}

; Similar to fshri_i32 but result is not sign extended.
define void @fshri_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
; RV64I-LABEL: fshri_i32_nosext:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 5
; RV64I-NEXT:    slli a0, a0, 27
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    sw a0, 0(a2)
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: fshri_i32_nosext:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    fsriw a0, a1, a0, 5
; RV64IB-NEXT:    sw a0, 0(a2)
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: fshri_i32_nosext:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    fsriw a0, a1, a0, 5
; RV64IBT-NEXT:    sw a0, 0(a2)
; RV64IBT-NEXT:    ret
  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 5)
  store i32 %1, i32* %x
  ret void
}

define i64 @fshri_i64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: fshri_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srli a1, a1, 5
; RV64I-NEXT:    slli a0, a0, 59
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: fshri_i64:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    fsri a0, a1, a0, 5
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: fshri_i64:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    fsri a0, a1, a0, 5
; RV64IBT-NEXT:    ret
  %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 5)
  ret i64 %1
}

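; A constant fshl is selected as fsri/fsriw with the complementary shift amount
; (27 = 32 - 5 for i32, 59 = 64 - 5 for i64).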
define signext i32 @fshli_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: fshli_i32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 27
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: fshli_i32:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    fsriw a0, a1, a0, 27
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: fshli_i32:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    fsriw a0, a1, a0, 27
; RV64IBT-NEXT:    ret
  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 5)
  ret i32 %1
}

; Similar to fshli_i32 but result is not sign extended.
define void @fshli_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
; RV64I-LABEL: fshli_i32_nosext:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 27
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    sw a0, 0(a2)
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: fshli_i32_nosext:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    fsriw a0, a1, a0, 27
; RV64IB-NEXT:    sw a0, 0(a2)
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: fshli_i32_nosext:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    fsriw a0, a1, a0, 27
; RV64IBT-NEXT:    sw a0, 0(a2)
; RV64IBT-NEXT:    ret
  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 5)
  store i32 %1, i32* %x
  ret void
}

define i64 @fshli_i64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: fshli_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srli a1, a1, 59
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IB-LABEL: fshli_i64:
; RV64IB:       # %bb.0:
; RV64IB-NEXT:    fsri a0, a1, a0, 59
; RV64IB-NEXT:    ret
;
; RV64IBT-LABEL: fshli_i64:
; RV64IBT:       # %bb.0:
; RV64IBT-NEXT:    fsri a0, a1, a0, 59
; RV64IBT-NEXT:    ret
  %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 5)
  ret i64 %1
}