; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s

define i32 @fold_srem_positive_odd(i32 %x) {
; CHECK-LABEL: fold_srem_positive_odd:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #37253
; CHECK-NEXT:    movk w8, #44150, lsl #16
; CHECK-NEXT:    smull x8, w0, w8
; CHECK-NEXT:    lsr x8, x8, #32
; CHECK-NEXT:    add w8, w8, w0
; CHECK-NEXT:    asr w9, w8, #6
; CHECK-NEXT:    add w8, w9, w8, lsr #31
; CHECK-NEXT:    mov w9, #95
; CHECK-NEXT:    msub w0, w8, w9, w0
; CHECK-NEXT:    ret
  %1 = srem i32 %x, 95
  ret i32 %1
}
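; Note (an explanatory aside, not checked by the test): the multiplier above,
; 44150 << 16 | 37253 = 0xac769185, matches ceil(2^38 / 95) wrapped to a
; negative i32, which is why the dividend is added back after the high half of
; the smull is taken; asr #6 plus the lsr #31 add give the round-toward-zero
; quotient, and the msub rebuilds the remainder as x - (x / 95) * 95.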


define i32 @fold_srem_positive_even(i32 %x) {
; CHECK-LABEL: fold_srem_positive_even:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #36849
; CHECK-NEXT:    movk w8, #15827, lsl #16
; CHECK-NEXT:    smull x8, w0, w8
; CHECK-NEXT:    lsr x9, x8, #63
; CHECK-NEXT:    asr x8, x8, #40
; CHECK-NEXT:    add w8, w8, w9
; CHECK-NEXT:    mov w9, #1060
; CHECK-NEXT:    msub w0, w8, w9, w0
; CHECK-NEXT:    ret
  %1 = srem i32 %x, 1060
  ret i32 %1
}
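; Note: here the multiplier 15827 << 16 | 36849 = 0x3dd38ff1 matches
; floor(2^40 / 1060) + 1, which already fits in a positive i32, so no dividend
; correction is needed; asr x8, #40 extracts the high half and applies the
; shift in one step, and the lsr #63 add rounds the quotient toward zero.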


define i32 @fold_srem_negative_odd(i32 %x) {
; CHECK-LABEL: fold_srem_negative_odd:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #65445
; CHECK-NEXT:    movk w8, #42330, lsl #16
; CHECK-NEXT:    smull x8, w0, w8
; CHECK-NEXT:    lsr x9, x8, #63
; CHECK-NEXT:    asr x8, x8, #40
; CHECK-NEXT:    add w8, w8, w9
; CHECK-NEXT:    mov w9, #-723
; CHECK-NEXT:    msub w0, w8, w9, w0
; CHECK-NEXT:    ret
  %1 = srem i32 %x, -723
  ret i32 %1
}


define i32 @fold_srem_negative_even(i32 %x) {
; CHECK-LABEL: fold_srem_negative_even:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #62439
; CHECK-NEXT:    movk w8, #64805, lsl #16
; CHECK-NEXT:    smull x8, w0, w8
; CHECK-NEXT:    lsr x9, x8, #63
; CHECK-NEXT:    asr x8, x8, #40
; CHECK-NEXT:    add w8, w8, w9
; CHECK-NEXT:    mov w9, #-22981
; CHECK-NEXT:    msub w0, w8, w9, w0
; CHECK-NEXT:    ret
  %1 = srem i32 %x, -22981
  ret i32 %1
}
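; Note: the two negative-divisor cases above lower to the same
; smull/asr/round/msub shape; the actual divisor only appears as the (negative)
; msub operand, and the remainder keeps the sign of the dividend, as srem
; requires.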


; Don't fold if we can combine srem with sdiv.
define i32 @combine_srem_sdiv(i32 %x) {
; CHECK-LABEL: combine_srem_sdiv:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #37253
; CHECK-NEXT:    movk w8, #44150, lsl #16
; CHECK-NEXT:    smull x8, w0, w8
; CHECK-NEXT:    lsr x8, x8, #32
; CHECK-NEXT:    add w8, w8, w0
; CHECK-NEXT:    asr w9, w8, #6
; CHECK-NEXT:    add w8, w9, w8, lsr #31
; CHECK-NEXT:    mov w9, #95
; CHECK-NEXT:    msub w9, w8, w9, w0
; CHECK-NEXT:    add w0, w9, w8
; CHECK-NEXT:    ret
  %1 = srem i32 %x, 95
  %2 = sdiv i32 %x, 95
  %3 = add i32 %1, %2
  ret i32 %3
}
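; Note: because the quotient is already computed for the sdiv, the remainder is
; derived from it with a single msub rather than folded separately, and the
; final add combines the srem and sdiv results.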

; Don't fold for divisors that are a power of two.
define i32 @dont_fold_srem_power_of_two(i32 %x) {
; CHECK-LABEL: dont_fold_srem_power_of_two:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w0, #63 // =63
; CHECK-NEXT:    cmp w0, #0 // =0
; CHECK-NEXT:    csel w8, w8, w0, lt
; CHECK-NEXT:    and w8, w8, #0xffffffc0
; CHECK-NEXT:    sub w0, w0, w8
; CHECK-NEXT:    ret
  %1 = srem i32 %x, 64
  ret i32 %1
}
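; Note: the power-of-two case uses the usual bias-and-mask form: a negative
; dividend is biased by 63 before the low six bits are cleared, so the final
; sub computes x - 64 * (x / 64) with the division rounded toward zero.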

; Don't fold if the divisor is one.
define i32 @dont_fold_srem_one(i32 %x) {
; CHECK-LABEL: dont_fold_srem_one:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w0, wzr
; CHECK-NEXT:    ret
  %1 = srem i32 %x, 1
  ret i32 %1
}

; Don't fold if the divisor is 2^31.
define i32 @dont_fold_srem_i32_smax(i32 %x) {
; CHECK-LABEL: dont_fold_srem_i32_smax:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #2147483647
; CHECK-NEXT:    add w8, w0, w8
; CHECK-NEXT:    cmp w0, #0 // =0
; CHECK-NEXT:    csel w8, w8, w0, lt
; CHECK-NEXT:    and w8, w8, #0x80000000
; CHECK-NEXT:    add w0, w0, w8
; CHECK-NEXT:    ret
  %1 = srem i32 %x, 2147483648
  ret i32 %1
}

; Don't fold i64 srem.
define i64 @dont_fold_srem_i64(i64 %x) {
; CHECK-LABEL: dont_fold_srem_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #58849
; CHECK-NEXT:    movk x8, #48148, lsl #16
; CHECK-NEXT:    movk x8, #33436, lsl #32
; CHECK-NEXT:    movk x8, #21399, lsl #48
; CHECK-NEXT:    smulh x8, x0, x8
; CHECK-NEXT:    asr x9, x8, #5
; CHECK-NEXT:    add x8, x9, x8, lsr #63
; CHECK-NEXT:    mov w9, #98
; CHECK-NEXT:    msub x0, x8, x9, x0
; CHECK-NEXT:    ret
  %1 = srem i64 %x, 98
  ret i64 %1
}
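; Note: the i64 case builds a 64-bit magic constant with mov/movk, takes the
; high half of the 128-bit product with smulh, and otherwise mirrors the i32
; lowering (asr #5, sign-bit add, msub by 98).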