; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s

; *Please* keep in sync with test/CodeGen/X86/extract-bits.ll

; https://bugs.llvm.org/show_bug.cgi?id=36419
; https://bugs.llvm.org/show_bug.cgi?id=37603
; https://bugs.llvm.org/show_bug.cgi?id=37610

; Patterns:
;   a) (x >> start) &  (1 << nbits) - 1
;   b) (x >> start) & ~(-1 << nbits)
;   c) (x >> start) &  (-1 >> (32 - nbits))
;   d) (x >> start) << (32 - nbits) >> (32 - nbits)
; are equivalent.
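;
; A quick numeric sanity check (example values are ours, not from the test):
; with x = 0xFF00, start = 8, nbits = 8, and all shifts logical:
;   a) (0xFF00 >> 8) & ((1 << 8) - 1)           = 0xFF & 0xFF = 0xFF
;   b) (0xFF00 >> 8) & ~(-1 << 8)               = 0xFF & 0xFF = 0xFF
;   c) (0xFF00 >> 8) & (-1 >> (32 - 8))         = 0xFF & 0xFF = 0xFF
;   d) ((0xFF00 >> 8) << (32 - 8)) >> (32 - 8)  = 0xFF000000 >> 24 = 0xFF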

; ---------------------------------------------------------------------------- ;
; Pattern a. 32-bit
; ---------------------------------------------------------------------------- ;

define i32 @bextr32_a0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    lsr w8, w0, w1
; CHECK-NEXT:    sub w9, w9, #1 // =1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %shifted = lshr i32 %val, %numskipbits
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_a0_arithmetic(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a0_arithmetic:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    asr w8, w0, w1
; CHECK-NEXT:    sub w9, w9, #1 // =1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %shifted = ashr i32 %val, %numskipbits
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    lsr w8, w0, w1
; CHECK-NEXT:    sub w9, w9, #1 // =1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %conv = zext i8 %numlowbits to i32
  %onebit = shl i32 1, %conv
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_a2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    sub w9, w9, #1 // =1
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %shifted = lshr i32 %val, %numskipbits
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_a3_load_indexzext(i32* %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    sub w9, w9, #1 // =1
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %conv = zext i8 %numlowbits to i32
  %onebit = shl i32 1, %conv
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_a4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    lsr w8, w0, w1
; CHECK-NEXT:    sub w9, w9, #1 // =1
; CHECK-NEXT:    and w0, w8, w9
; CHECK-NEXT:    ret
  %shifted = lshr i32 %val, %numskipbits
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %shifted, %mask ; swapped order
  ret i32 %masked
}

; 64-bit

define i64 @bextr64_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    sub x9, x9, #1 // =1
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_a0_arithmetic(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a0_arithmetic:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    asr x8, x0, x1
; CHECK-NEXT:    sub x9, x9, #1 // =1
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %shifted = ashr i64 %val, %numskipbits
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_a1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    sub x9, x9, #1 // =1
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %conv = zext i8 %numlowbits to i64
  %onebit = shl i64 1, %conv
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_a2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    sub x9, x9, #1 // =1
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %shifted = lshr i64 %val, %numskipbits
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_a3_load_indexzext(i64* %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    sub x9, x9, #1 // =1
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %conv = zext i8 %numlowbits to i64
  %onebit = shl i64 1, %conv
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_a4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    sub x9, x9, #1 // =1
; CHECK-NEXT:    and x0, x8, x9
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %shifted, %mask ; swapped order
  ret i64 %masked
}

; 64-bit, but with 32-bit output

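; A hedged C sketch of the three variants that follow (helper names are ours,
; not from the test); they differ only in where the 64-to-32 truncation sits:
;   uint32_t a0(uint64_t x, uint64_t s, uint64_t n)  // truncate last
;     { return (uint32_t)((x >> s) & ((1ULL << n) - 1)); }
;   uint32_t a1(uint64_t x, uint64_t s, uint32_t n)  // truncate, then mask in 32 bits
;     { return (uint32_t)(x >> s) & ((1U << n) - 1); }
;   uint32_t a2(uint64_t x, uint64_t s, uint32_t n)  // 32-bit mask zext'ed, masking in 64 bits
;     { return (uint32_t)((x >> s) & (uint64_t)((1U << n) - 1)); }
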
; Everything done in 64-bit, truncation happens last.
define i32 @bextr64_32_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_a0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    sub w9, w9, #1 // =1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %shifted
  %res = trunc i64 %masked to i32
  ret i32 %res
}

; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
define i32 @bextr64_32_a1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_a1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    sub w9, w9, #1 // =1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %truncshifted = trunc i64 %shifted to i32
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %truncshifted
  ret i32 %masked
}

; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
; Masking is 64-bit. Then truncation.
define i32 @bextr64_32_a2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_a2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    sub w9, w9, #1 // =1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %zextmask = zext i32 %mask to i64
  %masked = and i64 %zextmask, %shifted
  %truncmasked = trunc i64 %masked to i32
  ret i32 %truncmasked
}

; ---------------------------------------------------------------------------- ;
; Pattern b. 32-bit
; ---------------------------------------------------------------------------- ;

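; In the expected AArch64 code, the "& ~(-1 << nbits)" masking selects
; bic (bitwise clear), which computes Rd = Rn & ~Rm, so the xor with -1
; never materializes as a separate instruction.
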
define i32 @bextr32_b0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsr w8, w0, w1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    bic w0, w8, w9
; CHECK-NEXT:    ret
  %shifted = lshr i32 %val, %numskipbits
  %notmask = shl i32 -1, %numlowbits
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_b1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsr w8, w0, w1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    bic w0, w8, w9
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %conv = zext i8 %numlowbits to i32
  %notmask = shl i32 -1, %conv
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_b2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    bic w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %shifted = lshr i32 %val, %numskipbits
  %notmask = shl i32 -1, %numlowbits
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_b3_load_indexzext(i32* %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    bic w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %conv = zext i8 %numlowbits to i32
  %notmask = shl i32 -1, %conv
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_b4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsr w8, w0, w1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    bic w0, w8, w9
; CHECK-NEXT:    ret
  %shifted = lshr i32 %val, %numskipbits
  %notmask = shl i32 -1, %numlowbits
  %mask = xor i32 %notmask, -1
  %masked = and i32 %shifted, %mask ; swapped order
  ret i32 %masked
}

; 64-bit

define i64 @bextr64_b0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    bic x0, x8, x9
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %notmask = shl i64 -1, %numlowbits
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_b1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    bic x0, x8, x9
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %conv = zext i8 %numlowbits to i64
  %notmask = shl i64 -1, %conv
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_b2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    bic x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %shifted = lshr i64 %val, %numskipbits
  %notmask = shl i64 -1, %numlowbits
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_b3_load_indexzext(i64* %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    bic x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %conv = zext i8 %numlowbits to i64
  %notmask = shl i64 -1, %conv
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_b4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    bic x0, x8, x9
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %notmask = shl i64 -1, %numlowbits
  %mask = xor i64 %notmask, -1
  %masked = and i64 %shifted, %mask ; swapped order
  ret i64 %masked
}

; 64-bit, but with 32-bit output

; Everything done in 64-bit, truncation happens last.
define i32 @bextr64_32_b0(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_b0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    lsl x9, x9, x2
; CHECK-NEXT:    bic w0, w8, w9
; CHECK-NEXT:    ret
  %shiftedval = lshr i64 %val, %numskipbits
  %widenumlowbits = zext i8 %numlowbits to i64
  %notmask = shl nsw i64 -1, %widenumlowbits
  %mask = xor i64 %notmask, -1
  %wideres = and i64 %shiftedval, %mask
  %res = trunc i64 %wideres to i32
  ret i32 %res
}

; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
define i32 @bextr64_32_b1(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_b1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    bic w0, w8, w9
; CHECK-NEXT:    ret
  %shiftedval = lshr i64 %val, %numskipbits
  %truncshiftedval = trunc i64 %shiftedval to i32
  %widenumlowbits = zext i8 %numlowbits to i32
  %notmask = shl nsw i32 -1, %widenumlowbits
  %mask = xor i32 %notmask, -1
  %res = and i32 %truncshiftedval, %mask
  ret i32 %res
}

; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
; Masking is 64-bit. Then truncation.
define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_b2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    lsl w9, w9, w2
; CHECK-NEXT:    bic w0, w8, w9
; CHECK-NEXT:    ret
  %shiftedval = lshr i64 %val, %numskipbits
  %widenumlowbits = zext i8 %numlowbits to i32
  %notmask = shl nsw i32 -1, %widenumlowbits
  %mask = xor i32 %notmask, -1
  %zextmask = zext i32 %mask to i64
  %wideres = and i64 %shiftedval, %zextmask
  %res = trunc i64 %wideres to i32
  ret i32 %res
}

; ---------------------------------------------------------------------------- ;
; Pattern c. 32-bit
; ---------------------------------------------------------------------------- ;

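; A note on the expected code: AArch64 register shifts use the shift amount
; modulo the register width, so for the full-width variants "neg w9, w2"
; already materializes (32 - %numlowbits) mod 32, and "lsr w9, w10, w9"
; (with w10 = -1) then builds the (-1 >> (32 - nbits)) mask.
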
define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w9, w2
; CHECK-NEXT:    mov w10, #-1
; CHECK-NEXT:    lsr w8, w0, w1
; CHECK-NEXT:    lsr w9, w10, w9
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %shifted = lshr i32 %val, %numskipbits
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #32
; CHECK-NEXT:    sub w9, w9, w2
; CHECK-NEXT:    mov w10, #-1
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr w8, w0, w1
; CHECK-NEXT:    lsr w9, w10, w9
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %mask = lshr i32 -1, %sh_prom
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_c2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w2
; CHECK-NEXT:    mov w10, #-1
; CHECK-NEXT:    lsr w9, w10, w9
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %shifted = lshr i32 %val, %numskipbits
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_c3_load_indexzext(i32* %w, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #32
; CHECK-NEXT:    mov w10, #-1
; CHECK-NEXT:    sub w9, w9, w2
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    lsr w9, w10, w9
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %mask = lshr i32 -1, %sh_prom
  %masked = and i32 %mask, %shifted
  ret i32 %masked
}

define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w9, w2
; CHECK-NEXT:    mov w10, #-1
; CHECK-NEXT:    lsr w8, w0, w1
; CHECK-NEXT:    lsr w9, w10, w9
; CHECK-NEXT:    and w0, w8, w9
; CHECK-NEXT:    ret
  %shifted = lshr i32 %val, %numskipbits
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %shifted, %mask ; swapped order
  ret i32 %masked
}

; 64-bit

define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x9, x2
; CHECK-NEXT:    mov x10, #-1
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    lsr x9, x10, x9
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i64 64, %numlowbits
  %mask = lshr i64 -1, %numhighbits
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_c1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #64
; CHECK-NEXT:    sub w9, w9, w2
; CHECK-NEXT:    mov x10, #-1
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    lsr x9, x10, x9
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %mask = lshr i64 -1, %sh_prom
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_c2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x2
; CHECK-NEXT:    mov x10, #-1
; CHECK-NEXT:    lsr x9, x10, x9
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i64 64, %numlowbits
  %mask = lshr i64 -1, %numhighbits
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_c3_load_indexzext(i64* %w, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #64
; CHECK-NEXT:    mov x10, #-1
; CHECK-NEXT:    sub w9, w9, w2
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    lsr x9, x10, x9
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %mask = lshr i64 -1, %sh_prom
  %masked = and i64 %mask, %shifted
  ret i64 %masked
}

define i64 @bextr64_c4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x9, x2
; CHECK-NEXT:    mov x10, #-1
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    lsr x9, x10, x9
; CHECK-NEXT:    and x0, x8, x9
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i64 64, %numlowbits
  %mask = lshr i64 -1, %numhighbits
  %masked = and i64 %shifted, %mask ; swapped order
  ret i64 %masked
}

; 64-bit, but with 32-bit output

; Everything done in 64-bit, truncation happens last.
define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_c0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x9, x2
; CHECK-NEXT:    mov x10, #-1
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    lsr x9, x10, x9
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i64 64, %numlowbits
  %mask = lshr i64 -1, %numhighbits
  %masked = and i64 %mask, %shifted
  %res = trunc i64 %masked to i32
  ret i32 %res
}

; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_c1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w9, w2
; CHECK-NEXT:    mov w10, #-1
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    lsr w9, w10, w9
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %truncshifted = trunc i64 %shifted to i32
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %mask, %truncshifted
  ret i32 %masked
}

; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
; Masking is 64-bit. Then truncation.
define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_c2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w9, w2
; CHECK-NEXT:    mov w10, #-1
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    lsr w9, w10, w9
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %zextmask = zext i32 %mask to i64
  %masked = and i64 %zextmask, %shifted
  %truncmasked = trunc i64 %masked to i32
  ret i32 %truncmasked
}

; ---------------------------------------------------------------------------- ;
; Pattern d. 32-bit.
; ---------------------------------------------------------------------------- ;

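; A note on the expected code: this mask-free variant clears the high bits
; with an lsl/lsr pair by (32 - nbits); as in pattern c, a bare "neg" is
; enough for the shift amount because register shifts are taken modulo the
; register width.
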
define i32 @bextr32_d0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_d0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr w8, w0, w1
; CHECK-NEXT:    neg w9, w2
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %shifted = lshr i32 %val, %numskipbits
  %numhighbits = sub i32 32, %numlowbits
  %highbitscleared = shl i32 %shifted, %numhighbits
  %masked = lshr i32 %highbitscleared, %numhighbits
  ret i32 %masked
}

define i32 @bextr32_d1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_d1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #32
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr w8, w0, w1
; CHECK-NEXT:    sub w9, w9, w2
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %highbitscleared = shl i32 %shifted, %sh_prom
  %masked = lshr i32 %highbitscleared, %sh_prom
  ret i32 %masked
}

define i32 @bextr32_d2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_d2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w2
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %shifted = lshr i32 %val, %numskipbits
  %numhighbits = sub i32 32, %numlowbits
  %highbitscleared = shl i32 %shifted, %numhighbits
  %masked = lshr i32 %highbitscleared, %numhighbits
  ret i32 %masked
}

define i32 @bextr32_d3_load_indexzext(i32* %w, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_d3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #32
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    sub w9, w9, w2
; CHECK-NEXT:    lsr w8, w8, w1
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %skip = zext i8 %numskipbits to i32
  %shifted = lshr i32 %val, %skip
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %highbitscleared = shl i32 %shifted, %sh_prom
  %masked = lshr i32 %highbitscleared, %sh_prom
  ret i32 %masked
}

; 64-bit.

define i64 @bextr64_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_d0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    neg x9, x2
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i64 64, %numlowbits
  %highbitscleared = shl i64 %shifted, %numhighbits
  %masked = lshr i64 %highbitscleared, %numhighbits
  ret i64 %masked
}

define i64 @bextr64_d1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_d1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w9, #64
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    sub w9, w9, w2
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %highbitscleared = shl i64 %shifted, %sh_prom
  %masked = lshr i64 %highbitscleared, %sh_prom
  ret i64 %masked
}

define i64 @bextr64_d2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_d2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x2
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i64 64, %numlowbits
  %highbitscleared = shl i64 %shifted, %numhighbits
  %masked = lshr i64 %highbitscleared, %numhighbits
  ret i64 %masked
}

define i64 @bextr64_d3_load_indexzext(i64* %w, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_d3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #64
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    sub w9, w9, w2
; CHECK-NEXT:    lsr x8, x8, x1
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %skip = zext i8 %numskipbits to i64
  %shifted = lshr i64 %val, %skip
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %highbitscleared = shl i64 %shifted, %sh_prom
  %masked = lshr i64 %highbitscleared, %sh_prom
  ret i64 %masked
}

; 64-bit, but with 32-bit output

; Everything done in 64-bit, truncation happens last.
define i32 @bextr64_32_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_d0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    neg x9, x2
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %numhighbits = sub i64 64, %numlowbits
  %highbitscleared = shl i64 %shifted, %numhighbits
  %masked = lshr i64 %highbitscleared, %numhighbits
  %res = trunc i64 %masked to i32
  ret i32 %res
}

; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
define i32 @bextr64_32_d1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_d1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, x1
; CHECK-NEXT:    neg w9, w2
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %shifted = lshr i64 %val, %numskipbits
  %truncshifted = trunc i64 %shifted to i32
  %numhighbits = sub i32 32, %numlowbits
  %highbitscleared = shl i32 %truncshifted, %numhighbits
  %masked = lshr i32 %highbitscleared, %numhighbits
  ret i32 %masked
}

; ---------------------------------------------------------------------------- ;
; Constant
; ---------------------------------------------------------------------------- ;

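; With constant shift amounts and masks, the whole extract should fold into a
; single ubfx (unsigned bitfield extract): "ubfx Rd, Rn, #lsb, #width"
; computes (Rn >> lsb) & ((1 << width) - 1).
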
; https://bugs.llvm.org/show_bug.cgi?id=38938
define void @pr38938(i32* %a0, i64* %a1) nounwind {
; CHECK-LABEL: pr38938:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x1]
; CHECK-NEXT:    ubfx x8, x8, #21, #10
; CHECK-NEXT:    lsl x8, x8, #2
; CHECK-NEXT:    ldr w9, [x0, x8]
; CHECK-NEXT:    add w9, w9, #1 // =1
; CHECK-NEXT:    str w9, [x0, x8]
; CHECK-NEXT:    ret
  %tmp = load i64, i64* %a1, align 8
  %tmp1 = lshr i64 %tmp, 21
  %tmp2 = and i64 %tmp1, 1023
  %tmp3 = getelementptr inbounds i32, i32* %a0, i64 %tmp2
  %tmp4 = load i32, i32* %tmp3, align 4
  %tmp5 = add nsw i32 %tmp4, 1
  store i32 %tmp5, i32* %tmp3, align 4
  ret void
}

; The most canonical variant
define i32 @c0_i32(i32 %arg) nounwind {
; CHECK-LABEL: c0_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w0, w0, #19, #10
; CHECK-NEXT:    ret
  %tmp0 = lshr i32 %arg, 19
  %tmp1 = and i32 %tmp0, 1023
  ret i32 %tmp1
}

; Should still be fine, but the mask is shifted (4092 = 1023 << 2)
define i32 @c1_i32(i32 %arg) nounwind {
; CHECK-LABEL: c1_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr w8, w0, #19
; CHECK-NEXT:    and w0, w8, #0xffc
; CHECK-NEXT:    ret
  %tmp0 = lshr i32 %arg, 19
  %tmp1 = and i32 %tmp0, 4092
  ret i32 %tmp1
}

; Should still be fine, but the result is shifted left afterwards
define i32 @c2_i32(i32 %arg) nounwind {
; CHECK-LABEL: c2_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w8, w0, #19, #10
; CHECK-NEXT:    lsl w0, w8, #2
; CHECK-NEXT:    ret
  %tmp0 = lshr i32 %arg, 19
  %tmp1 = and i32 %tmp0, 1023
  %tmp2 = shl i32 %tmp1, 2
  ret i32 %tmp2
}

; The mask covers a newly shifted-in bit
define i32 @c4_i32_bad(i32 %arg) nounwind {
; CHECK-LABEL: c4_i32_bad:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr w8, w0, #19
; CHECK-NEXT:    and w0, w8, #0x1ffe
; CHECK-NEXT:    ret
  %tmp0 = lshr i32 %arg, 19
  %tmp1 = and i32 %tmp0, 16382
  ret i32 %tmp1
}

; i64

; The most canonical variant
define i64 @c0_i64(i64 %arg) nounwind {
; CHECK-LABEL: c0_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx x0, x0, #51, #10
; CHECK-NEXT:    ret
  %tmp0 = lshr i64 %arg, 51
  %tmp1 = and i64 %tmp0, 1023
  ret i64 %tmp1
}

; Should still be fine, but the mask is shifted (4092 = 1023 << 2)
define i64 @c1_i64(i64 %arg) nounwind {
; CHECK-LABEL: c1_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, #51
; CHECK-NEXT:    and x0, x8, #0xffc
; CHECK-NEXT:    ret
  %tmp0 = lshr i64 %arg, 51
  %tmp1 = and i64 %tmp0, 4092
  ret i64 %tmp1
}

; Should still be fine, but the result is shifted left afterwards
define i64 @c2_i64(i64 %arg) nounwind {
; CHECK-LABEL: c2_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx x8, x0, #51, #10
; CHECK-NEXT:    lsl x0, x8, #2
; CHECK-NEXT:    ret
  %tmp0 = lshr i64 %arg, 51
  %tmp1 = and i64 %tmp0, 1023
  %tmp2 = shl i64 %tmp1, 2
  ret i64 %tmp2
}

; The mask covers a newly shifted-in bit
define i64 @c4_i64_bad(i64 %arg) nounwind {
; CHECK-LABEL: c4_i64_bad:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, #51
; CHECK-NEXT:    and x0, x8, #0x1ffe
; CHECK-NEXT:    ret
  %tmp0 = lshr i64 %arg, 51
  %tmp1 = and i64 %tmp0, 16382
  ret i64 %tmp1
}

; ---------------------------------------------------------------------------- ;
; Constant, storing the result afterwards.
; ---------------------------------------------------------------------------- ;

; i32

; The most canonical variant
define void @c5_i32(i32 %arg, i32* %ptr) nounwind {
; CHECK-LABEL: c5_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w8, w0, #19, #10
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %tmp0 = lshr i32 %arg, 19
  %tmp1 = and i32 %tmp0, 1023
  store i32 %tmp1, i32* %ptr
  ret void
}

; Should still be fine, but the mask is wider (4095 = (1 << 12) - 1)
define void @c6_i32(i32 %arg, i32* %ptr) nounwind {
; CHECK-LABEL: c6_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w8, w0, #19, #12
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %tmp0 = lshr i32 %arg, 19
  %tmp1 = and i32 %tmp0, 4095
  store i32 %tmp1, i32* %ptr
  ret void
}

; Should still be fine, but the result is shifted left afterwards
define void @c7_i32(i32 %arg, i32* %ptr) nounwind {
; CHECK-LABEL: c7_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w8, w0, #19, #10
; CHECK-NEXT:    lsl w8, w8, #2
; CHECK-NEXT:    str w8, [x1]
; CHECK-NEXT:    ret
  %tmp0 = lshr i32 %arg, 19
  %tmp1 = and i32 %tmp0, 1023
  %tmp2 = shl i32 %tmp1, 2
  store i32 %tmp2, i32* %ptr
  ret void
}

; i64

; The most canonical variant
define void @c5_i64(i64 %arg, i64* %ptr) nounwind {
; CHECK-LABEL: c5_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx x8, x0, #51, #10
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %tmp0 = lshr i64 %arg, 51
  %tmp1 = and i64 %tmp0, 1023
  store i64 %tmp1, i64* %ptr
  ret void
}

; Should still be fine, but the mask is wider (4095 = (1 << 12) - 1)
define void @c6_i64(i64 %arg, i64* %ptr) nounwind {
; CHECK-LABEL: c6_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx x8, x0, #51, #12
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %tmp0 = lshr i64 %arg, 51
  %tmp1 = and i64 %tmp0, 4095
  store i64 %tmp1, i64* %ptr
  ret void
}

; Should still be fine, but the result is shifted left afterwards
define void @c7_i64(i64 %arg, i64* %ptr) nounwind {
; CHECK-LABEL: c7_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx x8, x0, #51, #10
; CHECK-NEXT:    lsl x8, x8, #2
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %tmp0 = lshr i64 %arg, 51
  %tmp1 = and i64 %tmp0, 1023
  %tmp2 = shl i64 %tmp1, 2
  store i64 %tmp2, i64* %ptr
  ret void
}