; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi,+bmi2 | FileCheck %s

declare i8 @llvm.cttz.i8(i8, i1)
declare i16 @llvm.cttz.i16(i16, i1)
declare i32 @llvm.cttz.i32(i32, i1)
declare i64 @llvm.cttz.i64(i64, i1)

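; For i8 with a defined zero input, cttz must return 8. There is no byte form
; of tzcnt, so the value is zero-extended and bit 8 is set (orl $256) before a
; 32-bit tzcnt, which then yields 8 when the low byte is zero. Wider types need
; no fixup: tzcnt already returns the operand size on a zero input.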
define i8 @t1(i8 %x)   {
; CHECK-LABEL: t1:
; CHECK:       # BB#0:
; CHECK-NEXT:    movzbl %dil, %eax
; CHECK-NEXT:    orl $256, %eax # imm = 0x100
; CHECK-NEXT:    tzcntl %eax, %eax
; CHECK-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT:    retq
  %tmp = tail call i8 @llvm.cttz.i8( i8 %x, i1 false )
  ret i8 %tmp
}

define i16 @t2(i16 %x)   {
; CHECK-LABEL: t2:
; CHECK:       # BB#0:
; CHECK-NEXT:    tzcntw %di, %ax
; CHECK-NEXT:    retq
  %tmp = tail call i16 @llvm.cttz.i16( i16 %x, i1 false )
  ret i16 %tmp
}

define i32 @t3(i32 %x)   {
; CHECK-LABEL: t3:
; CHECK:       # BB#0:
; CHECK-NEXT:    tzcntl %edi, %eax
; CHECK-NEXT:    retq
  %tmp = tail call i32 @llvm.cttz.i32( i32 %x, i1 false )
  ret i32 %tmp
}

define i32 @tzcnt32_load(i32* %x)   {
; CHECK-LABEL: tzcnt32_load:
; CHECK:       # BB#0:
; CHECK-NEXT:    tzcntl (%rdi), %eax
; CHECK-NEXT:    retq
  %x1 = load i32, i32* %x
  %tmp = tail call i32 @llvm.cttz.i32(i32 %x1, i1 false )
  ret i32 %tmp
}

define i64 @t4(i64 %x)   {
; CHECK-LABEL: t4:
; CHECK:       # BB#0:
; CHECK-NEXT:    tzcntq %rdi, %rax
; CHECK-NEXT:    retq
  %tmp = tail call i64 @llvm.cttz.i64( i64 %x, i1 false )
  ret i64 %tmp
}

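; The following variants pass true for is_zero_undef, so no zero-input fixup is
; needed: the i8 case still widens (there is no byte tzcnt) but drops the or.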
define i8 @t5(i8 %x)   {
; CHECK-LABEL: t5:
; CHECK:       # BB#0:
; CHECK-NEXT:    movzbl %dil, %eax
; CHECK-NEXT:    tzcntl %eax, %eax
; CHECK-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT:    retq
  %tmp = tail call i8 @llvm.cttz.i8( i8 %x, i1 true )
  ret i8 %tmp
}

define i16 @t6(i16 %x)   {
; CHECK-LABEL: t6:
; CHECK:       # BB#0:
; CHECK-NEXT:    tzcntw %di, %ax
; CHECK-NEXT:    retq
  %tmp = tail call i16 @llvm.cttz.i16( i16 %x, i1 true )
  ret i16 %tmp
}

define i32 @t7(i32 %x)   {
; CHECK-LABEL: t7:
; CHECK:       # BB#0:
; CHECK-NEXT:    tzcntl %edi, %eax
; CHECK-NEXT:    retq
  %tmp = tail call i32 @llvm.cttz.i32( i32 %x, i1 true )
  ret i32 %tmp
}

define i64 @t8(i64 %x)   {
; CHECK-LABEL: t8:
; CHECK:       # BB#0:
; CHECK-NEXT:    tzcntq %rdi, %rax
; CHECK-NEXT:    retq
  %tmp = tail call i64 @llvm.cttz.i64( i64 %x, i1 true )
  ret i64 %tmp
}

define i32 @andn32(i32 %x, i32 %y)   {
; CHECK-LABEL: andn32:
; CHECK:       # BB#0:
; CHECK-NEXT:    andnl %esi, %edi, %eax
; CHECK-NEXT:    retq
  %tmp1 = xor i32 %x, -1
  %tmp2 = and i32 %y, %tmp1
  ret i32 %tmp2
}

define i32 @andn32_load(i32 %x, i32* %y)   {
; CHECK-LABEL: andn32_load:
; CHECK:       # BB#0:
; CHECK-NEXT:    andnl (%rsi), %edi, %eax
; CHECK-NEXT:    retq
  %y1 = load i32, i32* %y
  %tmp1 = xor i32 %x, -1
  %tmp2 = and i32 %y1, %tmp1
  ret i32 %tmp2
}

define i64 @andn64(i64 %x, i64 %y)   {
; CHECK-LABEL: andn64:
; CHECK:       # BB#0:
; CHECK-NEXT:    andnq %rsi, %rdi, %rax
; CHECK-NEXT:    retq
  %tmp1 = xor i64 %x, -1
  %tmp2 = and i64 %tmp1, %y
  ret i64 %tmp2
}

; Don't choose a 'test' if an 'andn' can be used.
define i1 @andn_cmp(i32 %x, i32 %y) {
; CHECK-LABEL: andn_cmp:
; CHECK:       # BB#0:
; CHECK-NEXT:    andnl %esi, %edi, %eax
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
  %notx = xor i32 %x, -1
  %and = and i32 %notx, %y
  %cmp = icmp eq i32 %and, 0
  ret i1 %cmp
}

; Recognize a disguised andn in the following 4 tests: (x & y) == y holds iff
; (~x & y) == 0, so the and+icmp pair folds to an andn plus a flags check.
define i1 @and_cmp1(i32 %x, i32 %y) {
; CHECK-LABEL: and_cmp1:
; CHECK:       # BB#0:
; CHECK-NEXT:    andnl %esi, %edi, %eax
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
  %and = and i32 %x, %y
  %cmp = icmp eq i32 %and, %y
  ret i1 %cmp
}

define i1 @and_cmp2(i32 %x, i32 %y) {
; CHECK-LABEL: and_cmp2:
; CHECK:       # BB#0:
; CHECK-NEXT:    andnl %esi, %edi, %eax
; CHECK-NEXT:    setne %al
; CHECK-NEXT:    retq
  %and = and i32 %y, %x
  %cmp = icmp ne i32 %and, %y
  ret i1 %cmp
}

define i1 @and_cmp3(i32 %x, i32 %y) {
; CHECK-LABEL: and_cmp3:
; CHECK:       # BB#0:
; CHECK-NEXT:    andnl %esi, %edi, %eax
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
  %and = and i32 %x, %y
  %cmp = icmp eq i32 %y, %and
  ret i1 %cmp
}

define i1 @and_cmp4(i32 %x, i32 %y) {
; CHECK-LABEL: and_cmp4:
; CHECK:       # BB#0:
; CHECK-NEXT:    andnl %esi, %edi, %eax
; CHECK-NEXT:    setne %al
; CHECK-NEXT:    retq
  %and = and i32 %y, %x
  %cmp = icmp ne i32 %y, %and
  ret i1 %cmp
}

; A mask and compare against constant is ok for an 'andn' too
; even though the BMI instruction doesn't have an immediate form.
define i1 @and_cmp_const(i32 %x) {
; CHECK-LABEL: and_cmp_const:
; CHECK:       # BB#0:
; CHECK-NEXT:    movl $43, %eax
; CHECK-NEXT:    andnl %eax, %edi, %eax
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
  %and = and i32 %x, 43
  %cmp = icmp eq i32 %and, 43
  ret i1 %cmp
}

; But don't use 'andn' if the mask is a power-of-two: a single bit test (bt)
; covers that case directly.
define i1 @and_cmp_const_power_of_two(i32 %x, i32 %y) {
; CHECK-LABEL: and_cmp_const_power_of_two:
; CHECK:       # BB#0:
; CHECK-NEXT:    btl %esi, %edi
; CHECK-NEXT:    setae %al
; CHECK-NEXT:    retq
  %shl = shl i32 1, %y
  %and = and i32 %x, %shl
  %cmp = icmp ne i32 %and, %shl
  ret i1 %cmp
}

; Don't transform to 'andn' if there's another use of the 'and'.
define i32 @and_cmp_not_one_use(i32 %x) {
; CHECK-LABEL: and_cmp_not_one_use:
; CHECK:       # BB#0:
; CHECK-NEXT:    andl $37, %edi
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    cmpl $37, %edi
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    addl %edi, %eax
; CHECK-NEXT:    retq
  %and = and i32 %x, 37
  %cmp = icmp eq i32 %and, 37
  %ext = zext i1 %cmp to i32
  %add = add i32 %and, %ext
  ret i32 %add
}

; Verify that we're not transforming invalid comparison predicates: only eq/ne
; compares against the mask fold to 'andn'.
define i1 @not_an_andn1(i32 %x, i32 %y) {
; CHECK-LABEL: not_an_andn1:
; CHECK:       # BB#0:
; CHECK-NEXT:    andl %esi, %edi
; CHECK-NEXT:    cmpl %edi, %esi
; CHECK-NEXT:    setg %al
; CHECK-NEXT:    retq
  %and = and i32 %x, %y
  %cmp = icmp sgt i32 %y, %and
  ret i1 %cmp
}

define i1 @not_an_andn2(i32 %x, i32 %y) {
; CHECK-LABEL: not_an_andn2:
; CHECK:       # BB#0:
; CHECK-NEXT:    andl %esi, %edi
; CHECK-NEXT:    cmpl %edi, %esi
; CHECK-NEXT:    setbe %al
; CHECK-NEXT:    retq
  %and = and i32 %y, %x
  %cmp = icmp ule i32 %y, %and
  ret i1 %cmp
}

; Don't choose a 'test' if an 'andn' can be used.
define i1 @andn_cmp_swap_ops(i64 %x, i64 %y) {
; CHECK-LABEL: andn_cmp_swap_ops:
; CHECK:       # BB#0:
; CHECK-NEXT:    andnq %rsi, %rdi, %rax
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
  %notx = xor i64 %x, -1
  %and = and i64 %y, %notx
  %cmp = icmp eq i64 %and, 0
  ret i1 %cmp
}

; Use a 'test' (not an 'and') because 'andn' only works for i32/i64.
define i1 @andn_cmp_i8(i8 %x, i8 %y) {
; CHECK-LABEL: andn_cmp_i8:
; CHECK:       # BB#0:
; CHECK-NEXT:    notb %sil
; CHECK-NEXT:    testb %sil, %dil
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
  %noty = xor i8 %y, -1
  %and = and i8 %x, %noty
  %cmp = icmp eq i8 %and, 0
  ret i1 %cmp
}

define i32 @bextr32(i32 %x, i32 %y)   {
; CHECK-LABEL: bextr32:
; CHECK:       # BB#0:
; CHECK-NEXT:    bextrl %esi, %edi, %eax
; CHECK-NEXT:    retq
  %tmp = tail call i32 @llvm.x86.bmi.bextr.32(i32 %x, i32 %y)
  ret i32 %tmp
}

define i32 @bextr32_load(i32* %x, i32 %y)   {
; CHECK-LABEL: bextr32_load:
; CHECK:       # BB#0:
; CHECK-NEXT:    bextrl %esi, (%rdi), %eax
; CHECK-NEXT:    retq
  %x1 = load i32, i32* %x
  %tmp = tail call i32 @llvm.x86.bmi.bextr.32(i32 %x1, i32 %y)
  ret i32 %tmp
}

declare i32 @llvm.x86.bmi.bextr.32(i32, i32)

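; The bextr control word encodes the start bit in bits 7:0 and the field length
; in bits 15:8, so 0xC04 extracts 12 bits starting at bit 4, matching the lshr
; by 4 plus the 4095 mask below.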
define i32 @bextr32b(i32 %x)  uwtable  ssp {
; CHECK-LABEL: bextr32b:
; CHECK:       # BB#0:
; CHECK-NEXT:    movl $3076, %eax # imm = 0xC04
; CHECK-NEXT:    bextrl %eax, %edi, %eax
; CHECK-NEXT:    retq
  %1 = lshr i32 %x, 4
  %2 = and i32 %1, 4095
  ret i32 %2
}

define i32 @bextr32b_load(i32* %x)  uwtable  ssp {
; CHECK-LABEL: bextr32b_load:
; CHECK:       # BB#0:
; CHECK-NEXT:    movl $3076, %eax # imm = 0xC04
; CHECK-NEXT:    bextrl %eax, (%rdi), %eax
; CHECK-NEXT:    retq
  %1 = load i32, i32* %x
  %2 = lshr i32 %1, 4
  %3 = and i32 %2, 4095
  ret i32 %3
}

define i64 @bextr64(i64 %x, i64 %y)   {
; CHECK-LABEL: bextr64:
; CHECK:       # BB#0:
; CHECK-NEXT:    bextrq %rsi, %rdi, %rax
; CHECK-NEXT:    retq
  %tmp = tail call i64 @llvm.x86.bmi.bextr.64(i64 %x, i64 %y)
  ret i64 %tmp
}

declare i64 @llvm.x86.bmi.bextr.64(i64, i64)

define i64 @bextr64b(i64 %x)  uwtable  ssp {
; CHECK-LABEL: bextr64b:
; CHECK:       # BB#0:
; CHECK-NEXT:    movl $3076, %eax # imm = 0xC04
; CHECK-NEXT:    bextrl %eax, %edi, %eax
; CHECK-NEXT:    retq
  %1 = lshr i64 %x, 4
  %2 = and i64 %1, 4095
  ret i64 %2
}

define i64 @bextr64b_load(i64* %x) {
; CHECK-LABEL: bextr64b_load:
; CHECK:       # BB#0:
; CHECK-NEXT:    movl $3076, %eax # imm = 0xC04
; CHECK-NEXT:    bextrl %eax, (%rdi), %eax
; CHECK-NEXT:    retq
  %1 = load i64, i64* %x, align 8
  %2 = lshr i64 %1, 4
  %3 = and i64 %2, 4095
  ret i64 %3
}

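; No bextr here: 111 and 8589934590 are not masks of the form 2^n-1, so the
; shift+and pairs below are not contiguous low-bit extractions.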
define i32 @non_bextr32(i32 %x) {
; CHECK-LABEL: non_bextr32:
; CHECK:       # BB#0: # %entry
; CHECK-NEXT:    shrl $2, %edi
; CHECK-NEXT:    andl $111, %edi
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    retq
entry:
  %shr = lshr i32 %x, 2
  %and = and i32 %shr, 111
  ret i32 %and
}

define i64 @non_bextr64(i64 %x) {
; CHECK-LABEL: non_bextr64:
; CHECK:       # BB#0: # %entry
; CHECK-NEXT:    shrq $2, %rdi
; CHECK-NEXT:    movabsq $8589934590, %rax # imm = 0x1FFFFFFFE
; CHECK-NEXT:    andq %rdi, %rax
; CHECK-NEXT:    retq
entry:
  %shr = lshr i64 %x, 2
  %and = and i64 %shr, 8589934590
  ret i64 %and
}

define i32 @bzhi32(i32 %x, i32 %y)   {
; CHECK-LABEL: bzhi32:
; CHECK:       # BB#0:
; CHECK-NEXT:    bzhil %esi, %edi, %eax
; CHECK-NEXT:    retq
  %tmp = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %x, i32 %y)
  ret i32 %tmp
}

define i32 @bzhi32_load(i32* %x, i32 %y)   {
; CHECK-LABEL: bzhi32_load:
; CHECK:       # BB#0:
; CHECK-NEXT:    bzhil %esi, (%rdi), %eax
; CHECK-NEXT:    retq
  %x1 = load i32, i32* %x
  %tmp = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %x1, i32 %y)
  ret i32 %tmp
}

declare i32 @llvm.x86.bmi.bzhi.32(i32, i32)

define i64 @bzhi64(i64 %x, i64 %y)   {
; CHECK-LABEL: bzhi64:
; CHECK:       # BB#0:
; CHECK-NEXT:    bzhiq %rsi, %rdi, %rax
; CHECK-NEXT:    retq
  %tmp = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %x, i64 %y)
  ret i64 %tmp
}

declare i64 @llvm.x86.bmi.bzhi.64(i64, i64)

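; Masking with (1 << index) - 1 zeroes all bits from 'index' upward, which is
; exactly what bzhi does, so the shl/add/and sequence folds to a single bzhi.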
define i32 @bzhi32b(i32 %x, i8 zeroext %index) {
; CHECK-LABEL: bzhi32b:
; CHECK:       # BB#0: # %entry
; CHECK-NEXT:    bzhil %esi, %edi, %eax
; CHECK-NEXT:    retq
entry:
  %conv = zext i8 %index to i32
  %shl = shl i32 1, %conv
  %sub = add nsw i32 %shl, -1
  %and = and i32 %sub, %x
  ret i32 %and
}

define i32 @bzhi32b_load(i32* %w, i8 zeroext %index) {
; CHECK-LABEL: bzhi32b_load:
; CHECK:       # BB#0: # %entry
; CHECK-NEXT:    bzhil %esi, (%rdi), %eax
; CHECK-NEXT:    retq
entry:
  %x = load i32, i32* %w
  %conv = zext i8 %index to i32
  %shl = shl i32 1, %conv
  %sub = add nsw i32 %shl, -1
  %and = and i32 %sub, %x
  ret i32 %and
}

define i32 @bzhi32c(i32 %x, i8 zeroext %index) {
; CHECK-LABEL: bzhi32c:
; CHECK:       # BB#0: # %entry
; CHECK-NEXT:    bzhil %esi, %edi, %eax
; CHECK-NEXT:    retq
entry:
  %conv = zext i8 %index to i32
  %shl = shl i32 1, %conv
  %sub = add nsw i32 %shl, -1
  %and = and i32 %x, %sub
  ret i32 %and
}

define i64 @bzhi64b(i64 %x, i8 zeroext %index) {
; CHECK-LABEL: bzhi64b:
; CHECK:       # BB#0: # %entry
; CHECK-NEXT:    # kill: %ESI<def> %ESI<kill> %RSI<def>
; CHECK-NEXT:    bzhiq %rsi, %rdi, %rax
; CHECK-NEXT:    retq
entry:
  %conv = zext i8 %index to i64
  %shl = shl i64 1, %conv
  %sub = add nsw i64 %shl, -1
  %and = and i64 %x, %sub
  ret i64 %and
}

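; 4611686018427387903 is 2^62-1, so the and folds to a bzhi with a constant
; index of 62.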
define i64 @bzhi64_constant_mask(i64 %x) {
; CHECK-LABEL: bzhi64_constant_mask:
; CHECK:       # BB#0: # %entry
; CHECK-NEXT:    movb $62, %al
; CHECK-NEXT:    bzhiq %rax, %rdi, %rax
; CHECK-NEXT:    retq
entry:
  %and = and i64 %x, 4611686018427387903
  ret i64 %and
}

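; 2147483647 is 2^31-1, which fits in a 32-bit immediate, so a plain andl
; suffices and no bzhi is needed.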
define i64 @bzhi64_small_constant_mask(i64 %x) {
; CHECK-LABEL: bzhi64_small_constant_mask:
; CHECK:       # BB#0: # %entry
; CHECK-NEXT:    andl $2147483647, %edi # imm = 0x7FFFFFFF
; CHECK-NEXT:    movq %rdi, %rax
; CHECK-NEXT:    retq
entry:
  %and = and i64 %x, 2147483647
  ret i64 %and
}

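; x & -x isolates the lowest set bit, which is blsi.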
define i32 @blsi32(i32 %x)   {
; CHECK-LABEL: blsi32:
; CHECK:       # BB#0:
; CHECK-NEXT:    blsil %edi, %eax
; CHECK-NEXT:    retq
  %tmp = sub i32 0, %x
  %tmp2 = and i32 %x, %tmp
  ret i32 %tmp2
}

define i32 @blsi32_load(i32* %x)   {
; CHECK-LABEL: blsi32_load:
; CHECK:       # BB#0:
; CHECK-NEXT:    blsil (%rdi), %eax
; CHECK-NEXT:    retq
  %x1 = load i32, i32* %x
  %tmp = sub i32 0, %x1
  %tmp2 = and i32 %x1, %tmp
  ret i32 %tmp2
}

define i64 @blsi64(i64 %x)   {
; CHECK-LABEL: blsi64:
; CHECK:       # BB#0:
; CHECK-NEXT:    blsiq %rdi, %rax
; CHECK-NEXT:    retq
  %tmp = sub i64 0, %x
  %tmp2 = and i64 %tmp, %x
  ret i64 %tmp2
}

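; x ^ (x - 1) sets all bits up to and including the lowest set bit, which is
; blsmsk.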
define i32 @blsmsk32(i32 %x)   {
; CHECK-LABEL: blsmsk32:
; CHECK:       # BB#0:
; CHECK-NEXT:    blsmskl %edi, %eax
; CHECK-NEXT:    retq
  %tmp = sub i32 %x, 1
  %tmp2 = xor i32 %x, %tmp
  ret i32 %tmp2
}

define i32 @blsmsk32_load(i32* %x)   {
; CHECK-LABEL: blsmsk32_load:
; CHECK:       # BB#0:
; CHECK-NEXT:    blsmskl (%rdi), %eax
; CHECK-NEXT:    retq
  %x1 = load i32, i32* %x
  %tmp = sub i32 %x1, 1
  %tmp2 = xor i32 %x1, %tmp
  ret i32 %tmp2
}

define i64 @blsmsk64(i64 %x)   {
; CHECK-LABEL: blsmsk64:
; CHECK:       # BB#0:
; CHECK-NEXT:    blsmskq %rdi, %rax
; CHECK-NEXT:    retq
  %tmp = sub i64 %x, 1
  %tmp2 = xor i64 %tmp, %x
  ret i64 %tmp2
}

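; x & (x - 1) clears the lowest set bit, which is blsr.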
define i32 @blsr32(i32 %x)   {
; CHECK-LABEL: blsr32:
; CHECK:       # BB#0:
; CHECK-NEXT:    blsrl %edi, %eax
; CHECK-NEXT:    retq
  %tmp = sub i32 %x, 1
  %tmp2 = and i32 %x, %tmp
  ret i32 %tmp2
}

define i32 @blsr32_load(i32* %x)   {
; CHECK-LABEL: blsr32_load:
; CHECK:       # BB#0:
; CHECK-NEXT:    blsrl (%rdi), %eax
; CHECK-NEXT:    retq
  %x1 = load i32, i32* %x
  %tmp = sub i32 %x1, 1
  %tmp2 = and i32 %x1, %tmp
  ret i32 %tmp2
}

define i64 @blsr64(i64 %x)   {
; CHECK-LABEL: blsr64:
; CHECK:       # BB#0:
; CHECK-NEXT:    blsrq %rdi, %rax
; CHECK-NEXT:    retq
  %tmp = sub i64 %x, 1
  %tmp2 = and i64 %tmp, %x
  ret i64 %tmp2
}

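; pdep scatters the low bits of the first operand to the set-bit positions of
; the mask operand.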
define i32 @pdep32(i32 %x, i32 %y)   {
; CHECK-LABEL: pdep32:
; CHECK:       # BB#0:
; CHECK-NEXT:    pdepl %esi, %edi, %eax
; CHECK-NEXT:    retq
  %tmp = tail call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 %y)
  ret i32 %tmp
}

define i32 @pdep32_load(i32 %x, i32* %y)   {
; CHECK-LABEL: pdep32_load:
; CHECK:       # BB#0:
; CHECK-NEXT:    pdepl (%rsi), %edi, %eax
; CHECK-NEXT:    retq
  %y1 = load i32, i32* %y
  %tmp = tail call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 %y1)
  ret i32 %tmp
}

declare i32 @llvm.x86.bmi.pdep.32(i32, i32)

define i64 @pdep64(i64 %x, i64 %y)   {
; CHECK-LABEL: pdep64:
; CHECK:       # BB#0:
; CHECK-NEXT:    pdepq %rsi, %rdi, %rax
; CHECK-NEXT:    retq
  %tmp = tail call i64 @llvm.x86.bmi.pdep.64(i64 %x, i64 %y)
  ret i64 %tmp
}

declare i64 @llvm.x86.bmi.pdep.64(i64, i64)

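; pext gathers the bits of the first operand selected by the mask operand into
; the low bits of the result.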
define i32 @pext32(i32 %x, i32 %y)   {
; CHECK-LABEL: pext32:
; CHECK:       # BB#0:
; CHECK-NEXT:    pextl %esi, %edi, %eax
; CHECK-NEXT:    retq
  %tmp = tail call i32 @llvm.x86.bmi.pext.32(i32 %x, i32 %y)
  ret i32 %tmp
}

define i32 @pext32_load(i32 %x, i32* %y)   {
; CHECK-LABEL: pext32_load:
; CHECK:       # BB#0:
; CHECK-NEXT:    pextl (%rsi), %edi, %eax
; CHECK-NEXT:    retq
  %y1 = load i32, i32* %y
  %tmp = tail call i32 @llvm.x86.bmi.pext.32(i32 %x, i32 %y1)
  ret i32 %tmp
}

declare i32 @llvm.x86.bmi.pext.32(i32, i32)

define i64 @pext64(i64 %x, i64 %y)   {
; CHECK-LABEL: pext64:
; CHECK:       # BB#0:
; CHECK-NEXT:    pextq %rsi, %rdi, %rax
; CHECK-NEXT:    retq
  %tmp = tail call i64 @llvm.x86.bmi.pext.64(i64 %x, i64 %y)
  ret i64 %tmp
}

declare i64 @llvm.x86.bmi.pext.64(i64, i64)