; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64

; These tests just check that the plumbing is in place for @llvm.bitreverse. The
; actual output is massive at the moment as llvm.bitreverse is not yet legal.

declare <2 x i16> @llvm.bitreverse.v2i16(<2 x i16>) readnone

define <2 x i16> @test_bitreverse_v2i16(<2 x i16> %a) nounwind {
; X86-LABEL: test_bitreverse_v2i16:
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: rolw $8, %ax
; X86-NEXT: movl %eax, %edx
; X86-NEXT: andl $3855, %edx # imm = 0xF0F
; X86-NEXT: shll $4, %edx
; X86-NEXT: andl $61680, %eax # imm = 0xF0F0
; X86-NEXT: shrl $4, %eax
; X86-NEXT: orl %edx, %eax
; X86-NEXT: movl %eax, %edx
; X86-NEXT: andl $13107, %edx # imm = 0x3333
; X86-NEXT: andl $52428, %eax # imm = 0xCCCC
; X86-NEXT: shrl $2, %eax
; X86-NEXT: leal (%eax,%edx,4), %eax
; X86-NEXT: movl %eax, %edx
; X86-NEXT: andl $21845, %edx # imm = 0x5555
; X86-NEXT: andl $43690, %eax # imm = 0xAAAA
; X86-NEXT: shrl %eax
; X86-NEXT: leal (%eax,%edx,2), %eax
; X86-NEXT: rolw $8, %cx
; X86-NEXT: movl %ecx, %edx
; X86-NEXT: andl $3855, %edx # imm = 0xF0F
; X86-NEXT: shll $4, %edx
; X86-NEXT: andl $61680, %ecx # imm = 0xF0F0
; X86-NEXT: shrl $4, %ecx
; X86-NEXT: orl %edx, %ecx
; X86-NEXT: movl %ecx, %edx
; X86-NEXT: andl $13107, %edx # imm = 0x3333
; X86-NEXT: andl $52428, %ecx # imm = 0xCCCC
; X86-NEXT: shrl $2, %ecx
; X86-NEXT: leal (%ecx,%edx,4), %ecx
; X86-NEXT: movl %ecx, %edx
; X86-NEXT: andl $21845, %edx # imm = 0x5555
; X86-NEXT: andl $43690, %ecx # imm = 0xAAAA
; X86-NEXT: shrl %ecx
; X86-NEXT: leal (%ecx,%edx,2), %edx
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: # kill: def $dx killed $dx killed $edx
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_v2i16:
; X64: # %bb.0:
; X64-NEXT: pxor %xmm1, %xmm1
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; X64-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; X64-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; X64-NEXT: packuswb %xmm2, %xmm0
; X64-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pand %xmm1, %xmm2
; X64-NEXT: psllw $4, %xmm2
; X64-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; X64-NEXT: pand %xmm3, %xmm2
; X64-NEXT: pand %xmm3, %xmm0
; X64-NEXT: psrlw $4, %xmm0
; X64-NEXT: pand %xmm1, %xmm0
; X64-NEXT: por %xmm2, %xmm0
; X64-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X64-NEXT: pand %xmm0, %xmm1
; X64-NEXT: psllw $2, %xmm1
; X64-NEXT: pand {{.*}}(%rip), %xmm1
; X64-NEXT: pand {{.*}}(%rip), %xmm0
; X64-NEXT: psrlw $2, %xmm0
; X64-NEXT: pand {{.*}}(%rip), %xmm0
; X64-NEXT: por %xmm1, %xmm0
; X64-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; X64-NEXT: pand %xmm0, %xmm1
; X64-NEXT: paddb %xmm1, %xmm1
; X64-NEXT: pand {{.*}}(%rip), %xmm0
; X64-NEXT: psrlw $1, %xmm0
; X64-NEXT: pand {{.*}}(%rip), %xmm0
; X64-NEXT: por %xmm1, %xmm0
; X64-NEXT: psrlq $48, %xmm0
; X64-NEXT: retq
  %b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %a)
  ret <2 x i16> %b
}
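
; The scalar expansions below all follow the same pattern: reverse the byte
; order (bswapl/bswapq, or rolw $8 for 16-bit values), then perform three
; mask-and-shift rounds that swap nibbles, bit pairs, and finally adjacent
; bits. As an illustrative sketch only (not generated output; value names are
; made up), the i32 case is roughly equivalent to this IR, using the same
; mask constants that appear in the checks:
;
;   %r  = call i32 @llvm.bswap.i32(i32 %a)
;   %l0 = and i32 %r, 252645135    ; 0x0F0F0F0F, keep low nibbles
;   %h0 = and i32 %r, -252645136   ; 0xF0F0F0F0, keep high nibbles
;   %s0 = shl i32 %l0, 4
;   %t0 = lshr i32 %h0, 4
;   %n  = or i32 %s0, %t0          ; nibbles swapped
;   ... then the same dance with 0x33333333/0xCCCCCCCC and a shift of 2,
;   ... and with 0x55555555/0xAAAAAAAA and a shift of 1.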

declare i64 @llvm.bitreverse.i64(i64) readnone

define i64 @test_bitreverse_i64(i64 %a) nounwind {
; X86-LABEL: test_bitreverse_i64:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: bswapl %eax
; X86-NEXT: movl %eax, %edx
; X86-NEXT: andl $252645135, %edx # imm = 0xF0F0F0F
; X86-NEXT: shll $4, %edx
; X86-NEXT: andl $-252645136, %eax # imm = 0xF0F0F0F0
; X86-NEXT: shrl $4, %eax
; X86-NEXT: orl %edx, %eax
; X86-NEXT: movl %eax, %edx
; X86-NEXT: andl $858993459, %edx # imm = 0x33333333
; X86-NEXT: andl $-858993460, %eax # imm = 0xCCCCCCCC
; X86-NEXT: shrl $2, %eax
; X86-NEXT: leal (%eax,%edx,4), %eax
; X86-NEXT: movl %eax, %edx
; X86-NEXT: andl $1431655765, %edx # imm = 0x55555555
; X86-NEXT: andl $-1431655766, %eax # imm = 0xAAAAAAAA
; X86-NEXT: shrl %eax
; X86-NEXT: leal (%eax,%edx,2), %eax
; X86-NEXT: bswapl %ecx
; X86-NEXT: movl %ecx, %edx
; X86-NEXT: andl $252645135, %edx # imm = 0xF0F0F0F
; X86-NEXT: shll $4, %edx
; X86-NEXT: andl $-252645136, %ecx # imm = 0xF0F0F0F0
; X86-NEXT: shrl $4, %ecx
; X86-NEXT: orl %edx, %ecx
; X86-NEXT: movl %ecx, %edx
; X86-NEXT: andl $858993459, %edx # imm = 0x33333333
; X86-NEXT: andl $-858993460, %ecx # imm = 0xCCCCCCCC
; X86-NEXT: shrl $2, %ecx
; X86-NEXT: leal (%ecx,%edx,4), %ecx
; X86-NEXT: movl %ecx, %edx
; X86-NEXT: andl $1431655765, %edx # imm = 0x55555555
; X86-NEXT: andl $-1431655766, %ecx # imm = 0xAAAAAAAA
; X86-NEXT: shrl %ecx
; X86-NEXT: leal (%ecx,%edx,2), %edx
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i64:
; X64: # %bb.0:
; X64-NEXT: bswapq %rdi
; X64-NEXT: movabsq $1085102592571150095, %rax # imm = 0xF0F0F0F0F0F0F0F
; X64-NEXT: andq %rdi, %rax
; X64-NEXT: shlq $4, %rax
; X64-NEXT: movabsq $-1085102592571150096, %rcx # imm = 0xF0F0F0F0F0F0F0F0
; X64-NEXT: andq %rdi, %rcx
; X64-NEXT: shrq $4, %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
; X64-NEXT: andq %rcx, %rax
; X64-NEXT: movabsq $-3689348814741910324, %rdx # imm = 0xCCCCCCCCCCCCCCCC
; X64-NEXT: andq %rcx, %rdx
; X64-NEXT: shrq $2, %rdx
; X64-NEXT: leaq (%rdx,%rax,4), %rax
; X64-NEXT: movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
; X64-NEXT: andq %rax, %rcx
; X64-NEXT: movabsq $-6148914691236517206, %rdx # imm = 0xAAAAAAAAAAAAAAAA
; X64-NEXT: andq %rax, %rdx
; X64-NEXT: shrq %rdx
; X64-NEXT: leaq (%rdx,%rcx,2), %rax
; X64-NEXT: retq
  %b = call i64 @llvm.bitreverse.i64(i64 %a)
  ret i64 %b
}

declare i32 @llvm.bitreverse.i32(i32) readnone

define i32 @test_bitreverse_i32(i32 %a) nounwind {
; X86-LABEL: test_bitreverse_i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: bswapl %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
; X86-NEXT: shll $4, %ecx
; X86-NEXT: andl $-252645136, %eax # imm = 0xF0F0F0F0
; X86-NEXT: shrl $4, %eax
; X86-NEXT: orl %ecx, %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: andl $858993459, %ecx # imm = 0x33333333
; X86-NEXT: andl $-858993460, %eax # imm = 0xCCCCCCCC
; X86-NEXT: shrl $2, %eax
; X86-NEXT: leal (%eax,%ecx,4), %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: andl $1431655765, %ecx # imm = 0x55555555
; X86-NEXT: andl $-1431655766, %eax # imm = 0xAAAAAAAA
; X86-NEXT: shrl %eax
; X86-NEXT: leal (%eax,%ecx,2), %eax
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i32:
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: bswapl %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
; X64-NEXT: shll $4, %eax
; X64-NEXT: andl $-252645136, %edi # imm = 0xF0F0F0F0
; X64-NEXT: shrl $4, %edi
; X64-NEXT: orl %eax, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $858993459, %eax # imm = 0x33333333
; X64-NEXT: andl $-858993460, %edi # imm = 0xCCCCCCCC
; X64-NEXT: shrl $2, %edi
; X64-NEXT: leal (%rdi,%rax,4), %eax
; X64-NEXT: movl %eax, %ecx
; X64-NEXT: andl $1431655765, %ecx # imm = 0x55555555
; X64-NEXT: andl $-1431655766, %eax # imm = 0xAAAAAAAA
; X64-NEXT: shrl %eax
; X64-NEXT: leal (%rax,%rcx,2), %eax
; X64-NEXT: retq
  %b = call i32 @llvm.bitreverse.i32(i32 %a)
  ret i32 %b
}
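
; Non-register-width types are promoted before the expansion: i24 is widened
; to i32, so the final-stage masks below are 0x55555500/0xAAAAAA00 (the low 8
; bits are padding) and the result is shifted back down by 8 at the end.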

declare i24 @llvm.bitreverse.i24(i24) readnone

define i24 @test_bitreverse_i24(i24 %a) nounwind {
; X86-LABEL: test_bitreverse_i24:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: bswapl %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
; X86-NEXT: shll $4, %ecx
; X86-NEXT: andl $-252645136, %eax # imm = 0xF0F0F0F0
; X86-NEXT: shrl $4, %eax
; X86-NEXT: orl %ecx, %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: andl $858993459, %ecx # imm = 0x33333333
; X86-NEXT: andl $-858993460, %eax # imm = 0xCCCCCCCC
; X86-NEXT: shrl $2, %eax
; X86-NEXT: leal (%eax,%ecx,4), %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: andl $1431655680, %ecx # imm = 0x55555500
; X86-NEXT: andl $-1431655936, %eax # imm = 0xAAAAAA00
; X86-NEXT: shrl %eax
; X86-NEXT: leal (%eax,%ecx,2), %eax
; X86-NEXT: shrl $8, %eax
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i24:
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: bswapl %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
; X64-NEXT: shll $4, %eax
; X64-NEXT: andl $-252645136, %edi # imm = 0xF0F0F0F0
; X64-NEXT: shrl $4, %edi
; X64-NEXT: orl %eax, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $858993459, %eax # imm = 0x33333333
; X64-NEXT: andl $-858993460, %edi # imm = 0xCCCCCCCC
; X64-NEXT: shrl $2, %edi
; X64-NEXT: leal (%rdi,%rax,4), %eax
; X64-NEXT: movl %eax, %ecx
; X64-NEXT: andl $1431655680, %ecx # imm = 0x55555500
; X64-NEXT: andl $-1431655936, %eax # imm = 0xAAAAAA00
; X64-NEXT: shrl %eax
; X64-NEXT: leal (%rax,%rcx,2), %eax
; X64-NEXT: shrl $8, %eax
; X64-NEXT: retq
  %b = call i24 @llvm.bitreverse.i24(i24 %a)
  ret i24 %b
}

declare i16 @llvm.bitreverse.i16(i16) readnone

define i16 @test_bitreverse_i16(i16 %a) nounwind {
; X86-LABEL: test_bitreverse_i16:
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: rolw $8, %ax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: andl $3855, %ecx # imm = 0xF0F
; X86-NEXT: shll $4, %ecx
; X86-NEXT: andl $61680, %eax # imm = 0xF0F0
; X86-NEXT: shrl $4, %eax
; X86-NEXT: orl %ecx, %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: andl $13107, %ecx # imm = 0x3333
; X86-NEXT: andl $52428, %eax # imm = 0xCCCC
; X86-NEXT: shrl $2, %eax
; X86-NEXT: leal (%eax,%ecx,4), %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: andl $21845, %ecx # imm = 0x5555
; X86-NEXT: andl $43690, %eax # imm = 0xAAAA
; X86-NEXT: shrl %eax
; X86-NEXT: leal (%eax,%ecx,2), %eax
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i16:
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: rolw $8, %di
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $3855, %eax # imm = 0xF0F
; X64-NEXT: shll $4, %eax
; X64-NEXT: andl $61680, %edi # imm = 0xF0F0
; X64-NEXT: shrl $4, %edi
; X64-NEXT: orl %eax, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $13107, %eax # imm = 0x3333
; X64-NEXT: andl $52428, %edi # imm = 0xCCCC
; X64-NEXT: shrl $2, %edi
; X64-NEXT: leal (%rdi,%rax,4), %eax
; X64-NEXT: movl %eax, %ecx
; X64-NEXT: andl $21845, %ecx # imm = 0x5555
; X64-NEXT: andl $43690, %eax # imm = 0xAAAA
; X64-NEXT: shrl %eax
; X64-NEXT: leal (%rax,%rcx,2), %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
  %b = call i16 @llvm.bitreverse.i16(i16 %a)
  ret i16 %b
}
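
; Byte-sized values need no byte swap: rolb $4 exchanges the two nibbles in a
; single instruction, leaving only the 2-bit and 1-bit rounds. The i4 case
; below reuses the i8 expansion with the final-stage masks restricted to the
; high nibble (0x50/0xA0) plus a trailing shrb $4.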

declare i8 @llvm.bitreverse.i8(i8) readnone

define i8 @test_bitreverse_i8(i8 %a) {
; X86-LABEL: test_bitreverse_i8:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: rolb $4, %al
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: andb $51, %cl
; X86-NEXT: shlb $2, %cl
; X86-NEXT: andb $-52, %al
; X86-NEXT: shrb $2, %al
; X86-NEXT: orb %cl, %al
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: andb $85, %cl
; X86-NEXT: addb %cl, %cl
; X86-NEXT: andb $-86, %al
; X86-NEXT: shrb %al
; X86-NEXT: orb %cl, %al
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i8:
; X64: # %bb.0:
; X64-NEXT: rolb $4, %dil
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andb $51, %al
; X64-NEXT: shlb $2, %al
; X64-NEXT: andb $-52, %dil
; X64-NEXT: shrb $2, %dil
; X64-NEXT: orb %al, %dil
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andb $85, %al
; X64-NEXT: addb %al, %al
; X64-NEXT: andb $-86, %dil
; X64-NEXT: shrb %dil
; X64-NEXT: orb %al, %dil
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
  %b = call i8 @llvm.bitreverse.i8(i8 %a)
  ret i8 %b
}

declare i4 @llvm.bitreverse.i4(i4) readnone

define i4 @test_bitreverse_i4(i4 %a) {
; X86-LABEL: test_bitreverse_i4:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: rolb $4, %al
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: andb $51, %cl
; X86-NEXT: shlb $2, %cl
; X86-NEXT: andb $-52, %al
; X86-NEXT: shrb $2, %al
; X86-NEXT: orb %cl, %al
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: andb $80, %cl
; X86-NEXT: addb %cl, %cl
; X86-NEXT: andb $-96, %al
; X86-NEXT: shrb %al
; X86-NEXT: orb %cl, %al
; X86-NEXT: shrb $4, %al
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i4:
; X64: # %bb.0:
; X64-NEXT: rolb $4, %dil
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andb $51, %al
; X64-NEXT: shlb $2, %al
; X64-NEXT: andb $-52, %dil
; X64-NEXT: shrb $2, %dil
; X64-NEXT: orb %al, %dil
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andb $80, %al
; X64-NEXT: addb %al, %al
; X64-NEXT: andb $-96, %dil
; X64-NEXT: shrb %dil
; X64-NEXT: orb %al, %dil
; X64-NEXT: shrb $4, %dil
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
  %b = call i4 @llvm.bitreverse.i4(i4 %a)
  ret i4 %b
}

; These tests check that bitreverse(constant) calls are folded
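; (worked out from the constants, not extra checks: i16 15 = 0x000F reverses
; to 0xF000 = 61440 and i16 3840 = 0x0F00 to 0x00F0 = 240, while bit 12 of
; the i24 value 4096 lands on bit 23-12 = 11, i.e. 2048)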

define <2 x i16> @fold_v2i16() {
; X86-LABEL: fold_v2i16:
; X86: # %bb.0:
; X86-NEXT: movw $-4096, %ax # imm = 0xF000
; X86-NEXT: movw $240, %dx
; X86-NEXT: retl
;
; X64-LABEL: fold_v2i16:
; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [61440,240]
; X64-NEXT: retq
  %b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> <i16 15, i16 3840>)
  ret <2 x i16> %b
}

define i24 @fold_i24() {
; X86-LABEL: fold_i24:
; X86: # %bb.0:
; X86-NEXT: movl $2048, %eax # imm = 0x800
; X86-NEXT: retl
;
; X64-LABEL: fold_i24:
; X64: # %bb.0:
; X64-NEXT: movl $2048, %eax # imm = 0x800
; X64-NEXT: retq
  %b = call i24 @llvm.bitreverse.i24(i24 4096)
  ret i24 %b
}

define i8 @fold_i8() {
; X86-LABEL: fold_i8:
; X86: # %bb.0:
; X86-NEXT: movb $-16, %al
; X86-NEXT: retl
;
; X64-LABEL: fold_i8:
; X64: # %bb.0:
; X64-NEXT: movb $-16, %al
; X64-NEXT: retq
  %b = call i8 @llvm.bitreverse.i8(i8 15)
  ret i8 %b
}

define i4 @fold_i4() {
; X86-LABEL: fold_i4:
; X86: # %bb.0:
; X86-NEXT: movb $1, %al
; X86-NEXT: retl
;
; X64-LABEL: fold_i4:
; X64: # %bb.0:
; X64-NEXT: movb $1, %al
; X64-NEXT: retq
  %b = call i4 @llvm.bitreverse.i4(i4 8)
  ret i4 %b
}

; These tests check that bitreverse(bitreverse()) calls are removed

define i8 @identity_i8(i8 %a) {
; X86-LABEL: identity_i8:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: retl
;
; X64-LABEL: identity_i8:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
  %b = call i8 @llvm.bitreverse.i8(i8 %a)
  %c = call i8 @llvm.bitreverse.i8(i8 %b)
  ret i8 %c
}

define <2 x i16> @identity_v2i16(<2 x i16> %a) {
; X86-LABEL: identity_v2i16:
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx
; X86-NEXT: retl
;
; X64-LABEL: identity_v2i16:
; X64: # %bb.0:
; X64-NEXT: retq
  %b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %a)
  %c = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %b)
  ret <2 x i16> %c
}

; These tests check that bitreverse(undef) calls are removed

define i8 @undef_i8() {
; X86-LABEL: undef_i8:
; X86: # %bb.0:
; X86-NEXT: retl
;
; X64-LABEL: undef_i8:
; X64: # %bb.0:
; X64-NEXT: retq
  %b = call i8 @llvm.bitreverse.i8(i8 undef)
  ret i8 %b
}

define <2 x i16> @undef_v2i16() {
; X86-LABEL: undef_v2i16:
; X86: # %bb.0:
; X86-NEXT: retl
;
; X64-LABEL: undef_v2i16:
; X64: # %bb.0:
; X64-NEXT: retq
  %b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> undef)
  ret <2 x i16> %b
}