; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+mmx < %s | FileCheck %s --check-prefix=X64
; RUN: llc -mtriple=i686-unknown-unknown -mattr=+mmx < %s | FileCheck %s --check-prefix=I32


; Select between two constant MMX values; the select result feeds both
; operands of the MMX shift intrinsic.
; From source: clang -O2
;__m64 test47(int a)
;{
;    __m64 x = (a)? (__m64)(7): (__m64)(0);
;    return __builtin_ia32_psllw(x, x);
;}

define i64 @test47(i64 %arg) {
;
; X64-LABEL: test47:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    testq %rdi, %rdi
; X64-NEXT:    movl $7, %ecx
; X64-NEXT:    cmoveq %rcx, %rax
; X64-NEXT:    movq %rax, %mm0
; X64-NEXT:    psllw %mm0, %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    retq
;
; I32-LABEL: test47:
; I32:       # %bb.0:
; I32-NEXT:    pushl %ebp
; I32-NEXT:    .cfi_def_cfa_offset 8
; I32-NEXT:    .cfi_offset %ebp, -8
; I32-NEXT:    movl %esp, %ebp
; I32-NEXT:    .cfi_def_cfa_register %ebp
; I32-NEXT:    andl $-8, %esp
; I32-NEXT:    subl $16, %esp
; I32-NEXT:    movl 8(%ebp), %eax
; I32-NEXT:    orl 12(%ebp), %eax
; I32-NEXT:    movl $7, %eax
; I32-NEXT:    je .LBB0_2
; I32-NEXT:  # %bb.1:
; I32-NEXT:    xorl %eax, %eax
; I32-NEXT:  .LBB0_2:
; I32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; I32-NEXT:    movl $0, {{[0-9]+}}(%esp)
; I32-NEXT:    movq {{[0-9]+}}(%esp), %mm0
; I32-NEXT:    psllw %mm0, %mm0
; I32-NEXT:    movq %mm0, (%esp)
; I32-NEXT:    movl (%esp), %eax
; I32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; I32-NEXT:    movl %ebp, %esp
; I32-NEXT:    popl %ebp
; I32-NEXT:    .cfi_def_cfa %esp, 4
; I32-NEXT:    retl
  %cond = icmp eq i64 %arg, 0
  %slct = select i1 %cond, x86_mmx bitcast (i64 7 to x86_mmx), x86_mmx bitcast (i64 0 to x86_mmx)
  %psll = tail call x86_mmx @llvm.x86.mmx.psll.w(x86_mmx %slct, x86_mmx %slct)
  %retc = bitcast x86_mmx %psll to i64
  ret i64 %retc
}


; Same pattern with non-constant select operands (both arms bitcast from
; i64 arguments).
; From source: clang -O2
;__m64 test49(int a, long long n, long long m)
;{
;    __m64 x = (a)? (__m64)(n): (__m64)(m);
;    return __builtin_ia32_psllw(x, x);
;}

define i64 @test49(i64 %arg, i64 %x, i64 %y) {
;
; X64-LABEL: test49:
; X64:       # %bb.0:
; X64-NEXT:    testq %rdi, %rdi
; X64-NEXT:    cmovneq %rdx, %rsi
; X64-NEXT:    movq %rsi, %mm0
; X64-NEXT:    psllw %mm0, %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    retq
;
; I32-LABEL: test49:
; I32:       # %bb.0:
; I32-NEXT:    pushl %ebp
; I32-NEXT:    .cfi_def_cfa_offset 8
; I32-NEXT:    .cfi_offset %ebp, -8
; I32-NEXT:    movl %esp, %ebp
; I32-NEXT:    .cfi_def_cfa_register %ebp
; I32-NEXT:    andl $-8, %esp
; I32-NEXT:    subl $8, %esp
; I32-NEXT:    movl 8(%ebp), %eax
; I32-NEXT:    orl 12(%ebp), %eax
; I32-NEXT:    je .LBB1_1
; I32-NEXT:  # %bb.2:
; I32-NEXT:    leal 24(%ebp), %eax
; I32-NEXT:    jmp .LBB1_3
; I32-NEXT:  .LBB1_1:
; I32-NEXT:    leal 16(%ebp), %eax
; I32-NEXT:  .LBB1_3:
; I32-NEXT:    movq (%eax), %mm0
; I32-NEXT:    psllw %mm0, %mm0
; I32-NEXT:    movq %mm0, (%esp)
; I32-NEXT:    movl (%esp), %eax
; I32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; I32-NEXT:    movl %ebp, %esp
; I32-NEXT:    popl %ebp
; I32-NEXT:    .cfi_def_cfa %esp, 4
; I32-NEXT:    retl
  %cond = icmp eq i64 %arg, 0
  %xmmx = bitcast i64 %x to x86_mmx
  %ymmx = bitcast i64 %y to x86_mmx
  %slct = select i1 %cond, x86_mmx %xmmx, x86_mmx %ymmx
  %psll = tail call x86_mmx @llvm.x86.mmx.psll.w(x86_mmx %slct, x86_mmx %slct)
  %retc = bitcast x86_mmx %psll to i64
  ret i64 %retc
}

declare x86_mmx @llvm.x86.mmx.psll.w(x86_mmx, x86_mmx)