; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=X64

; test vector shifts converted to proper SSE2 vector shifts when the shift
; amounts are the same when using a shuffle splat.

define void @shift1a(<2 x i64> %val, <2 x i64>* %dst, <2 x i64> %sh) nounwind {
; X32-LABEL: shift1a:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    psllq %xmm1, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift1a:
; X64:       # %bb.0: # %entry
; X64-NEXT:    psllq %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %shamt = shufflevector <2 x i64> %sh, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
  %shl = shl <2 x i64> %val, %shamt
  store <2 x i64> %shl, <2 x i64>* %dst
  ret void
}

; shift1b can't use a packed shift but can shift lanes separately and shuffle back together
define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, <2 x i64> %sh) nounwind {
; X32-LABEL: shift1b:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movdqa %xmm0, %xmm2
; X32-NEXT:    psllq %xmm1, %xmm2
; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X32-NEXT:    psllq %xmm1, %xmm0
; X32-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; X32-NEXT:    movapd %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift1b:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movdqa %xmm0, %xmm2
; X64-NEXT:    psllq %xmm1, %xmm2
; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X64-NEXT:    psllq %xmm1, %xmm0
; X64-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; X64-NEXT:    movapd %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %shamt = shufflevector <2 x i64> %sh, <2 x i64> undef, <2 x i32> <i32 0, i32 1>
  %shl = shl <2 x i64> %val, %shamt
  store <2 x i64> %shl, <2 x i64>* %dst
  ret void
}

define void @shift2a(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
; X32-LABEL: shift2a:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X32-NEXT:    xorps %xmm2, %xmm2
; X32-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; X32-NEXT:    pslld %xmm2, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift2a:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X64-NEXT:    xorps %xmm2, %xmm2
; X64-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; X64-NEXT:    pslld %xmm2, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %shamt = shufflevector <2 x i32> %amt, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  %shl = shl <4 x i32> %val, %shamt
  store <4 x i32> %shl, <4 x i32>* %dst
  ret void
}

define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
; X32-LABEL: shift2b:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X32-NEXT:    xorps %xmm2, %xmm2
; X32-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; X32-NEXT:    pslld %xmm2, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift2b:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X64-NEXT:    xorps %xmm2, %xmm2
; X64-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; X64-NEXT:    pslld %xmm2, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %shamt = shufflevector <2 x i32> %amt, <2 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 1, i32 1>
  %shl = shl <4 x i32> %val, %shamt
  store <4 x i32> %shl, <4 x i32>* %dst
  ret void
}

define void @shift2c(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
; X32-LABEL: shift2c:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X32-NEXT:    xorps %xmm2, %xmm2
; X32-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; X32-NEXT:    pslld %xmm2, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift2c:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X64-NEXT:    xorps %xmm2, %xmm2
; X64-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; X64-NEXT:    pslld %xmm2, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %shamt = shufflevector <2 x i32> %amt, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  %shl = shl <4 x i32> %val, %shamt
  store <4 x i32> %shl, <4 x i32>* %dst
  ret void
}

define void @shift3a(<8 x i16> %val, <8 x i16>* %dst, <8 x i16> %amt) nounwind {
; X32-LABEL: shift3a:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pextrw $6, %xmm1, %ecx
; X32-NEXT:    movd %ecx, %xmm1
; X32-NEXT:    psllw %xmm1, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift3a:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pextrw $6, %xmm1, %eax
; X64-NEXT:    movd %eax, %xmm1
; X64-NEXT:    psllw %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %shamt = shufflevector <8 x i16> %amt, <8 x i16> undef, <8 x i32> <i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6>
  %shl = shl <8 x i16> %val, %shamt
  store <8 x i16> %shl, <8 x i16>* %dst
  ret void
}

define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
; X32-LABEL: shift3b:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movd %ecx, %xmm1
; X32-NEXT:    psllw %xmm1, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift3b:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movzwl %si, %eax
; X64-NEXT:    movd %eax, %xmm1
; X64-NEXT:    psllw %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
  %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
  %2 = insertelement <8 x i16> %1, i16 %amt, i32 2
  %3 = insertelement <8 x i16> %2, i16 %amt, i32 3
  %4 = insertelement <8 x i16> %3, i16 %amt, i32 4
  %5 = insertelement <8 x i16> %4, i16 %amt, i32 5
  %6 = insertelement <8 x i16> %5, i16 %amt, i32 6
  %7 = insertelement <8 x i16> %6, i16 %amt, i32 7
  %shl = shl <8 x i16> %val, %7
  store <8 x i16> %shl, <8 x i16>* %dst
  ret void
}