; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-- -mattr=sse4.1 < %s | FileCheck %s -check-prefix=SSE4
; RUN: llc -mtriple=x86_64-- -mattr=avx < %s | FileCheck %s -check-prefix=AVX1
; RUN: llc -mtriple=x86_64-- -mattr=avx2 < %s | FileCheck %s -check-prefix=AVX2

define <16 x i16> @split16(<16 x i16> %a, <16 x i16> %b, <16 x i8> %__mask) {
; SSE4-LABEL: split16:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pminuw %xmm2, %xmm0
; SSE4-NEXT:    pminuw %xmm3, %xmm1
; SSE4-NEXT:    retq
;
; AVX1-LABEL: split16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT:    vpminuw %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: split16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
  %1 = icmp ult <16 x i16> %a, %b
  %2 = select <16 x i1> %1, <16 x i16> %a, <16 x i16> %b
  ret <16 x i16> %2
}

define <32 x i16> @split32(<32 x i16> %a, <32 x i16> %b, <32 x i8> %__mask) {
; SSE4-LABEL: split32:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pminuw %xmm4, %xmm0
; SSE4-NEXT:    pminuw %xmm5, %xmm1
; SSE4-NEXT:    pminuw %xmm6, %xmm2
; SSE4-NEXT:    pminuw %xmm7, %xmm3
; SSE4-NEXT:    retq
;
; AVX1-LABEL: split32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT:    vpminuw %xmm4, %xmm5, %xmm4
; AVX1-NEXT:    vpminuw %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT:    vpminuw %xmm2, %xmm4, %xmm2
; AVX1-NEXT:    vpminuw %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: split32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpminuw %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpminuw %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    retq
  %1 = icmp ult <32 x i16> %a, %b
  %2 = select <32 x i1> %1, <32 x i16> %a, <32 x i16> %b
  ret <32 x i16> %2
}

; PR19492
define i128 @split128(<2 x i128> %a, <2 x i128> %b) {
; SSE4-LABEL: split128:
; SSE4:       # %bb.0:
; SSE4-NEXT:    movq %rdx, %rax
; SSE4-NEXT:    addq %r8, %rdi
; SSE4-NEXT:    adcq %r9, %rsi
; SSE4-NEXT:    addq {{[0-9]+}}(%rsp), %rax
; SSE4-NEXT:    adcq {{[0-9]+}}(%rsp), %rcx
; SSE4-NEXT:    addq %rdi, %rax
; SSE4-NEXT:    adcq %rsi, %rcx
; SSE4-NEXT:    movq %rcx, %rdx
; SSE4-NEXT:    retq
;
; AVX1-LABEL: split128:
; AVX1:       # %bb.0:
; AVX1-NEXT:    movq %rdx, %rax
; AVX1-NEXT:    addq %r8, %rdi
; AVX1-NEXT:    adcq %r9, %rsi
; AVX1-NEXT:    addq {{[0-9]+}}(%rsp), %rax
; AVX1-NEXT:    adcq {{[0-9]+}}(%rsp), %rcx
; AVX1-NEXT:    addq %rdi, %rax
; AVX1-NEXT:    adcq %rsi, %rcx
; AVX1-NEXT:    movq %rcx, %rdx
; AVX1-NEXT:    retq
;
; AVX2-LABEL: split128:
; AVX2:       # %bb.0:
; AVX2-NEXT:    movq %rdx, %rax
; AVX2-NEXT:    addq %r8, %rdi
; AVX2-NEXT:    adcq %r9, %rsi
; AVX2-NEXT:    addq {{[0-9]+}}(%rsp), %rax
; AVX2-NEXT:    adcq {{[0-9]+}}(%rsp), %rcx
; AVX2-NEXT:    addq %rdi, %rax
; AVX2-NEXT:    adcq %rsi, %rcx
; AVX2-NEXT:    movq %rcx, %rdx
; AVX2-NEXT:    retq
  %add = add nsw <2 x i128> %a, %b
  %rdx.shuf = shufflevector <2 x i128> %add, <2 x i128> undef, <2 x i32> <i32 undef, i32 0>
  %bin.rdx = add <2 x i128> %add, %rdx.shuf
  %e = extractelement <2 x i128> %bin.rdx, i32 1
  ret i128 %e
}