; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=sse2 < %s | FileCheck %s --check-prefix=SSE
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx2 < %s | FileCheck %s --check-prefix=AVX

; Verify that 128-bit vector logical ops are reassociated.

define <4 x i32> @reassociate_and_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
; SSE-LABEL: reassociate_and_v4i32:
; SSE:       # BB#0:
; SSE-NEXT:    paddd %xmm1, %xmm0
; SSE-NEXT:    pand %xmm3, %xmm2
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: reassociate_and_v4i32:
; AVX:       # BB#0:
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpand %xmm3, %xmm2, %xmm1
; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq

  %t0 = add <4 x i32> %x0, %x1
  %t1 = and <4 x i32> %x2, %t0
  %t2 = and <4 x i32> %x3, %t1
  ret <4 x i32> %t2
}

define <4 x i32> @reassociate_or_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
; SSE-LABEL: reassociate_or_v4i32:
; SSE:       # BB#0:
; SSE-NEXT:    paddd %xmm1, %xmm0
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: reassociate_or_v4i32:
; AVX:       # BB#0:
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpor %xmm3, %xmm2, %xmm1
; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq

  %t0 = add <4 x i32> %x0, %x1
  %t1 = or <4 x i32> %x2, %t0
  %t2 = or <4 x i32> %x3, %t1
  ret <4 x i32> %t2
}

define <4 x i32> @reassociate_xor_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
; SSE-LABEL: reassociate_xor_v4i32:
; SSE:       # BB#0:
; SSE-NEXT:    paddd %xmm1, %xmm0
; SSE-NEXT:    pxor %xmm3, %xmm2
; SSE-NEXT:    pxor %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: reassociate_xor_v4i32:
; AVX:       # BB#0:
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm1
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq

  %t0 = add <4 x i32> %x0, %x1
  %t1 = xor <4 x i32> %x2, %t0
  %t2 = xor <4 x i32> %x3, %t1
  ret <4 x i32> %t2
}

; Verify that 256-bit vector logical ops are reassociated.

define <8 x i32> @reassociate_and_v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, <8 x i32> %x3) {
; AVX-LABEL: reassociate_and_v8i32:
; AVX:       # BB#0:
; AVX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX-NEXT:    vpand %ymm3, %ymm2, %ymm1
; AVX-NEXT:    vpand %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq

  %t0 = add <8 x i32> %x0, %x1
  %t1 = and <8 x i32> %x2, %t0
  %t2 = and <8 x i32> %x3, %t1
  ret <8 x i32> %t2
}

define <8 x i32> @reassociate_or_v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, <8 x i32> %x3) {
; AVX-LABEL: reassociate_or_v8i32:
; AVX:       # BB#0:
; AVX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX-NEXT:    vpor %ymm3, %ymm2, %ymm1
; AVX-NEXT:    vpor %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq

  %t0 = add <8 x i32> %x0, %x1
  %t1 = or <8 x i32> %x2, %t0
  %t2 = or <8 x i32> %x3, %t1
  ret <8 x i32> %t2
}

define <8 x i32> @reassociate_xor_v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, <8 x i32> %x3) {
; AVX-LABEL: reassociate_xor_v8i32:
; AVX:       # BB#0:
; AVX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX-NEXT:    vpxor %ymm3, %ymm2, %ymm1
; AVX-NEXT:    vpxor %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq

  %t0 = add <8 x i32> %x0, %x1
  %t1 = xor <8 x i32> %x2, %t0
  %t2 = xor <8 x i32> %x3, %t1
  ret <8 x i32> %t2
}