; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -show-mc-encoding -mattr=+sse2 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -show-mc-encoding -mattr=+avx | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -show-mc-encoding -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefix=AVX512DQ

; Test that we can replace "scalar" FP-bitwise-logic with the optimal instruction.
; Scalar x86 FP-logic instructions only exist in your imagination and/or the bowels
; of compilers, but float and double variants of FP-logic instructions are reality
; and float may be a shorter instruction depending on which flavor of vector ISA
; you have...so just prefer float all the time, ok? Yay, x86!

define double @FsANDPSrr(double %x, double %y) {
; SSE-LABEL: FsANDPSrr:
; SSE:       # %bb.0:
; SSE-NEXT:    andps %xmm1, %xmm0 # encoding: [0x0f,0x54,0xc1]
; SSE-NEXT:    retq # encoding: [0xc3]
;
; AVX-LABEL: FsANDPSrr:
; AVX:       # %bb.0:
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x54,0xc1]
; AVX-NEXT:    retq # encoding: [0xc3]
;
; AVX512DQ-LABEL: FsANDPSrr:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vandps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x54,0xc1]
; AVX512DQ-NEXT:    retq # encoding: [0xc3]
  %bc1 = bitcast double %x to i64
  %bc2 = bitcast double %y to i64
  %and = and i64 %bc1, %bc2
  %bc3 = bitcast i64 %and to double
  ret double %bc3
}

define double @FsANDNPSrr(double %x, double %y) {
; SSE-LABEL: FsANDNPSrr:
; SSE:       # %bb.0:
; SSE-NEXT:    andnps %xmm0, %xmm1 # encoding: [0x0f,0x55,0xc8]
; SSE-NEXT:    movaps %xmm1, %xmm0 # encoding: [0x0f,0x28,0xc1]
; SSE-NEXT:    retq # encoding: [0xc3]
;
; AVX-LABEL: FsANDNPSrr:
; AVX:       # %bb.0:
; AVX-NEXT:    vandnps %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf0,0x55,0xc0]
; AVX-NEXT:    retq # encoding: [0xc3]
;
; AVX512DQ-LABEL: FsANDNPSrr:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vandnps %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf0,0x55,0xc0]
; AVX512DQ-NEXT:    retq # encoding: [0xc3]
  %bc1 = bitcast double %x to i64
  %bc2 = bitcast double %y to i64
  %not = xor i64 %bc2, -1
  %and = and i64 %bc1, %not
  %bc3 = bitcast i64 %and to double
  ret double %bc3
}

define double @FsORPSrr(double %x, double %y) {
; SSE-LABEL: FsORPSrr:
; SSE:       # %bb.0:
; SSE-NEXT:    orps %xmm1, %xmm0 # encoding: [0x0f,0x56,0xc1]
; SSE-NEXT:    retq # encoding: [0xc3]
;
; AVX-LABEL: FsORPSrr:
; AVX:       # %bb.0:
; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x56,0xc1]
; AVX-NEXT:    retq # encoding: [0xc3]
;
; AVX512DQ-LABEL: FsORPSrr:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vorps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x56,0xc1]
; AVX512DQ-NEXT:    retq # encoding: [0xc3]
  %bc1 = bitcast double %x to i64
  %bc2 = bitcast double %y to i64
  %or = or i64 %bc1, %bc2
  %bc3 = bitcast i64 %or to double
  ret double %bc3
}

define double @FsXORPSrr(double %x, double %y) {
; SSE-LABEL: FsXORPSrr:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm0 # encoding: [0x0f,0x57,0xc1]
; SSE-NEXT:    retq # encoding: [0xc3]
;
; AVX-LABEL: FsXORPSrr:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x57,0xc1]
; AVX-NEXT:    retq # encoding: [0xc3]
;
; AVX512DQ-LABEL: FsXORPSrr:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0xc1]
; AVX512DQ-NEXT:    retq # encoding: [0xc3]
  %bc1 = bitcast double %x to i64
  %bc2 = bitcast double %y to i64
  %xor = xor i64 %bc1, %bc2
  %bc3 = bitcast i64 %xor to double
  ret double %bc3
}
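; An illustrative aside, not part of the autogenerated checks above (the andpd
; encoding below is an assumption based on the x86 instruction reference): the
; double ("pd") form of each of these logic ops carries a 0x66 operand-size prefix,
; so it encodes one byte longer than the float ("ps") form the test expects, e.g.
;   andps %xmm1, %xmm0 # encoding: [0x0f,0x54,0xc1]
;   andpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x54,0xc1]
; That size difference is why the lowering checked above selects the *ps variants
; even though the IR performs the bitwise logic on double values.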