; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=bdver1 | FileCheck %s
; Verify that for architectures known to have poor-latency double-precision
; shift instructions we generate an alternative sequence of instructions
; with lower latencies instead of the shld instruction.

;uint64_t lshift1(uint64_t a, uint64_t b)
;{
;  return (a << 1) | (b >> 63);
;}

; CHECK-LABEL: lshift1:
; CHECK: shrq $63, %rsi
; CHECK-NEXT: leaq (%rsi,%rdi,2), %rax

define i64 @lshift1(i64 %a, i64 %b) nounwind readnone uwtable {
entry:
  %shl = shl i64 %a, 1
  %shr = lshr i64 %b, 63
  %or = or i64 %shr, %shl
  ret i64 %or
}

;uint64_t lshift2(uint64_t a, uint64_t b)
;{
;  return (a << 2) | (b >> 62);
;}

; CHECK-LABEL: lshift2:
; CHECK: shrq $62, %rsi
; CHECK-NEXT: leaq (%rsi,%rdi,4), %rax

define i64 @lshift2(i64 %a, i64 %b) nounwind readnone uwtable {
entry:
  %shl = shl i64 %a, 2
  %shr = lshr i64 %b, 62
  %or = or i64 %shr, %shl
  ret i64 %or
}

;uint64_t lshift7(uint64_t a, uint64_t b)
;{
;  return (a << 7) | (b >> 57);
;}

; CHECK-LABEL: lshift7:
; CHECK: shlq $7, {{.*}}
; CHECK-NEXT: shrq $57, {{.*}}
; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}

define i64 @lshift7(i64 %a, i64 %b) nounwind readnone uwtable {
entry:
  %shl = shl i64 %a, 7
  %shr = lshr i64 %b, 57
  %or = or i64 %shr, %shl
  ret i64 %or
}

;uint64_t lshift63(uint64_t a, uint64_t b)
;{
;  return (a << 63) | (b >> 1);
;}

; CHECK-LABEL: lshift63:
; CHECK: shlq $63, {{.*}}
; CHECK-NEXT: shrq {{.*}}
; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}

define i64 @lshift63(i64 %a, i64 %b) nounwind readnone uwtable {
entry:
  %shl = shl i64 %a, 63
  %shr = lshr i64 %b, 1
  %or = or i64 %shr, %shl
  ret i64 %or
}