; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -O2 \
; RUN:   -ppc-gpr-icmps=all -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
; RUN:  --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -O2 \
; RUN:   -ppc-gpr-icmps=all -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck %s \
; RUN:  --implicit-check-not cmpw --implicit-check-not cmpd --implicit-check-not cmpl

@glob = common local_unnamed_addr global i64 0, align 8

define i64 @test_llneull(i64 %a, i64 %b) {
; CHECK-LABEL: test_llneull:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xor r3, r3, r4
; CHECK-NEXT:    addic r4, r3, -1
; CHECK-NEXT:    subfe r3, r4, r3
; CHECK-NEXT:    blr
entry:
  %cmp = icmp ne i64 %a, %b
  %conv1 = zext i1 %cmp to i64
  ret i64 %conv1
}

define i64 @test_llneull_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_llneull_sext:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xor r3, r3, r4
; CHECK-NEXT:    subfic r3, r3, 0
; CHECK-NEXT:    subfe r3, r3, r3
; CHECK-NEXT:    blr
entry:
  %cmp = icmp ne i64 %a, %b
  %conv1 = sext i1 %cmp to i64
  ret i64 %conv1
}

define i64 @test_llneull_z(i64 %a) {
; CHECK-LABEL: test_llneull_z:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addic r4, r3, -1
; CHECK-NEXT:    subfe r3, r4, r3
; CHECK-NEXT:    blr
entry:
  %cmp = icmp ne i64 %a, 0
  %conv1 = zext i1 %cmp to i64
  ret i64 %conv1
}

define i64 @test_llneull_sext_z(i64 %a) {
; CHECK-LABEL: test_llneull_sext_z:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subfic r3, r3, 0
; CHECK-NEXT:    subfe r3, r3, r3
; CHECK-NEXT:    blr
entry:
  %cmp = icmp ne i64 %a, 0
  %conv1 = sext i1 %cmp to i64
  ret i64 %conv1
}

define void @test_llneull_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_llneull_store:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addis r5, r2, .LC0@toc@ha
; CHECK-NEXT:    xor r3, r3, r4
; CHECK-NEXT:    ld r4, .LC0@toc@l(r5)
; CHECK-NEXT:    addic r5, r3, -1
; CHECK-NEXT:    subfe r3, r5, r3
; CHECK-NEXT:    std r3, 0(r4)
; CHECK-NEXT:    blr
entry:
  %cmp = icmp ne i64 %a, %b
  %conv1 = zext i1 %cmp to i64
  store i64 %conv1, i64* @glob, align 8
  ret void
}

define void @test_llneull_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_llneull_sext_store:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addis r5, r2, .LC0@toc@ha
; CHECK-NEXT:    xor r3, r3, r4
; CHECK-NEXT:    ld r4, .LC0@toc@l(r5)
; CHECK-NEXT:    subfic r3, r3, 0
; CHECK-NEXT:    subfe r3, r3, r3
; CHECK-NEXT:    std r3, 0(r4)
; CHECK-NEXT:    blr
entry:
  %cmp = icmp ne i64 %a, %b
  %conv1 = sext i1 %cmp to i64
  store i64 %conv1, i64* @glob, align 8
  ret void
}

define void @test_llneull_z_store(i64 %a) {
; CHECK-LABEL: test_llneull_z_store:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
; CHECK-NEXT:    addic r5, r3, -1
; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)
; CHECK-NEXT:    subfe r3, r5, r3
; CHECK-NEXT:    std r3, 0(r4)
; CHECK-NEXT:    blr
entry:
  %cmp = icmp ne i64 %a, 0
  %conv1 = zext i1 %cmp to i64
  store i64 %conv1, i64* @glob, align 8
  ret void
}

define void @test_llneull_sext_z_store(i64 %a) {
; CHECK-LABEL: test_llneull_sext_z_store:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
; CHECK-NEXT:    subfic r3, r3, 0
; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)
; CHECK-NEXT:    subfe r3, r3, r3
; CHECK-NEXT:    std r3, 0(r4)
; CHECK-NEXT:    blr
entry:
  %cmp = icmp ne i64 %a, 0
  %conv1 = sext i1 %cmp to i64
  store i64 %conv1, i64* @glob, align 8
  ret void
}