; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=powerpc64le-- | FileCheck %s
;
; Tests that sign-bit checks (icmp sgt X, -1 / icmp slt X, 0), their
; zext/sext forms, selects on them, and explicit shifts of the sign bit
; (lshr/xor by 31) lower to shift-based PPC code rather than compares.

; If positive...

define i32 @zext_ifpos(i32 %x) {
; CHECK-LABEL: zext_ifpos:
; CHECK:       # %bb.0:
; CHECK-NEXT:    nor 3, 3, 3
; CHECK-NEXT:    srwi 3, 3, 31
; CHECK-NEXT:    blr
  %c = icmp sgt i32 %x, -1
  %e = zext i1 %c to i32
  ret i32 %e
}

define i32 @add_zext_ifpos(i32 %x) {
; CHECK-LABEL: add_zext_ifpos:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    addi 3, 3, 42
; CHECK-NEXT:    blr
  %c = icmp sgt i32 %x, -1
  %e = zext i1 %c to i32
  %r = add i32 %e, 41
  ret i32 %r
}

define <4 x i32> @add_zext_ifpos_vec_splat(<4 x i32> %x) {
; CHECK-LABEL: add_zext_ifpos_vec_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vspltisb 3, -1
; CHECK-NEXT:    addis 3, 2, .LCPI2_0@toc@ha
; CHECK-NEXT:    addi 3, 3, .LCPI2_0@toc@l
; CHECK-NEXT:    vcmpgtsw 2, 2, 3
; CHECK-NEXT:    lvx 3, 0, 3
; CHECK-NEXT:    vsubuwm 2, 3, 2
; CHECK-NEXT:    blr
  %c = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  %e = zext <4 x i1> %c to <4 x i32>
  %r = add <4 x i32> %e, <i32 41, i32 41, i32 41, i32 41>
  ret <4 x i32> %r
}

define i32 @sel_ifpos_tval_bigger(i32 %x) {
; CHECK-LABEL: sel_ifpos_tval_bigger:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li 4, 41
; CHECK-NEXT:    cmpwi 0, 3, -1
; CHECK-NEXT:    li 3, 42
; CHECK-NEXT:    isel 3, 3, 4, 1
; CHECK-NEXT:    blr
  %c = icmp sgt i32 %x, -1
  %r = select i1 %c, i32 42, i32 41
  ret i32 %r
}

define i32 @sext_ifpos(i32 %x) {
; CHECK-LABEL: sext_ifpos:
; CHECK:       # %bb.0:
; CHECK-NEXT:    nor 3, 3, 3
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    blr
  %c = icmp sgt i32 %x, -1
  %e = sext i1 %c to i32
  ret i32 %e
}

define i32 @add_sext_ifpos(i32 %x) {
; CHECK-LABEL: add_sext_ifpos:
; CHECK:       # %bb.0:
; CHECK-NEXT:    nor 3, 3, 3
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    addi 3, 3, 42
; CHECK-NEXT:    blr
  %c = icmp sgt i32 %x, -1
  %e = sext i1 %c to i32
  %r = add i32 %e, 42
  ret i32 %r
}

define <4 x i32> @add_sext_ifpos_vec_splat(<4 x i32> %x) {
; CHECK-LABEL: add_sext_ifpos_vec_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vspltisb 3, -1
; CHECK-NEXT:    addis 3, 2, .LCPI6_0@toc@ha
; CHECK-NEXT:    addi 3, 3, .LCPI6_0@toc@l
; CHECK-NEXT:    vcmpgtsw 2, 2, 3
; CHECK-NEXT:    lvx 3, 0, 3
; CHECK-NEXT:    vadduwm 2, 2, 3
; CHECK-NEXT:    blr
  %c = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  %e = sext <4 x i1> %c to <4 x i32>
  %r = add <4 x i32> %e, <i32 42, i32 42, i32 42, i32 42>
  ret <4 x i32> %r
}

define i32 @sel_ifpos_fval_bigger(i32 %x) {
; CHECK-LABEL: sel_ifpos_fval_bigger:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li 4, 42
; CHECK-NEXT:    cmpwi 0, 3, -1
; CHECK-NEXT:    li 3, 41
; CHECK-NEXT:    isel 3, 3, 4, 1
; CHECK-NEXT:    blr
  %c = icmp sgt i32 %x, -1
  %r = select i1 %c, i32 41, i32 42
  ret i32 %r
}

; If negative...

define i32 @zext_ifneg(i32 %x) {
; CHECK-LABEL: zext_ifneg:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srwi 3, 3, 31
; CHECK-NEXT:    blr
  %c = icmp slt i32 %x, 0
  %r = zext i1 %c to i32
  ret i32 %r
}

define i32 @add_zext_ifneg(i32 %x) {
; CHECK-LABEL: add_zext_ifneg:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srwi 3, 3, 31
; CHECK-NEXT:    addi 3, 3, 41
; CHECK-NEXT:    blr
  %c = icmp slt i32 %x, 0
  %e = zext i1 %c to i32
  %r = add i32 %e, 41
  ret i32 %r
}

define i32 @sel_ifneg_tval_bigger(i32 %x) {
; CHECK-LABEL: sel_ifneg_tval_bigger:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li 4, 41
; CHECK-NEXT:    cmpwi 0, 3, 0
; CHECK-NEXT:    li 3, 42
; CHECK-NEXT:    isel 3, 3, 4, 0
; CHECK-NEXT:    blr
  %c = icmp slt i32 %x, 0
  %r = select i1 %c, i32 42, i32 41
  ret i32 %r
}

define i32 @sext_ifneg(i32 %x) {
; CHECK-LABEL: sext_ifneg:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    blr
  %c = icmp slt i32 %x, 0
  %r = sext i1 %c to i32
  ret i32 %r
}

define i32 @add_sext_ifneg(i32 %x) {
; CHECK-LABEL: add_sext_ifneg:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    addi 3, 3, 42
; CHECK-NEXT:    blr
  %c = icmp slt i32 %x, 0
  %e = sext i1 %c to i32
  %r = add i32 %e, 42
  ret i32 %r
}

define i32 @sel_ifneg_fval_bigger(i32 %x) {
; CHECK-LABEL: sel_ifneg_fval_bigger:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li 4, 42
; CHECK-NEXT:    cmpwi 0, 3, 0
; CHECK-NEXT:    li 3, 41
; CHECK-NEXT:    isel 3, 3, 4, 0
; CHECK-NEXT:    blr
  %c = icmp slt i32 %x, 0
  %r = select i1 %c, i32 41, i32 42
  ret i32 %r
}

define i32 @add_lshr_not(i32 %x) {
; CHECK-LABEL: add_lshr_not:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    addi 3, 3, 42
; CHECK-NEXT:    blr
  %not = xor i32 %x, -1
  %sh = lshr i32 %not, 31
  %r = add i32 %sh, 41
  ret i32 %r
}

define <4 x i32> @add_lshr_not_vec_splat(<4 x i32> %x) {
; CHECK-LABEL: add_lshr_not_vec_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vspltisw 3, -16
; CHECK-NEXT:    vspltisw 4, 15
; CHECK-NEXT:    addis 3, 2, .LCPI15_0@toc@ha
; CHECK-NEXT:    addi 3, 3, .LCPI15_0@toc@l
; CHECK-NEXT:    vsubuwm 3, 4, 3
; CHECK-NEXT:    vsraw 2, 2, 3
; CHECK-NEXT:    lvx 3, 0, 3
; CHECK-NEXT:    vadduwm 2, 2, 3
; CHECK-NEXT:    blr
  %c = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  %e = lshr <4 x i32> %c, <i32 31, i32 31, i32 31, i32 31>
  %r = add <4 x i32> %e, <i32 42, i32 42, i32 42, i32 42>
  ret <4 x i32> %r
}

define i32 @sub_lshr_not(i32 %x) {
; CHECK-LABEL: sub_lshr_not:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srwi 3, 3, 31
; CHECK-NEXT:    ori 3, 3, 42
; CHECK-NEXT:    blr
  %not = xor i32 %x, -1
  %sh = lshr i32 %not, 31
  %r = sub i32 43, %sh
  ret i32 %r
}

define <4 x i32> @sub_lshr_not_vec_splat(<4 x i32> %x) {
; CHECK-LABEL: sub_lshr_not_vec_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vspltisw 3, -16
; CHECK-NEXT:    vspltisw 4, 15
; CHECK-NEXT:    addis 3, 2, .LCPI17_0@toc@ha
; CHECK-NEXT:    addi 3, 3, .LCPI17_0@toc@l
; CHECK-NEXT:    vsubuwm 3, 4, 3
; CHECK-NEXT:    vsrw 2, 2, 3
; CHECK-NEXT:    lvx 3, 0, 3
; CHECK-NEXT:    vadduwm 2, 2, 3
; CHECK-NEXT:    blr
  %c = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  %e = lshr <4 x i32> %c, <i32 31, i32 31, i32 31, i32 31>
  %r = sub <4 x i32> <i32 42, i32 42, i32 42, i32 42>, %e
  ret <4 x i32> %r
}

define i32 @sub_lshr(i32 %x, i32 %y) {
; CHECK-LABEL: sub_lshr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    add 3, 4, 3
; CHECK-NEXT:    blr
  %sh = lshr i32 %x, 31
  %r = sub i32 %y, %sh
  ret i32 %r
}

define <4 x i32> @sub_lshr_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: sub_lshr_vec:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vspltisw 4, -16
; CHECK-NEXT:    vspltisw 5, 15
; CHECK-NEXT:    vsubuwm 4, 5, 4
; CHECK-NEXT:    vsraw 2, 2, 4
; CHECK-NEXT:    vadduwm 2, 3, 2
; CHECK-NEXT:    blr
  %sh = lshr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
  %r = sub <4 x i32> %y, %sh
  ret <4 x i32> %r
}

define i32 @sub_const_op_lshr(i32 %x) {
; CHECK-LABEL: sub_const_op_lshr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    addi 3, 3, 43
; CHECK-NEXT:    blr
  %sh = lshr i32 %x, 31
  %r = sub i32 43, %sh
  ret i32 %r
}

define <4 x i32> @sub_const_op_lshr_vec(<4 x i32> %x) {
; CHECK-LABEL: sub_const_op_lshr_vec:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vspltisw 3, -16
; CHECK-NEXT:    vspltisw 4, 15
; CHECK-NEXT:    addis 3, 2, .LCPI21_0@toc@ha
; CHECK-NEXT:    addi 3, 3, .LCPI21_0@toc@l
; CHECK-NEXT:    vsubuwm 3, 4, 3
; CHECK-NEXT:    vsraw 2, 2, 3
; CHECK-NEXT:    lvx 3, 0, 3
; CHECK-NEXT:    vadduwm 2, 2, 3
; CHECK-NEXT:    blr
  %sh = lshr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
  %r = sub <4 x i32> <i32 42, i32 42, i32 42, i32 42>, %sh
  ret <4 x i32> %r
}