; RUN: llc < %s -mtriple=arm-eabi -mattr=+fullfp16 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s
; RUN: llc < %s -mtriple thumbv7a -mattr=+fullfp16 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s

; TODO: we can't pass half-precision arguments as "half" types yet. We work
; around that for the time being by passing "float %f.coerce" and the necessary
; bitconverts/truncates. In these tests we pass i16 and use one bitconvert,
; which is the shortest way to get a half type. But when we can pass half
; types, we want to use that here (see the sketch at the end of this file).

define half @fp16_vminnm_o(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_o:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vminnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %1 = bitcast i16 %b to half
  %cmp = fcmp fast olt half %0, %1
  %cond = select i1 %cmp, half %0, half %1
  ret half %cond
}

define half @fp16_vminnm_o_rev(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_o_rev:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vminnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %1 = bitcast i16 %b to half
  %cmp = fcmp fast ogt half %0, %1
  %cond = select i1 %cmp, half %1, half %0
  ret half %cond
}

define half @fp16_vminnm_u(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_u:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vminnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %1 = bitcast i16 %b to half
  %cmp = fcmp fast ult half %0, %1
  %cond = select i1 %cmp, half %0, half %1
  ret half %cond
}

define half @fp16_vminnm_ule(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_ule:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vminnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %1 = bitcast i16 %b to half
  %cmp = fcmp fast ule half %0, %1
  %cond = select i1 %cmp, half %0, half %1
  ret half %cond
}

define half @fp16_vminnm_u_rev(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_u_rev:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vminnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %1 = bitcast i16 %b to half
  %cmp = fcmp fast ugt half %0, %1
  %cond = select i1 %cmp, half %1, half %0
  ret half %cond
}

define half @fp16_vmaxnm_o(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_o:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %1 = bitcast i16 %b to half
  %cmp = fcmp fast ogt half %0, %1
  %cond = select i1 %cmp, half %0, half %1
  ret half %cond
}

define half @fp16_vmaxnm_oge(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_oge:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %1 = bitcast i16 %b to half
  %cmp = fcmp fast oge half %0, %1
  %cond = select i1 %cmp, half %0, half %1
  ret half %cond
}

define half @fp16_vmaxnm_o_rev(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_o_rev:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %1 = bitcast i16 %b to half
  %cmp = fcmp fast olt half %0, %1
  %cond = select i1 %cmp, half %1, half %0
  ret half %cond
}

define half @fp16_vmaxnm_ole_rev(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_ole_rev:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %1 = bitcast i16 %b to half
  %cmp = fcmp fast ole half %0, %1
  %cond = select i1 %cmp, half %1, half %0
  ret half %cond
}

define half @fp16_vmaxnm_u(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_u:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %1 = bitcast i16 %b to half
  %cmp = fcmp fast ugt half %0, %1
  %cond = select i1 %cmp, half %0, half %1
  ret half %cond
}

define half @fp16_vmaxnm_uge(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_uge:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %1 = bitcast i16 %b to half
  %cmp = fcmp fast uge half %0, %1
  %cond = select i1 %cmp, half %0, half %1
  ret half %cond
}

define half @fp16_vmaxnm_u_rev(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_u_rev:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %1 = bitcast i16 %b to half
  %cmp = fcmp fast ult half %0, %1
  %cond = select i1 %cmp, half %1, half %0
  ret half %cond
}

; known non-NaNs

define half @fp16_vminnm_NNNo(i16 signext %a) {
; CHECK-LABEL: fp16_vminnm_NNNo:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01
; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
entry:
  %0 = bitcast i16 %a to half
  %cmp1 = fcmp fast olt half %0, 12.
  %cond1 = select i1 %cmp1, half %0, half 12.
  %cmp2 = fcmp fast olt half 34., %cond1
  %cond2 = select i1 %cmp2, half 34., half %cond1
  ret half %cond2
}

define half @fp16_vminnm_NNNo_rev(i16 signext %a) {
; CHECK-LABEL: fp16_vminnm_NNNo_rev:
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
entry:
  %0 = bitcast i16 %a to half
  %cmp1 = fcmp fast ogt half %0, 56.
  %cond1 = select i1 %cmp1, half 56., half %0
  %cmp2 = fcmp fast ogt half 78., %cond1
  %cond2 = select i1 %cmp2, half %cond1, half 78.
  ret half %cond2
}

define half @fp16_vminnm_NNNu(i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_NNNu:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01
; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
entry:
  %0 = bitcast i16 %b to half
  %cmp1 = fcmp fast ult half 12., %0
  %cond1 = select i1 %cmp1, half 12., half %0
  %cmp2 = fcmp fast ult half %cond1, 34.
  %cond2 = select i1 %cmp2, half %cond1, half 34.
  ret half %cond2
}

define half @fp16_vminnm_NNNule(i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_NNNule:
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
entry:
  %0 = bitcast i16 %b to half
  %cmp1 = fcmp fast ule half 34., %0
  %cond1 = select i1 %cmp1, half 34., half %0
  %cmp2 = fcmp fast ule half %cond1, 56.
  %cond2 = select i1 %cmp2, half %cond1, half 56.
  ret half %cond2
}

define half @fp16_vminnm_NNNu_rev(i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_NNNu_rev:
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
entry:
  %0 = bitcast i16 %b to half
  %cmp1 = fcmp fast ugt half 56., %0
  %cond1 = select i1 %cmp1, half %0, half 56.
  %cmp2 = fcmp fast ugt half %cond1, 78.
  %cond2 = select i1 %cmp2, half 78., half %cond1
  ret half %cond2
}

define half @fp16_vmaxnm_NNNo(i16 signext %a) {
; CHECK-LABEL: fp16_vmaxnm_NNNo:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01
; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
entry:
  %0 = bitcast i16 %a to half
  %cmp1 = fcmp fast ogt half %0, 12.
  %cond1 = select i1 %cmp1, half %0, half 12.
  %cmp2 = fcmp fast ogt half 34., %cond1
  %cond2 = select i1 %cmp2, half 34., half %cond1
  ret half %cond2
}

define half @fp16_vmaxnm_NNNoge(i16 signext %a) {
; CHECK-LABEL: fp16_vmaxnm_NNNoge:
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
entry:
  %0 = bitcast i16 %a to half
  %cmp1 = fcmp fast oge half %0, 34.
  %cond1 = select i1 %cmp1, half %0, half 34.
  %cmp2 = fcmp fast oge half 56., %cond1
  %cond2 = select i1 %cmp2, half 56., half %cond1
  ret half %cond2
}

define half @fp16_vmaxnm_NNNo_rev(i16 signext %a) {
; CHECK-LABEL: fp16_vmaxnm_NNNo_rev:
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
entry:
  %0 = bitcast i16 %a to half
  %cmp1 = fcmp fast olt half %0, 56.
  %cond1 = select i1 %cmp1, half 56., half %0
  %cmp2 = fcmp fast olt half 78., %cond1
  %cond2 = select i1 %cmp2, half %cond1, half 78.
  ret half %cond2
}

define half @fp16_vmaxnm_NNNole_rev(i16 signext %a) {
; CHECK-LABEL: fp16_vmaxnm_NNNole_rev:
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
entry:
  %0 = bitcast i16 %a to half
  %cmp1 = fcmp fast ole half %0, 78.
  %cond1 = select i1 %cmp1, half 78., half %0
  %cmp2 = fcmp fast ole half 90., %cond1
  %cond2 = select i1 %cmp2, half %cond1, half 90.
  ret half %cond2
}

define half @fp16_vmaxnm_NNNu(i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_NNNu:
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01
; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
entry:
  %0 = bitcast i16 %b to half
  %cmp1 = fcmp fast ugt half 12., %0
  %cond1 = select i1 %cmp1, half 12., half %0
  %cmp2 = fcmp fast ugt half %cond1, 34.
  %cond2 = select i1 %cmp2, half %cond1, half 34.
  ret half %cond2
}

define half @fp16_vmaxnm_NNNuge(i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_NNNuge:
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
entry:
  %0 = bitcast i16 %b to half
  %cmp1 = fcmp fast uge half 34., %0
  %cond1 = select i1 %cmp1, half 34., half %0
  %cmp2 = fcmp fast uge half %cond1, 56.
  %cond2 = select i1 %cmp2, half %cond1, half 56.
  ret half %cond2
}

define half @fp16_vmaxnm_NNNu_rev(i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_NNNu_rev:
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
; CHECK: vldr.16 s2, .LCPI{{.*}}
; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
entry:
  %0 = bitcast i16 %b to half
  %cmp1 = fcmp fast ult half 56., %0
  %cond1 = select i1 %cmp1, half %0, half 56.
  %cmp2 = fcmp fast ult half %cond1, 78.
  %cond2 = select i1 %cmp2, half 78., half %cond1
  ret half %cond2
}

define half @fp16_vminmaxnm_0(i16 signext %a) {
; CHECK-LABEL: fp16_vminmaxnm_0:
; CHECK: vldr.16 s0, .LCPI{{.*}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vminnm.f16 s2, s2, s0
; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %cmp1 = fcmp fast olt half %0, 0.
  %cond1 = select i1 %cmp1, half %0, half 0.
  %cmp2 = fcmp fast ogt half %cond1, 0.
  %cond2 = select i1 %cmp2, half %cond1, half 0.
  ret half %cond2
}

define half @fp16_vminmaxnm_neg0(i16 signext %a) {
; CHECK-LABEL: fp16_vminmaxnm_neg0:
; CHECK: vldr.16 s0, .LCPI{{.*}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vminnm.f16 s2, s2, s0
; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %cmp1 = fcmp fast olt half %0, -0.
  %cond1 = select i1 %cmp1, half %0, half -0.
  %cmp2 = fcmp fast ugt half %cond1, -0.
  %cond2 = select i1 %cmp2, half %cond1, half -0.
  ret half %cond2
}

define half @fp16_vminmaxnm_e_0(i16 signext %a) {
; CHECK-LABEL: fp16_vminmaxnm_e_0:
; CHECK: vldr.16 s0, .LCPI{{.*}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vminnm.f16 s2, s2, s0
; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %cmp1 = fcmp fast ule half 0., %0
  %cond1 = select i1 %cmp1, half 0., half %0
  %cmp2 = fcmp fast uge half 0., %cond1
  %cond2 = select i1 %cmp2, half 0., half %cond1
  ret half %cond2
}

define half @fp16_vminmaxnm_e_neg0(i16 signext %a) {
; CHECK-LABEL: fp16_vminmaxnm_e_neg0:
; CHECK: vldr.16 s0, .LCPI{{.*}}
; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
; CHECK: vminnm.f16 s2, s2, s0
; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
entry:
  %0 = bitcast i16 %a to half
  %cmp1 = fcmp fast ule half -0., %0
  %cond1 = select i1 %cmp1, half -0., half %0
  %cmp2 = fcmp fast oge half -0., %cond1
  %cond2 = select i1 %cmp2, half -0., half %cond1
  ret half %cond2
}
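
; A minimal sketch of the form these tests could take once half arguments can
; be passed directly, as noted in the TODO at the top of this file. The
; function name is illustrative only, and the sketch carries no CHECK lines;
; it is kept commented out so it does not affect the current checks.
;
;   define half @fp16_vminnm_o_direct(half %a, half %b) {
;   entry:
;     %cmp = fcmp fast olt half %a, %b
;     %cond = select i1 %cmp, half %a, half %b
;     ret half %cond
;   }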