; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -march=amdgcn -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s

define float @v_constained_fadd_f32_fpexcept_strict(float %x, float %y) #0 {
  ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_strict
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK: [[STRICT_FADD:%[0-9]+]]:_(s32) = G_STRICT_FADD [[COPY]], [[COPY1]]
  ; CHECK: $vgpr0 = COPY [[STRICT_FADD]](s32)
  ; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
  ; CHECK: S_SETPC_B64_return [[COPY3]], implicit $vgpr0
  %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
  ret float %val
}

define float @v_constained_fadd_f32_fpexcept_strict_flags(float %x, float %y) #0 {
  ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_strict_flags
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK: [[STRICT_FADD:%[0-9]+]]:_(s32) = nsz G_STRICT_FADD [[COPY]], [[COPY1]]
  ; CHECK: $vgpr0 = COPY [[STRICT_FADD]](s32)
  ; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
  ; CHECK: S_SETPC_B64_return [[COPY3]], implicit $vgpr0
  %val = call nsz float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
  ret float %val
}

define float @v_constained_fadd_f32_fpexcept_ignore(float %x, float %y) #0 {
  ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_ignore
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK: %3:_(s32) = nofpexcept G_STRICT_FADD [[COPY]], [[COPY1]]
  ; CHECK: $vgpr0 = COPY %3(s32)
  ; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
  ; CHECK: S_SETPC_B64_return [[COPY3]], implicit $vgpr0
  %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
  ret float %val
}

define float @v_constained_fadd_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
  ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_ignore_flags
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK: %3:_(s32) = nsz nofpexcept G_STRICT_FADD [[COPY]], [[COPY1]]
  ; CHECK: $vgpr0 = COPY %3(s32)
  ; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
  ; CHECK: S_SETPC_B64_return [[COPY3]], implicit $vgpr0
  %val = call nsz float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
  ret float %val
}

define float @v_constained_fadd_f32_fpexcept_maytrap(float %x, float %y) #0 {
  ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_maytrap
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK: [[STRICT_FADD:%[0-9]+]]:_(s32) = G_STRICT_FADD [[COPY]], [[COPY1]]
  ; CHECK: $vgpr0 = COPY [[STRICT_FADD]](s32)
  ; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
  ; CHECK: S_SETPC_B64_return [[COPY3]], implicit $vgpr0
  %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
  ret float %val
}

define <2 x float> @v_constained_fadd_v2f32_fpexcept_strict(<2 x float> %x, <2 x float> %y) #0 {
  ; CHECK-LABEL: name: v_constained_fadd_v2f32_fpexcept_strict
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; CHECK: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
  ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
  ; CHECK: [[STRICT_FADD:%[0-9]+]]:_(<2 x s32>) = G_STRICT_FADD [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
  ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[STRICT_FADD]](<2 x s32>)
  ; CHECK: $vgpr0 = COPY [[UV]](s32)
  ; CHECK: $vgpr1 = COPY [[UV1]](s32)
  ; CHECK: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
  ; CHECK: S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
  %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
  ret <2 x float> %val
}

define <2 x float> @v_constained_fadd_v2f32_fpexcept_ignore(<2 x float> %x, <2 x float> %y) #0 {
  ; CHECK-LABEL: name: v_constained_fadd_v2f32_fpexcept_ignore
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; CHECK: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
  ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
  ; CHECK: %7:_(<2 x s32>) = nofpexcept G_STRICT_FADD [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
  ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES %7(<2 x s32>)
  ; CHECK: $vgpr0 = COPY [[UV]](s32)
  ; CHECK: $vgpr1 = COPY [[UV1]](s32)
  ; CHECK: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
  ; CHECK: S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
  %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
  ret <2 x float> %val
}

define <2 x float> @v_constained_fadd_v2f32_fpexcept_maytrap(<2 x float> %x, <2 x float> %y) #0 {
  ; CHECK-LABEL: name: v_constained_fadd_v2f32_fpexcept_maytrap
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; CHECK: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
  ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
  ; CHECK: [[STRICT_FADD:%[0-9]+]]:_(<2 x s32>) = G_STRICT_FADD [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
  ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[STRICT_FADD]](<2 x s32>)
  ; CHECK: $vgpr0 = COPY [[UV]](s32)
  ; CHECK: $vgpr1 = COPY [[UV1]](s32)
  ; CHECK: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
  ; CHECK: S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
  %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
  ret <2 x float> %val
}

define float @v_constained_fsub_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
  ; CHECK-LABEL: name: v_constained_fsub_f32_fpexcept_ignore_flags
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK: %3:_(s32) = nsz nofpexcept G_STRICT_FSUB [[COPY]], [[COPY1]]
  ; CHECK: $vgpr0 = COPY %3(s32)
  ; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
  ; CHECK: S_SETPC_B64_return [[COPY3]], implicit $vgpr0
  %val = call nsz float @llvm.experimental.constrained.fsub.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
  ret float %val
}

define float @v_constained_fmul_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
  ; CHECK-LABEL: name: v_constained_fmul_f32_fpexcept_ignore_flags
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK: %3:_(s32) = nsz nofpexcept G_STRICT_FMUL [[COPY]], [[COPY1]]
  ; CHECK: $vgpr0 = COPY %3(s32)
  ; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
  ; CHECK: S_SETPC_B64_return [[COPY3]], implicit $vgpr0
  %val = call nsz float @llvm.experimental.constrained.fmul.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
  ret float %val
}

define float @v_constained_fdiv_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
  ; CHECK-LABEL: name: v_constained_fdiv_f32_fpexcept_ignore_flags
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK: %3:_(s32) = nsz nofpexcept G_STRICT_FDIV [[COPY]], [[COPY1]]
  ; CHECK: $vgpr0 = COPY %3(s32)
  ; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
  ; CHECK: S_SETPC_B64_return [[COPY3]], implicit $vgpr0
  %val = call nsz float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
  ret float %val
}

define float @v_constained_frem_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
  ; CHECK-LABEL: name: v_constained_frem_f32_fpexcept_ignore_flags
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK: %3:_(s32) = nsz nofpexcept G_STRICT_FREM [[COPY]], [[COPY1]]
  ; CHECK: $vgpr0 = COPY %3(s32)
  ; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
  ; CHECK: S_SETPC_B64_return [[COPY3]], implicit $vgpr0
  %val = call nsz float @llvm.experimental.constrained.frem.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
  ret float %val
}

define float @v_constained_fma_f32_fpexcept_ignore_flags(float %x, float %y, float %z) #0 {
  ; CHECK-LABEL: name: v_constained_fma_f32_fpexcept_ignore_flags
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; CHECK: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK: %4:_(s32) = nsz nofpexcept G_STRICT_FMA [[COPY]], [[COPY1]], [[COPY2]]
  ; CHECK: $vgpr0 = COPY %4(s32)
  ; CHECK: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
  ; CHECK: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
  %val = call nsz float @llvm.experimental.constrained.fma.f32(float %x, float %y, float %z, metadata !"round.tonearest", metadata !"fpexcept.ignore")
  ret float %val
}

define float @v_constained_sqrt_f32_fpexcept_strict(float %x) #0 {
  ; CHECK-LABEL: name: v_constained_sqrt_f32_fpexcept_strict
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
  ; CHECK: [[STRICT_FSQRT:%[0-9]+]]:_(s32) = G_STRICT_FSQRT [[COPY]]
  ; CHECK: $vgpr0 = COPY [[STRICT_FSQRT]](s32)
  ; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
  ; CHECK: S_SETPC_B64_return [[COPY2]], implicit $vgpr0
  %val = call float @llvm.experimental.constrained.sqrt.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
  ret float %val
}

declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata) #1
declare <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float>, <2 x float>, metadata, metadata) #1
declare <3 x float> @llvm.experimental.constrained.fadd.v3f32(<3 x float>, <3 x float>, metadata, metadata) #1
declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata) #1
declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata) #1
declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata) #1
declare float @llvm.experimental.constrained.frem.f32(float, float, metadata, metadata) #1
declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata) #1
declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata) #1

attributes #0 = { strictfp }
attributes #1 = { inaccessiblememonly nounwind willreturn }