1//===- Combine.td - Combine rule definitions ---------------*- tablegen -*-===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8// 9// Declare GlobalISel combine rules and provide mechanisms to opt-out. 10// 11//===----------------------------------------------------------------------===// 12 13 14//===----------------------------------------------------------------------===// 15// Base Classes 16// 17// These are the core classes that the combiner backend relies on. 18//===----------------------------------------------------------------------===// 19 20/// All arguments of the defs operator must be subclasses of GIDefKind or 21/// sub-dags whose operator is GIDefKindWithArgs. 22class GIDefKind; 23class GIDefKindWithArgs; 24 25/// Declare a root node. There must be at least one of these in every combine 26/// rule. 27def root : GIDefKind; 28 29def defs; 30 31def pattern; 32def match; 33def apply; 34 35def wip_match_opcode; 36 37// Common base class for GICombineRule and GICombineGroup. 38class GICombine { 39 // See GICombineGroup. We only declare it here to make the tablegen pass 40 // simpler. 41 list<GICombine> Rules = ?; 42} 43 44// A group of combine rules that can be added to a GICombiner or another group. 45class GICombineGroup<list<GICombine> rules> : GICombine { 46 // The rules contained in this group. The rules in a group are flattened into 47 // a single list and sorted into whatever order is most efficient. However, 48 // they will never be re-ordered such that behaviour differs from the 49 // specified order. It is therefore possible to use the order of rules in this 50 // list to describe priorities. 51 let Rules = rules; 52} 53 54// Declares a combiner implementation class 55class GICombiner<string classname, list<GICombine> rules> 56 : GICombineGroup<rules> { 57 // The class name to use in the generated output. 58 string Classname = classname; 59 // Combiners can use this so they're free to define tryCombineAll themselves 60 // and do extra work before/after calling the TableGen-erated code. 61 string CombineAllMethodName = "tryCombineAll"; 62} 63 64/// Declares data that is passed from the match stage to the apply stage. 65class GIDefMatchData<string type> { 66 /// A C++ type name indicating the storage type. 67 string Type = type; 68} 69 70class GICombineRule<dag defs, dag match, dag apply> : GICombine { 71 /// Defines the external interface of the match rule. This includes: 72 /// * The names of the root nodes (requires at least one) 73 /// See GIDefKind for details. 74 dag Defs = defs; 75 76 /// Defines the things which must be true for the pattern to match 77 dag Match = match; 78 79 /// Defines the things which happen after the decision is made to apply a 80 /// combine rule. 81 dag Apply = apply; 82 83 /// Defines the predicates that are checked before the match function 84 /// is called. Targets can use this to, for instance, check Subtarget 85 /// features. 86 list<Predicate> Predicates = []; 87 88 // Maximum number of permutations of this rule that can be emitted. 89 // Set to -1 to disable the limit. 90 int MaxPermutations = 16; 91} 92 93def gi_mo; 94def gi_imm; 95 96// This is an equivalent of PatFrags but for MIR Patterns. 97// 98// GICombinePatFrags can be used in place of instructions for 'match' patterns. 99// Much like normal instructions, the defs (outs) come first, and the ins second 100// 101// Out operands can only be of type "root" or "gi_mo", and they must be defined 102// by an instruction pattern in all alternatives. 103// 104// In operands can be gi_imm or gi_mo. They cannot be redefined in any alternative 105// pattern and may only appear in the C++ code, or in the output operand of an 106// instruction pattern. 107class GICombinePatFrag<dag outs, dag ins, list<dag> alts> { 108 dag InOperands = ins; 109 dag OutOperands = outs; 110 list<dag> Alternatives = alts; 111} 112 113//===----------------------------------------------------------------------===// 114// Pattern Special Types 115//===----------------------------------------------------------------------===// 116 117class GISpecialType; 118 119// In an apply pattern, GITypeOf can be used to set the type of a new temporary 120// register to match the type of a matched register. 121// 122// This can only be used on temporary registers defined by the apply pattern. 123// 124// TODO: Make this work in matchers as well? 125// 126// FIXME: Syntax is very ugly. 127class GITypeOf<string opName> : GISpecialType { 128 string OpName = opName; 129} 130 131//===----------------------------------------------------------------------===// 132// Pattern Builtins 133//===----------------------------------------------------------------------===// 134 135// "Magic" Builtin instructions for MIR patterns. 136// The definitions that implement 137class GIBuiltinInst; 138 139// Replace all references to a register with another one. 140// 141// Usage: 142// (apply (GIReplaceReg $old, $new)) 143// 144// Operands: 145// - $old (out) register defined by a matched instruction 146// - $new (in) register 147// 148// Semantics: 149// - Can only appear in an 'apply' pattern. 150// - If both old/new are operands of matched instructions, 151// "canReplaceReg" is checked before applying the rule. 152def GIReplaceReg : GIBuiltinInst; 153 154// Apply action that erases the match root. 155// 156// Usage: 157// (apply (GIEraseRoot)) 158// 159// Semantics: 160// - Can only appear as the only pattern of an 'apply' pattern list. 161// - The root cannot have any output operands. 162// - The root must be a CodeGenInstruction 163// 164// TODO: Allow using this directly, like (apply GIEraseRoot) 165def GIEraseRoot : GIBuiltinInst; 166 167//===----------------------------------------------------------------------===// 168// Pattern MIFlags 169//===----------------------------------------------------------------------===// 170 171class MIFlagEnum<string enumName> { 172 string EnumName = "MachineInstr::" # enumName; 173} 174 175def FmNoNans : MIFlagEnum<"FmNoNans">; 176def FmNoInfs : MIFlagEnum<"FmNoInfs">; 177def FmNsz : MIFlagEnum<"FmNsz">; 178def FmArcp : MIFlagEnum<"FmArcp">; 179def FmContract : MIFlagEnum<"FmContract">; 180def FmAfn : MIFlagEnum<"FmAfn">; 181def FmReassoc : MIFlagEnum<"FmReassoc">; 182 183def MIFlags; 184// def not; -> Already defined as a SDNode 185 186//===----------------------------------------------------------------------===// 187 188def extending_load_matchdata : GIDefMatchData<"PreferredTuple">; 189def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">; 190def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">; 191 192def register_matchinfo: GIDefMatchData<"Register">; 193def int64_matchinfo: GIDefMatchData<"int64_t">; 194def apint_matchinfo : GIDefMatchData<"APInt">; 195def constantfp_matchinfo : GIDefMatchData<"ConstantFP*">; 196def build_fn_matchinfo : 197GIDefMatchData<"std::function<void(MachineIRBuilder &)>">; 198def unsigned_matchinfo: GIDefMatchData<"unsigned">; 199 200def copy_prop : GICombineRule< 201 (defs root:$d), 202 (match (COPY $d, $s):$mi, 203 [{ return Helper.matchCombineCopy(*${mi}); }]), 204 (apply [{ Helper.applyCombineCopy(*${mi}); }])>; 205 206// idempotent operations 207// Fold (freeze (freeze x)) -> (freeze x). 208// Fold (fabs (fabs x)) -> (fabs x). 209// Fold (fcanonicalize (fcanonicalize x)) -> (fcanonicalize x). 210def idempotent_prop_frags : GICombinePatFrag< 211 (outs root:$dst, $src), (ins), 212 !foreach(op, [G_FREEZE, G_FABS, G_FCANONICALIZE], 213 (pattern (op $dst, $src), (op $src, $x)))>; 214 215def idempotent_prop : GICombineRule< 216 (defs root:$dst), 217 (match (idempotent_prop_frags $dst, $src)), 218 (apply (GIReplaceReg $dst, $src))>; 219 220 221def extending_loads : GICombineRule< 222 (defs root:$root, extending_load_matchdata:$matchinfo), 223 (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root, 224 [{ return Helper.matchCombineExtendingLoads(*${root}, ${matchinfo}); }]), 225 (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>; 226 227def load_and_mask : GICombineRule< 228 (defs root:$root, build_fn_matchinfo:$matchinfo), 229 (match (wip_match_opcode G_AND):$root, 230 [{ return Helper.matchCombineLoadWithAndMask(*${root}, ${matchinfo}); }]), 231 (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>; 232def combines_for_extload: GICombineGroup<[extending_loads, load_and_mask]>; 233 234def sext_trunc_sextload : GICombineRule< 235 (defs root:$d), 236 (match (wip_match_opcode G_SEXT_INREG):$d, 237 [{ return Helper.matchSextTruncSextLoad(*${d}); }]), 238 (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>; 239 240def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">; 241def sext_inreg_of_load : GICombineRule< 242 (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo), 243 (match (wip_match_opcode G_SEXT_INREG):$root, 244 [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]), 245 (apply [{ Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>; 246 247def sext_inreg_to_zext_inreg : GICombineRule< 248 (defs root:$dst), 249 (match 250 (G_SEXT_INREG $dst, $src, $imm):$root, 251 [{ 252 unsigned BitWidth = MRI.getType(${src}.getReg()).getScalarSizeInBits(); 253 return Helper.getKnownBits()->maskedValueIsZero(${src}.getReg(), 254 APInt::getOneBitSet(BitWidth, ${imm}.getImm() - 1)); }]), 255 (apply [{ 256 Helper.getBuilder().setInstrAndDebugLoc(*${root}); 257 Helper.getBuilder().buildZExtInReg(${dst}, ${src}, ${imm}.getImm()); 258 ${root}->eraseFromParent(); 259 }]) 260>; 261 262def combine_extracted_vector_load : GICombineRule< 263 (defs root:$root, build_fn_matchinfo:$matchinfo), 264 (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root, 265 [{ return Helper.matchCombineExtractedVectorLoad(*${root}, ${matchinfo}); }]), 266 (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>; 267 268def combine_indexed_load_store : GICombineRule< 269 (defs root:$root, indexed_load_store_matchdata:$matchinfo), 270 (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root, 271 [{ return Helper.matchCombineIndexedLoadStore(*${root}, ${matchinfo}); }]), 272 (apply [{ Helper.applyCombineIndexedLoadStore(*${root}, ${matchinfo}); }])>; 273 274def opt_brcond_by_inverting_cond_matchdata : GIDefMatchData<"MachineInstr *">; 275def opt_brcond_by_inverting_cond : GICombineRule< 276 (defs root:$root, opt_brcond_by_inverting_cond_matchdata:$matchinfo), 277 (match (wip_match_opcode G_BR):$root, 278 [{ return Helper.matchOptBrCondByInvertingCond(*${root}, ${matchinfo}); }]), 279 (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}, ${matchinfo}); }])>; 280 281def ptr_add_immed_matchdata : GIDefMatchData<"PtrAddChain">; 282def ptr_add_immed_chain : GICombineRule< 283 (defs root:$d, ptr_add_immed_matchdata:$matchinfo), 284 (match (wip_match_opcode G_PTR_ADD):$d, 285 [{ return Helper.matchPtrAddImmedChain(*${d}, ${matchinfo}); }]), 286 (apply [{ Helper.applyPtrAddImmedChain(*${d}, ${matchinfo}); }])>; 287 288def shifts_too_big : GICombineRule< 289 (defs root:$root), 290 (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root, 291 [{ return Helper.matchShiftsTooBig(*${root}); }]), 292 (apply [{ Helper.replaceInstWithUndef(*${root}); }])>; 293 294// Fold shift (shift base x), y -> shift base, (x+y), if shifts are same 295def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">; 296def shift_immed_chain : GICombineRule< 297 (defs root:$d, shift_immed_matchdata:$matchinfo), 298 (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d, 299 [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]), 300 (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>; 301 302// Transform shift (logic (shift X, C0), Y), C1 303// -> logic (shift X, (C0+C1)), (shift Y, C1), if shifts are same 304def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">; 305def shift_of_shifted_logic_chain : GICombineRule< 306 (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo), 307 (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d, 308 [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]), 309 (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>; 310 311def mul_to_shl_matchdata : GIDefMatchData<"unsigned">; 312def mul_to_shl : GICombineRule< 313 (defs root:$d, mul_to_shl_matchdata:$matchinfo), 314 (match (G_MUL $d, $op1, $op2):$mi, 315 [{ return Helper.matchCombineMulToShl(*${mi}, ${matchinfo}); }]), 316 (apply [{ Helper.applyCombineMulToShl(*${mi}, ${matchinfo}); }])>; 317 318// shl ([asz]ext x), y => zext (shl x, y), if shift does not overflow int 319def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">; 320def reduce_shl_of_extend : GICombineRule< 321 (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo), 322 (match (G_SHL $dst, $src0, $src1):$mi, 323 [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]), 324 (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>; 325 326// Combine (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) 327// Combine (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2) 328def commute_shift : GICombineRule< 329 (defs root:$d, build_fn_matchinfo:$matchinfo), 330 (match (wip_match_opcode G_SHL):$d, 331 [{ return Helper.matchCommuteShift(*${d}, ${matchinfo}); }]), 332 (apply [{ Helper.applyBuildFn(*${d}, ${matchinfo}); }])>; 333 334def narrow_binop_feeding_and : GICombineRule< 335 (defs root:$root, build_fn_matchinfo:$matchinfo), 336 (match (wip_match_opcode G_AND):$root, 337 [{ return Helper.matchNarrowBinopFeedingAnd(*${root}, ${matchinfo}); }]), 338 (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>; 339 340// [us]itofp(undef) = 0, because the result value is bounded. 341def undef_to_fp_zero : GICombineRule< 342 (defs root:$root), 343 (match (wip_match_opcode G_UITOFP, G_SITOFP):$root, 344 [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]), 345 (apply [{ Helper.replaceInstWithFConstant(*${root}, 0.0); }])>; 346 347def undef_to_int_zero: GICombineRule< 348 (defs root:$root), 349 (match (wip_match_opcode G_AND, G_MUL):$root, 350 [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]), 351 (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>; 352 353def undef_to_negative_one: GICombineRule< 354 (defs root:$root), 355 (match (wip_match_opcode G_OR):$root, 356 [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]), 357 (apply [{ Helper.replaceInstWithConstant(*${root}, -1); }])>; 358 359def binop_left_undef_to_zero: GICombineRule< 360 (defs root:$root), 361 (match (wip_match_opcode G_SHL, G_UDIV, G_UREM):$root, 362 [{ return Helper.matchOperandIsUndef(*${root}, 1); }]), 363 (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>; 364 365def binop_right_undef_to_undef: GICombineRule< 366 (defs root:$root), 367 (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root, 368 [{ return Helper.matchOperandIsUndef(*${root}, 2); }]), 369 (apply [{ Helper.replaceInstWithUndef(*${root}); }])>; 370 371def unary_undef_to_zero: GICombineRule< 372 (defs root:$root), 373 (match (wip_match_opcode G_ABS):$root, 374 [{ return Helper.matchOperandIsUndef(*${root}, 1); }]), 375 (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>; 376 377// Instructions where if any source operand is undef, the instruction can be 378// replaced with undef. 379def propagate_undef_any_op: GICombineRule< 380 (defs root:$root), 381 (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC):$root, 382 [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]), 383 (apply [{ Helper.replaceInstWithUndef(*${root}); }])>; 384 385// Instructions where if all source operands are undef, the instruction can be 386// replaced with undef. 387def propagate_undef_all_ops: GICombineRule< 388 (defs root:$root), 389 (match (wip_match_opcode G_SHUFFLE_VECTOR):$root, 390 [{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]), 391 (apply [{ Helper.replaceInstWithUndef(*${root}); }])>; 392 393// Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF. 394def propagate_undef_shuffle_mask: GICombineRule< 395 (defs root:$root), 396 (match (wip_match_opcode G_SHUFFLE_VECTOR):$root, 397 [{ return Helper.matchUndefShuffleVectorMask(*${root}); }]), 398 (apply [{ Helper.replaceInstWithUndef(*${root}); }])>; 399 400// Replace a G_SHUFFLE_VECTOR with a G_EXTRACT_VECTOR_ELT. 401def shuffle_to_extract: GICombineRule< 402 (defs root:$root), 403 (match (wip_match_opcode G_SHUFFLE_VECTOR):$root, 404 [{ return Helper.matchShuffleToExtract(*${root}); }]), 405 (apply [{ Helper.applyShuffleToExtract(*${root}); }])>; 406 407 // Replace an insert/extract element of an out of bounds index with undef. 408 def insert_extract_vec_elt_out_of_bounds : GICombineRule< 409 (defs root:$root), 410 (match (wip_match_opcode G_INSERT_VECTOR_ELT, G_EXTRACT_VECTOR_ELT):$root, 411 [{ return Helper.matchInsertExtractVecEltOutOfBounds(*${root}); }]), 412 (apply [{ Helper.replaceInstWithUndef(*${root}); }])>; 413 414// Fold (cond ? x : x) -> x 415def select_same_val: GICombineRule< 416 (defs root:$root), 417 (match (wip_match_opcode G_SELECT):$root, 418 [{ return Helper.matchSelectSameVal(*${root}); }]), 419 (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 2); }]) 420>; 421 422// Fold (undef ? x : y) -> y 423def select_undef_cmp: GICombineRule< 424 (defs root:$dst), 425 (match (G_IMPLICIT_DEF $undef), 426 (G_SELECT $dst, $undef, $x, $y)), 427 (apply (GIReplaceReg $dst, $y)) 428>; 429 430// Fold (true ? x : y) -> x 431// Fold (false ? x : y) -> y 432def select_constant_cmp_matchdata : GIDefMatchData<"unsigned">; 433def select_constant_cmp: GICombineRule< 434 (defs root:$root, select_constant_cmp_matchdata:$matchinfo), 435 (match (wip_match_opcode G_SELECT):$root, 436 [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]), 437 (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }]) 438>; 439 440// Fold (C op x) -> (x op C) 441// TODO: handle more isCommutable opcodes 442// TODO: handle compares (currently not marked as isCommutable) 443def commute_int_constant_to_rhs : GICombineRule< 444 (defs root:$root), 445 (match (wip_match_opcode G_ADD, G_MUL, G_AND, G_OR, G_XOR):$root, 446 [{ return Helper.matchCommuteConstantToRHS(*${root}); }]), 447 (apply [{ Helper.applyCommuteBinOpOperands(*${root}); }]) 448>; 449 450def commute_fp_constant_to_rhs : GICombineRule< 451 (defs root:$root), 452 (match (wip_match_opcode G_FADD, G_FMUL):$root, 453 [{ return Helper.matchCommuteFPConstantToRHS(*${root}); }]), 454 (apply [{ Helper.applyCommuteBinOpOperands(*${root}); }]) 455>; 456 457def commute_constant_to_rhs : GICombineGroup<[ 458 commute_int_constant_to_rhs, 459 commute_fp_constant_to_rhs 460]>; 461 462// Fold x op 0 -> x 463def right_identity_zero_frags : GICombinePatFrag< 464 (outs root:$dst), (ins $x), 465 !foreach(op, 466 [G_SUB, G_ADD, G_OR, G_XOR, G_SHL, G_ASHR, 467 G_LSHR, G_PTR_ADD, G_ROTL, G_ROTR], 468 (pattern (op $dst, $x, 0)))>; 469def right_identity_zero: GICombineRule< 470 (defs root:$dst), 471 (match (right_identity_zero_frags $dst, $lhs)), 472 (apply (GIReplaceReg $dst, $lhs)) 473>; 474 475def right_identity_neg_zero_fp: GICombineRule< 476 (defs root:$dst), 477 (match (G_FADD $dst, $x, $y):$root, 478 [{ return Helper.matchConstantFPOp(${y}, -0.0); }]), 479 (apply (GIReplaceReg $dst, $x)) 480>; 481 482// Fold x op 1 -> x 483def right_identity_one_int: GICombineRule< 484 (defs root:$dst), 485 (match (G_MUL $dst, $x, 1)), 486 (apply (GIReplaceReg $dst, $x)) 487>; 488 489def right_identity_one_fp: GICombineRule< 490 (defs root:$dst), 491 (match (G_FMUL $dst, $x, $y):$root, 492 [{ return Helper.matchConstantFPOp(${y}, 1.0); }]), 493 (apply (GIReplaceReg $dst, $x)) 494>; 495 496def right_identity_neg_one_fp: GICombineRule< 497 (defs root:$dst), 498 (match (G_FMUL $dst, $x, $y):$root, 499 [{ return Helper.matchConstantFPOp(${y}, -1.0); }]), 500 (apply (G_FNEG $dst, $x)) 501>; 502 503def right_identity_one : GICombineGroup<[right_identity_one_int, right_identity_one_fp]>; 504 505// Fold (x op x) - > x 506def binop_same_val_frags : GICombinePatFrag< 507 (outs root:$dst), (ins $x), 508 [ 509 (pattern (G_AND $dst, $x, $x)), 510 (pattern (G_OR $dst, $x, $x)), 511 ] 512>; 513def binop_same_val: GICombineRule< 514 (defs root:$dst), 515 (match (binop_same_val_frags $dst, $src)), 516 (apply (GIReplaceReg $dst, $src)) 517>; 518 519// Fold (0 op x) - > 0 520def binop_left_to_zero: GICombineRule< 521 (defs root:$root), 522 (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root, 523 [{ return Helper.matchOperandIsZero(*${root}, 1); }]), 524 (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 1); }]) 525>; 526 527def urem_pow2_to_mask : GICombineRule< 528 (defs root:$root), 529 (match (wip_match_opcode G_UREM):$root, 530 [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]), 531 (apply [{ Helper.applySimplifyURemByPow2(*${root}); }]) 532>; 533 534// Push a binary operator through a select on constants. 535// 536// binop (select cond, K0, K1), K2 -> 537// select cond, (binop K0, K2), (binop K1, K2) 538 539// Every binary operator that has constant folding. We currently do 540// not have constant folding for G_FPOW, G_FMAXNUM_IEEE or 541// G_FMINNUM_IEEE. 542def fold_binop_into_select : GICombineRule< 543 (defs root:$root, unsigned_matchinfo:$select_op_no), 544 (match (wip_match_opcode 545 G_ADD, G_SUB, G_PTR_ADD, G_AND, G_OR, G_XOR, 546 G_SDIV, G_SREM, G_UDIV, G_UREM, G_LSHR, G_ASHR, G_SHL, 547 G_SMIN, G_SMAX, G_UMIN, G_UMAX, 548 G_FMUL, G_FADD, G_FSUB, G_FDIV, G_FREM, 549 G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root, 550 [{ return Helper.matchFoldBinOpIntoSelect(*${root}, ${select_op_no}); }]), 551 (apply [{ Helper.applyFoldBinOpIntoSelect(*${root}, ${select_op_no}); }]) 552>; 553 554// Transform d = [su]div(x, y) and r = [su]rem(x, y) - > d, r = [su]divrem(x, y) 555def div_rem_to_divrem_matchdata : GIDefMatchData<"MachineInstr *">; 556def div_rem_to_divrem : GICombineRule< 557 (defs root:$root, div_rem_to_divrem_matchdata:$matchinfo), 558 (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root, 559 [{ return Helper.matchCombineDivRem(*${root}, ${matchinfo}); }]), 560 (apply [{ Helper.applyCombineDivRem(*${root}, ${matchinfo}); }]) 561>; 562 563// Fold (x op 0) - > 0 564def binop_right_to_zero: GICombineRule< 565 (defs root:$dst), 566 (match (G_MUL $dst, $lhs, 0:$zero)), 567 (apply (GIReplaceReg $dst, $zero)) 568>; 569 570// Erase stores of undef values. 571def erase_undef_store : GICombineRule< 572 (defs root:$root), 573 (match (wip_match_opcode G_STORE):$root, 574 [{ return Helper.matchUndefStore(*${root}); }]), 575 (apply [{ Helper.eraseInst(*${root}); }]) 576>; 577 578def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">; 579def simplify_add_to_sub: GICombineRule < 580 (defs root:$root, simplify_add_to_sub_matchinfo:$info), 581 (match (wip_match_opcode G_ADD):$root, 582 [{ return Helper.matchSimplifyAddToSub(*${root}, ${info}); }]), 583 (apply [{ Helper.applySimplifyAddToSub(*${root}, ${info});}]) 584>; 585 586// Fold fp_op(cst) to the constant result of the floating point operation. 587class constant_fold_unary_fp_op_rule<Instruction opcode> : GICombineRule < 588 (defs root:$dst), 589 (match (opcode $dst, $src0):$root, (G_FCONSTANT $src0, $cst)), 590 (apply [{ Helper.applyCombineConstantFoldFpUnary(*${root}, ${cst}.getFPImm()); }]) 591>; 592 593def constant_fold_fneg : constant_fold_unary_fp_op_rule<G_FNEG>; 594def constant_fold_fabs : constant_fold_unary_fp_op_rule<G_FABS>; 595def constant_fold_fsqrt : constant_fold_unary_fp_op_rule<G_FSQRT>; 596def constant_fold_flog2 : constant_fold_unary_fp_op_rule<G_FLOG2>; 597def constant_fold_fptrunc : constant_fold_unary_fp_op_rule<G_FPTRUNC>; 598 599// Fold constant zero int to fp conversions. 600class itof_const_zero_fold_rule<Instruction opcode> : GICombineRule < 601 (defs root:$dst), 602 (match (opcode $dst, 0)), 603 // Can't use COPY $dst, 0 here because the 0 operand may be a smaller type 604 // than the destination for itofp. 605 (apply [{ Helper.replaceInstWithFConstant(*${dst}.getParent(), 0.0); }]) 606>; 607def itof_const_zero_fold_si : itof_const_zero_fold_rule<G_SITOFP>; 608def itof_const_zero_fold_ui : itof_const_zero_fold_rule<G_UITOFP>; 609 610def constant_fold_fp_ops : GICombineGroup<[ 611 constant_fold_fneg, 612 constant_fold_fabs, 613 constant_fold_fsqrt, 614 constant_fold_flog2, 615 constant_fold_fptrunc, 616 itof_const_zero_fold_si, 617 itof_const_zero_fold_ui 618]>; 619 620// Fold int2ptr(ptr2int(x)) -> x 621def p2i_to_i2p: GICombineRule< 622 (defs root:$root, register_matchinfo:$info), 623 (match (wip_match_opcode G_INTTOPTR):$root, 624 [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]), 625 (apply [{ Helper.applyCombineI2PToP2I(*${root}, ${info}); }]) 626>; 627 628// Fold ptr2int(int2ptr(x)) -> x 629def i2p_to_p2i: GICombineRule< 630 (defs root:$dst, register_matchinfo:$info), 631 (match (G_INTTOPTR $t, $ptr), 632 (G_PTRTOINT $dst, $t):$mi, 633 [{ ${info} = ${ptr}.getReg(); return true; }]), 634 (apply [{ Helper.applyCombineP2IToI2P(*${mi}, ${info}); }]) 635>; 636 637// Fold add ptrtoint(x), y -> ptrtoint (ptr_add x), y 638def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">; 639def add_p2i_to_ptradd : GICombineRule< 640 (defs root:$root, add_p2i_to_ptradd_matchinfo:$info), 641 (match (wip_match_opcode G_ADD):$root, 642 [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]), 643 (apply [{ Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }]) 644>; 645 646// Fold (ptr_add (int2ptr C1), C2) -> C1 + C2 647def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"APInt">; 648def const_ptradd_to_i2p: GICombineRule< 649 (defs root:$root, const_ptradd_to_i2p_matchinfo:$info), 650 (match (wip_match_opcode G_PTR_ADD):$root, 651 [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]), 652 (apply [{ Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }]) 653>; 654 655// Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y)) 656def hoist_logic_op_with_same_opcode_hands: GICombineRule < 657 (defs root:$root, instruction_steps_matchdata:$info), 658 (match (wip_match_opcode G_AND, G_OR, G_XOR):$root, 659 [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]), 660 (apply [{ Helper.applyBuildInstructionSteps(*${root}, ${info});}]) 661>; 662 663// Fold ashr (shl x, C), C -> sext_inreg (C) 664def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">; 665def shl_ashr_to_sext_inreg : GICombineRule< 666 (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info), 667 (match (wip_match_opcode G_ASHR): $root, 668 [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]), 669 (apply [{ Helper.applyAshShlToSextInreg(*${root}, ${info});}]) 670>; 671 672// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0 673def overlapping_and: GICombineRule < 674 (defs root:$root, build_fn_matchinfo:$info), 675 (match (wip_match_opcode G_AND):$root, 676 [{ return Helper.matchOverlappingAnd(*${root}, ${info}); }]), 677 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }]) 678>; 679 680// Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y. 681def redundant_and: GICombineRule < 682 (defs root:$root, register_matchinfo:$matchinfo), 683 (match (wip_match_opcode G_AND):$root, 684 [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]), 685 (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }]) 686>; 687 688// Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y. 689def redundant_or: GICombineRule < 690 (defs root:$root, register_matchinfo:$matchinfo), 691 (match (wip_match_opcode G_OR):$root, 692 [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]), 693 (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }]) 694>; 695 696// If the input is already sign extended, just drop the extension. 697// sext_inreg x, K -> 698// if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1) 699def redundant_sext_inreg: GICombineRule < 700 (defs root:$root), 701 (match (wip_match_opcode G_SEXT_INREG):$root, 702 [{ return Helper.matchRedundantSExtInReg(*${root}); }]), 703 (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 1); }]) 704>; 705 706// Fold (anyext (trunc x)) -> x if the source type is same as 707// the destination type. 708def anyext_trunc_fold: GICombineRule < 709 (defs root:$root, register_matchinfo:$matchinfo), 710 (match (wip_match_opcode G_ANYEXT):$root, 711 [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]), 712 (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }]) 713>; 714 715// Fold (zext (trunc x)) -> x if the source type is same as the destination type 716// and truncated bits are known to be zero. 717def zext_trunc_fold_matchinfo : GIDefMatchData<"Register">; 718def zext_trunc_fold: GICombineRule < 719 (defs root:$root, zext_trunc_fold_matchinfo:$matchinfo), 720 (match (wip_match_opcode G_ZEXT):$root, 721 [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]), 722 (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }]) 723>; 724 725// Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x). 726def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">; 727def ext_ext_fold: GICombineRule < 728 (defs root:$root, ext_ext_fold_matchinfo:$matchinfo), 729 (match (wip_match_opcode G_ANYEXT, G_SEXT, G_ZEXT):$root, 730 [{ return Helper.matchCombineExtOfExt(*${root}, ${matchinfo}); }]), 731 (apply [{ Helper.applyCombineExtOfExt(*${root}, ${matchinfo}); }]) 732>; 733 734def not_cmp_fold_matchinfo : GIDefMatchData<"SmallVector<Register, 4>">; 735def not_cmp_fold : GICombineRule< 736 (defs root:$d, not_cmp_fold_matchinfo:$info), 737 (match (wip_match_opcode G_XOR): $d, 738 [{ return Helper.matchNotCmp(*${d}, ${info}); }]), 739 (apply [{ Helper.applyNotCmp(*${d}, ${info}); }]) 740>; 741 742// Fold (fneg (fneg x)) -> x. 743def fneg_fneg_fold: GICombineRule < 744 (defs root:$dst), 745 (match (G_FNEG $t, $src), 746 (G_FNEG $dst, $t)), 747 (apply (GIReplaceReg $dst, $src)) 748>; 749 750// Fold (unmerge(merge x, y, z)) -> z, y, z. 751def unmerge_merge_matchinfo : GIDefMatchData<"SmallVector<Register, 8>">; 752def unmerge_merge : GICombineRule< 753 (defs root:$d, unmerge_merge_matchinfo:$info), 754 (match (wip_match_opcode G_UNMERGE_VALUES): $d, 755 [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]), 756 (apply [{ Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]) 757>; 758 759// Fold merge(unmerge). 760def merge_unmerge : GICombineRule< 761 (defs root:$d, register_matchinfo:$matchinfo), 762 (match (wip_match_opcode G_MERGE_VALUES):$d, 763 [{ return Helper.matchCombineMergeUnmerge(*${d}, ${matchinfo}); }]), 764 (apply [{ Helper.replaceSingleDefInstWithReg(*${d}, ${matchinfo}); }]) 765>; 766 767// Fold (fabs (fneg x)) -> (fabs x). 768def fabs_fneg_fold: GICombineRule < 769 (defs root:$dst), 770 (match (G_FNEG $tmp, $x), 771 (G_FABS $dst, $tmp)), 772 (apply (G_FABS $dst, $x))>; 773 774// Fold (unmerge cst) -> cst1, cst2, ... 775def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">; 776def unmerge_cst : GICombineRule< 777 (defs root:$d, unmerge_cst_matchinfo:$info), 778 (match (wip_match_opcode G_UNMERGE_VALUES): $d, 779 [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]), 780 (apply [{ Helper.applyCombineUnmergeConstant(*${d}, ${info}); }]) 781>; 782 783// Fold (unmerge undef) -> undef, undef, ... 784def unmerge_undef : GICombineRule< 785 (defs root:$root, build_fn_matchinfo:$info), 786 (match (wip_match_opcode G_UNMERGE_VALUES): $root, 787 [{ return Helper.matchCombineUnmergeUndef(*${root}, ${info}); }]), 788 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }]) 789>; 790 791// Transform x,y<dead> = unmerge z -> x = trunc z. 792def unmerge_dead_to_trunc : GICombineRule< 793 (defs root:$d), 794 (match (wip_match_opcode G_UNMERGE_VALUES): $d, 795 [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]), 796 (apply [{ Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }]) 797>; 798 799// Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0. 800def unmerge_zext_to_zext : GICombineRule< 801 (defs root:$d), 802 (match (wip_match_opcode G_UNMERGE_VALUES): $d, 803 [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]), 804 (apply [{ Helper.applyCombineUnmergeZExtToZExt(*${d}); }]) 805>; 806 807// Fold trunc ([asz]ext x) -> x or ([asz]ext x) or (trunc x). 808def trunc_ext_fold_matchinfo : GIDefMatchData<"std::pair<Register, unsigned>">; 809def trunc_ext_fold: GICombineRule < 810 (defs root:$root, trunc_ext_fold_matchinfo:$matchinfo), 811 (match (wip_match_opcode G_TRUNC):$root, 812 [{ return Helper.matchCombineTruncOfExt(*${root}, ${matchinfo}); }]), 813 (apply [{ Helper.applyCombineTruncOfExt(*${root}, ${matchinfo}); }]) 814>; 815 816// Under certain conditions, transform: 817// trunc (shl x, K) -> shl (trunc x), K// 818// trunc ([al]shr x, K) -> (trunc ([al]shr (trunc x), K)) 819def trunc_shift_matchinfo : GIDefMatchData<"std::pair<MachineInstr*, LLT>">; 820def trunc_shift: GICombineRule < 821 (defs root:$root, trunc_shift_matchinfo:$matchinfo), 822 (match (wip_match_opcode G_TRUNC):$root, 823 [{ return Helper.matchCombineTruncOfShift(*${root}, ${matchinfo}); }]), 824 (apply [{ Helper.applyCombineTruncOfShift(*${root}, ${matchinfo}); }]) 825>; 826 827// Transform (mul x, -1) -> (sub 0, x) 828def mul_by_neg_one: GICombineRule < 829 (defs root:$dst), 830 (match (G_MUL $dst, $x, -1)), 831 (apply (G_SUB $dst, 0, $x)) 832>; 833 834// Fold (xor (and x, y), y) -> (and (not x), y) 835def xor_of_and_with_same_reg_matchinfo : 836 GIDefMatchData<"std::pair<Register, Register>">; 837def xor_of_and_with_same_reg: GICombineRule < 838 (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo), 839 (match (wip_match_opcode G_XOR):$root, 840 [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]), 841 (apply [{ Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }]) 842>; 843 844// Transform (ptr_add 0, x) -> (int_to_ptr x) 845def ptr_add_with_zero: GICombineRule< 846 (defs root:$root), 847 (match (wip_match_opcode G_PTR_ADD):$root, 848 [{ return Helper.matchPtrAddZero(*${root}); }]), 849 (apply [{ Helper.applyPtrAddZero(*${root}); }])>; 850 851def regs_small_vec : GIDefMatchData<"SmallVector<Register, 4>">; 852def combine_insert_vec_elts_build_vector : GICombineRule< 853 (defs root:$root, regs_small_vec:$info), 854 (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root, 855 [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]), 856 (apply [{ Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>; 857 858def load_or_combine : GICombineRule< 859 (defs root:$root, build_fn_matchinfo:$info), 860 (match (wip_match_opcode G_OR):$root, 861 [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]), 862 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 863 864def extend_through_phis_matchdata: GIDefMatchData<"MachineInstr*">; 865def extend_through_phis : GICombineRule< 866 (defs root:$root, extend_through_phis_matchdata:$matchinfo), 867 (match (wip_match_opcode G_PHI):$root, 868 [{ return Helper.matchExtendThroughPhis(*${root}, ${matchinfo}); }]), 869 (apply [{ Helper.applyExtendThroughPhis(*${root}, ${matchinfo}); }])>; 870 871// Currently only the one combine above. 872def insert_vec_elt_combines : GICombineGroup< 873 [combine_insert_vec_elts_build_vector]>; 874 875def extract_vec_elt_build_vec : GICombineRule< 876 (defs root:$root, register_matchinfo:$matchinfo), 877 (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root, 878 [{ return Helper.matchExtractVecEltBuildVec(*${root}, ${matchinfo}); }]), 879 (apply [{ Helper.applyExtractVecEltBuildVec(*${root}, ${matchinfo}); }])>; 880 881// Fold away full elt extracts from a build_vector. 882def extract_all_elts_from_build_vector_matchinfo : 883 GIDefMatchData<"SmallVector<std::pair<Register, MachineInstr*>>">; 884def extract_all_elts_from_build_vector : GICombineRule< 885 (defs root:$root, extract_all_elts_from_build_vector_matchinfo:$matchinfo), 886 (match (wip_match_opcode G_BUILD_VECTOR):$root, 887 [{ return Helper.matchExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }]), 888 (apply [{ Helper.applyExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }])>; 889 890def extract_vec_elt_combines : GICombineGroup<[ 891 extract_vec_elt_build_vec, 892 extract_all_elts_from_build_vector]>; 893 894def funnel_shift_from_or_shift : GICombineRule< 895 (defs root:$root, build_fn_matchinfo:$info), 896 (match (wip_match_opcode G_OR):$root, 897 [{ return Helper.matchOrShiftToFunnelShift(*${root}, ${info}); }]), 898 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }]) 899>; 900 901def funnel_shift_to_rotate : GICombineRule< 902 (defs root:$root), 903 (match (wip_match_opcode G_FSHL, G_FSHR):$root, 904 [{ return Helper.matchFunnelShiftToRotate(*${root}); }]), 905 (apply [{ Helper.applyFunnelShiftToRotate(*${root}); }]) 906>; 907 908// Fold fshr x, y, 0 -> y 909def funnel_shift_right_zero: GICombineRule< 910 (defs root:$root), 911 (match (G_FSHR $x, $y, $z, 0):$root), 912 (apply (COPY $x, $z)) 913>; 914 915// Fold fshl x, y, 0 -> x 916def funnel_shift_left_zero: GICombineRule< 917 (defs root:$root), 918 (match (G_FSHL $x, $y, $z, 0):$root), 919 (apply (COPY $x, $y)) 920>; 921 922// Fold fsh(l/r) x, y, C -> fsh(l/r) x, y, C % bw 923def funnel_shift_overshift: GICombineRule< 924 (defs root:$root), 925 (match (wip_match_opcode G_FSHL, G_FSHR):$root, 926 [{ return Helper.matchConstantLargerBitWidth(*${root}, 3); }]), 927 (apply [{ Helper.applyFunnelShiftConstantModulo(*${root}); }]) 928>; 929 930def rotate_out_of_range : GICombineRule< 931 (defs root:$root), 932 (match (wip_match_opcode G_ROTR, G_ROTL):$root, 933 [{ return Helper.matchRotateOutOfRange(*${root}); }]), 934 (apply [{ Helper.applyRotateOutOfRange(*${root}); }]) 935>; 936 937def icmp_to_true_false_known_bits : GICombineRule< 938 (defs root:$d, int64_matchinfo:$matchinfo), 939 (match (wip_match_opcode G_ICMP):$d, 940 [{ return Helper.matchICmpToTrueFalseKnownBits(*${d}, ${matchinfo}); }]), 941 (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>; 942 943def icmp_to_lhs_known_bits : GICombineRule< 944 (defs root:$root, build_fn_matchinfo:$info), 945 (match (wip_match_opcode G_ICMP):$root, 946 [{ return Helper.matchICmpToLHSKnownBits(*${root}, ${info}); }]), 947 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 948 949def redundant_binop_in_equality : GICombineRule< 950 (defs root:$root, build_fn_matchinfo:$info), 951 (match (wip_match_opcode G_ICMP):$root, 952 [{ return Helper.matchRedundantBinOpInEquality(*${root}, ${info}); }]), 953 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 954 955// Transform: (X == 0 & Y == 0) -> (X | Y) == 0 956def double_icmp_zero_and_combine: GICombineRule< 957 (defs root:$root), 958 (match (G_ICMP $d1, $p, $s1, 0), 959 (G_ICMP $d2, $p, $s2, 0), 960 (G_AND $root, $d1, $d2), 961 [{ return ${p}.getPredicate() == CmpInst::ICMP_EQ && 962 !MRI.getType(${s1}.getReg()).getScalarType().isPointer() && 963 (MRI.getType(${s1}.getReg()) == 964 MRI.getType(${s2}.getReg())); }]), 965 (apply (G_OR $ordst, $s1, $s2), 966 (G_ICMP $root, $p, $ordst, 0)) 967>; 968 969// Transform: (X != 0 | Y != 0) -> (X | Y) != 0 970def double_icmp_zero_or_combine: GICombineRule< 971 (defs root:$root), 972 (match (G_ICMP $d1, $p, $s1, 0), 973 (G_ICMP $d2, $p, $s2, 0), 974 (G_OR $root, $d1, $d2), 975 [{ return ${p}.getPredicate() == CmpInst::ICMP_NE && 976 !MRI.getType(${s1}.getReg()).getScalarType().isPointer() && 977 (MRI.getType(${s1}.getReg()) == 978 MRI.getType(${s2}.getReg())); }]), 979 (apply (G_OR $ordst, $s1, $s2), 980 (G_ICMP $root, $p, $ordst, 0)) 981>; 982 983def double_icmp_zero_and_or_combine : GICombineGroup<[double_icmp_zero_and_combine, 984 double_icmp_zero_or_combine]>; 985 986def and_or_disjoint_mask : GICombineRule< 987 (defs root:$root, build_fn_matchinfo:$info), 988 (match (wip_match_opcode G_AND):$root, 989 [{ return Helper.matchAndOrDisjointMask(*${root}, ${info}); }]), 990 (apply [{ Helper.applyBuildFnNoErase(*${root}, ${info}); }])>; 991 992def bitfield_extract_from_and : GICombineRule< 993 (defs root:$root, build_fn_matchinfo:$info), 994 (match (wip_match_opcode G_AND):$root, 995 [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]), 996 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 997 998def funnel_shift_combines : GICombineGroup<[funnel_shift_from_or_shift, 999 funnel_shift_to_rotate, 1000 funnel_shift_right_zero, 1001 funnel_shift_left_zero, 1002 funnel_shift_overshift]>; 1003 1004def bitfield_extract_from_sext_inreg : GICombineRule< 1005 (defs root:$root, build_fn_matchinfo:$info), 1006 (match (wip_match_opcode G_SEXT_INREG):$root, 1007 [{ return Helper.matchBitfieldExtractFromSExtInReg(*${root}, ${info}); }]), 1008 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 1009 1010def bitfield_extract_from_shr : GICombineRule< 1011 (defs root:$root, build_fn_matchinfo:$info), 1012 (match (wip_match_opcode G_ASHR, G_LSHR):$root, 1013 [{ return Helper.matchBitfieldExtractFromShr(*${root}, ${info}); }]), 1014 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 1015 1016def bitfield_extract_from_shr_and : GICombineRule< 1017 (defs root:$root, build_fn_matchinfo:$info), 1018 (match (wip_match_opcode G_ASHR, G_LSHR):$root, 1019 [{ return Helper.matchBitfieldExtractFromShrAnd(*${root}, ${info}); }]), 1020 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 1021 1022def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg, 1023 bitfield_extract_from_and, 1024 bitfield_extract_from_shr, 1025 bitfield_extract_from_shr_and]>; 1026 1027def udiv_by_const : GICombineRule< 1028 (defs root:$root), 1029 (match (wip_match_opcode G_UDIV):$root, 1030 [{ return Helper.matchUDivByConst(*${root}); }]), 1031 (apply [{ Helper.applyUDivByConst(*${root}); }])>; 1032 1033def sdiv_by_const : GICombineRule< 1034 (defs root:$root), 1035 (match (wip_match_opcode G_SDIV):$root, 1036 [{ return Helper.matchSDivByConst(*${root}); }]), 1037 (apply [{ Helper.applySDivByConst(*${root}); }])>; 1038 1039def intdiv_combines : GICombineGroup<[udiv_by_const, sdiv_by_const]>; 1040 1041def reassoc_ptradd : GICombineRule< 1042 (defs root:$root, build_fn_matchinfo:$matchinfo), 1043 (match (wip_match_opcode G_PTR_ADD):$root, 1044 [{ return Helper.matchReassocPtrAdd(*${root}, ${matchinfo}); }]), 1045 (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>; 1046 1047def reassoc_comm_binops : GICombineRule< 1048 (defs root:$root, build_fn_matchinfo:$matchinfo), 1049 (match (G_ADD $root, $src1, $src2):$root, 1050 [{ return Helper.matchReassocCommBinOp(*${root}, ${matchinfo}); }]), 1051 (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>; 1052 1053def reassocs : GICombineGroup<[reassoc_ptradd, reassoc_comm_binops]>; 1054 1055// Constant fold operations. 1056def constant_fold_binop : GICombineRule< 1057 (defs root:$d, apint_matchinfo:$matchinfo), 1058 (match (wip_match_opcode G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR, G_SHL, G_LSHR, G_ASHR):$d, 1059 [{ return Helper.matchConstantFoldBinOp(*${d}, ${matchinfo}); }]), 1060 (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>; 1061 1062def constant_fold_fp_binop : GICombineRule< 1063 (defs root:$d, constantfp_matchinfo:$matchinfo), 1064 (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV):$d, 1065 [{ return Helper.matchConstantFoldFPBinOp(*${d}, ${matchinfo}); }]), 1066 (apply [{ Helper.replaceInstWithFConstant(*${d}, ${matchinfo}); }])>; 1067 1068 1069def constant_fold_fma : GICombineRule< 1070 (defs root:$d, constantfp_matchinfo:$matchinfo), 1071 (match (wip_match_opcode G_FMAD, G_FMA):$d, 1072 [{ return Helper.matchConstantFoldFMA(*${d}, ${matchinfo}); }]), 1073 (apply [{ Helper.replaceInstWithFConstant(*${d}, ${matchinfo}); }])>; 1074 1075def constant_fold_cast_op : GICombineRule< 1076 (defs root:$d, apint_matchinfo:$matchinfo), 1077 (match (wip_match_opcode G_ZEXT, G_SEXT, G_ANYEXT):$d, 1078 [{ return Helper.matchConstantFoldCastOp(*${d}, ${matchinfo}); }]), 1079 (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>; 1080 1081def mulo_by_2: GICombineRule< 1082 (defs root:$root, build_fn_matchinfo:$matchinfo), 1083 (match (wip_match_opcode G_UMULO, G_SMULO):$root, 1084 [{ return Helper.matchMulOBy2(*${root}, ${matchinfo}); }]), 1085 (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>; 1086 1087def mulo_by_0: GICombineRule< 1088 (defs root:$root, build_fn_matchinfo:$matchinfo), 1089 (match (wip_match_opcode G_UMULO, G_SMULO):$root, 1090 [{ return Helper.matchMulOBy0(*${root}, ${matchinfo}); }]), 1091 (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>; 1092 1093def addo_by_0: GICombineRule< 1094 (defs root:$root, build_fn_matchinfo:$matchinfo), 1095 (match (wip_match_opcode G_UADDO, G_SADDO):$root, 1096 [{ return Helper.matchAddOBy0(*${root}, ${matchinfo}); }]), 1097 (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>; 1098 1099// Transform (uadde x, y, 0) -> (uaddo x, y) 1100// (sadde x, y, 0) -> (saddo x, y) 1101// (usube x, y, 0) -> (usubo x, y) 1102// (ssube x, y, 0) -> (ssubo x, y) 1103def adde_to_addo: GICombineRule< 1104 (defs root:$root, build_fn_matchinfo:$matchinfo), 1105 (match (wip_match_opcode G_UADDE, G_SADDE, G_USUBE, G_SSUBE):$root, 1106 [{ return Helper.matchAddEToAddO(*${root}, ${matchinfo}); }]), 1107 (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>; 1108 1109def mulh_to_lshr : GICombineRule< 1110 (defs root:$root), 1111 (match (wip_match_opcode G_UMULH):$root, 1112 [{ return Helper.matchUMulHToLShr(*${root}); }]), 1113 (apply [{ Helper.applyUMulHToLShr(*${root}); }])>; 1114 1115def mulh_combines : GICombineGroup<[mulh_to_lshr]>; 1116 1117def redundant_neg_operands: GICombineRule< 1118 (defs root:$root, build_fn_matchinfo:$matchinfo), 1119 (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMAD, G_FMA):$root, 1120 [{ return Helper.matchRedundantNegOperands(*${root}, ${matchinfo}); }]), 1121 (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>; 1122 1123// Transform (fsub +-0.0, X) -> (fneg X) 1124def fsub_to_fneg: GICombineRule< 1125 (defs root:$root, register_matchinfo:$matchinfo), 1126 (match (wip_match_opcode G_FSUB):$root, 1127 [{ return Helper.matchFsubToFneg(*${root}, ${matchinfo}); }]), 1128 (apply [{ Helper.applyFsubToFneg(*${root}, ${matchinfo}); }])>; 1129 1130// Transform (fadd x, (fmul y, z)) -> (fma y, z, x) 1131// (fadd x, (fmul y, z)) -> (fmad y, z, x) 1132// Transform (fadd (fmul x, y), z) -> (fma x, y, z) 1133// (fadd (fmul x, y), z) -> (fmad x, y, z) 1134def combine_fadd_fmul_to_fmad_or_fma: GICombineRule< 1135 (defs root:$root, build_fn_matchinfo:$info), 1136 (match (wip_match_opcode G_FADD):$root, 1137 [{ return Helper.matchCombineFAddFMulToFMadOrFMA(*${root}, 1138 ${info}); }]), 1139 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 1140 1141// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z) 1142// -> (fmad (fpext x), (fpext y), z) 1143// Transform (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x) 1144// -> (fmad (fpext y), (fpext z), x) 1145def combine_fadd_fpext_fmul_to_fmad_or_fma: GICombineRule< 1146 (defs root:$root, build_fn_matchinfo:$info), 1147 (match (wip_match_opcode G_FADD):$root, 1148 [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMA(*${root}, 1149 ${info}); }]), 1150 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 1151 1152// Transform (fadd (fma x, y, (fmul z, u)), v) -> (fma x, y, (fma z, u, v)) 1153// (fadd (fmad x, y, (fmul z, u)), v) -> (fmad x, y, (fmad z, u, v)) 1154// Transform (fadd v, (fma x, y, (fmul z, u))) -> (fma x, y, (fma z, u, v)) 1155// (fadd v, (fmad x, y, (fmul z, u))) -> (fmad x, y, (fmad z, u, v)) 1156def combine_fadd_fma_fmul_to_fmad_or_fma: GICombineRule< 1157 (defs root:$root, build_fn_matchinfo:$info), 1158 (match (wip_match_opcode G_FADD):$root, 1159 [{ return Helper.matchCombineFAddFMAFMulToFMadOrFMA(*${root}, 1160 ${info}); }]), 1161 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 1162 1163// Transform (fadd (fma x, y, (fpext (fmul u, v))), z) -> 1164// (fma x, y, (fma (fpext u), (fpext v), z)) 1165def combine_fadd_fpext_fma_fmul_to_fmad_or_fma: GICombineRule< 1166 (defs root:$root, build_fn_matchinfo:$info), 1167 (match (wip_match_opcode G_FADD):$root, 1168 [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMAAggressive( 1169 *${root}, ${info}); }]), 1170 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 1171 1172// Transform (fsub (fmul x, y), z) -> (fma x, y, -z) 1173// -> (fmad x, y, -z) 1174def combine_fsub_fmul_to_fmad_or_fma: GICombineRule< 1175 (defs root:$root, build_fn_matchinfo:$info), 1176 (match (wip_match_opcode G_FSUB):$root, 1177 [{ return Helper.matchCombineFSubFMulToFMadOrFMA(*${root}, 1178 ${info}); }]), 1179 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 1180 1181// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z)) 1182// (fsub x, (fneg (fmul, y, z))) -> (fma y, z, x) 1183def combine_fsub_fneg_fmul_to_fmad_or_fma: GICombineRule< 1184 (defs root:$root, build_fn_matchinfo:$info), 1185 (match (wip_match_opcode G_FSUB):$root, 1186 [{ return Helper.matchCombineFSubFNegFMulToFMadOrFMA(*${root}, 1187 ${info}); }]), 1188 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 1189 1190// Transform (fsub (fpext (fmul x, y)), z) -> 1191// (fma (fpext x), (fpext y), (fneg z)) 1192def combine_fsub_fpext_fmul_to_fmad_or_fma: GICombineRule< 1193 (defs root:$root, build_fn_matchinfo:$info), 1194 (match (wip_match_opcode G_FSUB):$root, 1195 [{ return Helper.matchCombineFSubFpExtFMulToFMadOrFMA(*${root}, 1196 ${info}); }]), 1197 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 1198 1199// Transform (fsub (fneg (fpext (fmul x, y))), z) -> 1200// (fneg (fma (fpext x), (fpext y), z)) 1201def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule< 1202 (defs root:$root, build_fn_matchinfo:$info), 1203 (match (wip_match_opcode G_FSUB):$root, 1204 [{ return Helper.matchCombineFSubFpExtFNegFMulToFMadOrFMA( 1205 *${root}, ${info}); }]), 1206 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 1207 1208def combine_minmax_nan: GICombineRule< 1209 (defs root:$root, unsigned_matchinfo:$info), 1210 (match (wip_match_opcode G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root, 1211 [{ return Helper.matchCombineFMinMaxNaN(*${root}, ${info}); }]), 1212 (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, ${info}); }])>; 1213 1214// Transform (add x, (sub y, x)) -> y 1215// Transform (add (sub y, x), x) -> y 1216def add_sub_reg_frags : GICombinePatFrag< 1217 (outs root:$dst), (ins $src), 1218 [ 1219 (pattern (G_ADD $dst, $x, $tmp), (G_SUB $tmp, $src, $x)), 1220 (pattern (G_ADD $dst, $tmp, $x), (G_SUB $tmp, $src, $x)) 1221 ]>; 1222def add_sub_reg: GICombineRule < 1223 (defs root:$dst), 1224 (match (add_sub_reg_frags $dst, $src)), 1225 (apply (GIReplaceReg $dst, $src))>; 1226 1227def buildvector_identity_fold : GICombineRule< 1228 (defs root:$build_vector, register_matchinfo:$matchinfo), 1229 (match (wip_match_opcode G_BUILD_VECTOR_TRUNC, G_BUILD_VECTOR):$build_vector, 1230 [{ return Helper.matchBuildVectorIdentityFold(*${build_vector}, ${matchinfo}); }]), 1231 (apply [{ Helper.replaceSingleDefInstWithReg(*${build_vector}, ${matchinfo}); }])>; 1232 1233def trunc_buildvector_fold : GICombineRule< 1234 (defs root:$op, register_matchinfo:$matchinfo), 1235 (match (wip_match_opcode G_TRUNC):$op, 1236 [{ return Helper.matchTruncBuildVectorFold(*${op}, ${matchinfo}); }]), 1237 (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${matchinfo}); }])>; 1238 1239def trunc_lshr_buildvector_fold : GICombineRule< 1240 (defs root:$op, register_matchinfo:$matchinfo), 1241 (match (wip_match_opcode G_TRUNC):$op, 1242 [{ return Helper.matchTruncLshrBuildVectorFold(*${op}, ${matchinfo}); }]), 1243 (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${matchinfo}); }])>; 1244 1245// Transform: 1246// (x + y) - y -> x 1247// (x + y) - x -> y 1248// x - (y + x) -> 0 - y 1249// x - (x + z) -> 0 - z 1250def sub_add_reg: GICombineRule < 1251 (defs root:$root, build_fn_matchinfo:$matchinfo), 1252 (match (wip_match_opcode G_SUB):$root, 1253 [{ return Helper.matchSubAddSameReg(*${root}, ${matchinfo}); }]), 1254 (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>; 1255 1256def bitcast_bitcast_fold : GICombineRule< 1257 (defs root:$dst), 1258 (match (G_BITCAST $dst, $src1):$op, (G_BITCAST $src1, $src0), 1259 [{ return MRI.getType(${src0}.getReg()) == MRI.getType(${dst}.getReg()); }]), 1260 (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${src0}.getReg()); }])>; 1261 1262 1263def fptrunc_fpext_fold : GICombineRule< 1264 (defs root:$dst), 1265 (match (G_FPTRUNC $dst, $src1):$op, (G_FPEXT $src1, $src0), 1266 [{ return MRI.getType(${src0}.getReg()) == MRI.getType(${dst}.getReg()); }]), 1267 (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${src0}.getReg()); }])>; 1268 1269 1270def select_to_minmax: GICombineRule< 1271 (defs root:$root, build_fn_matchinfo:$info), 1272 (match (wip_match_opcode G_SELECT):$root, 1273 [{ return Helper.matchSimplifySelectToMinMax(*${root}, ${info}); }]), 1274 (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>; 1275 1276def match_selects : GICombineRule< 1277 (defs root:$root, build_fn_matchinfo:$matchinfo), 1278 (match (wip_match_opcode G_SELECT):$root, 1279 [{ return Helper.matchSelect(*${root}, ${matchinfo}); }]), 1280 (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>; 1281 1282def match_ands : GICombineRule< 1283 (defs root:$root, build_fn_matchinfo:$matchinfo), 1284 (match (wip_match_opcode G_AND):$root, 1285 [{ return Helper.matchAnd(*${root}, ${matchinfo}); }]), 1286 (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>; 1287 1288def match_ors : GICombineRule< 1289 (defs root:$root, build_fn_matchinfo:$matchinfo), 1290 (match (wip_match_opcode G_OR):$root, 1291 [{ return Helper.matchOr(*${root}, ${matchinfo}); }]), 1292 (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>; 1293 1294// Combines concat operations 1295def concat_matchinfo : GIDefMatchData<"SmallVector<Register>">; 1296def combine_concat_vector : GICombineRule< 1297 (defs root:$root, concat_matchinfo:$matchinfo), 1298 (match (wip_match_opcode G_CONCAT_VECTORS):$root, 1299 [{ return Helper.matchCombineConcatVectors(*${root}, ${matchinfo}); }]), 1300 (apply [{ Helper.applyCombineConcatVectors(*${root}, ${matchinfo}); }])>; 1301 1302// FIXME: These should use the custom predicate feature once it lands. 1303def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero, 1304 undef_to_negative_one, 1305 binop_left_undef_to_zero, 1306 binop_right_undef_to_undef, 1307 unary_undef_to_zero, 1308 propagate_undef_any_op, 1309 propagate_undef_all_ops, 1310 propagate_undef_shuffle_mask, 1311 erase_undef_store, 1312 unmerge_undef, 1313 insert_extract_vec_elt_out_of_bounds]>; 1314 1315def identity_combines : GICombineGroup<[select_same_val, right_identity_zero, 1316 binop_same_val, binop_left_to_zero, 1317 binop_right_to_zero, p2i_to_i2p, 1318 i2p_to_p2i, anyext_trunc_fold, 1319 fneg_fneg_fold, right_identity_one, 1320 add_sub_reg, buildvector_identity_fold, 1321 trunc_buildvector_fold, 1322 trunc_lshr_buildvector_fold, 1323 bitcast_bitcast_fold, fptrunc_fpext_fold, 1324 right_identity_neg_zero_fp, 1325 right_identity_neg_one_fp]>; 1326 1327def const_combines : GICombineGroup<[constant_fold_fp_ops, const_ptradd_to_i2p, 1328 overlapping_and, mulo_by_2, mulo_by_0, 1329 addo_by_0, adde_to_addo, 1330 combine_minmax_nan]>; 1331 1332def known_bits_simplifications : GICombineGroup<[ 1333 redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask, 1334 zext_trunc_fold, icmp_to_true_false_known_bits, icmp_to_lhs_known_bits, 1335 sext_inreg_to_zext_inreg]>; 1336 1337def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend, 1338 narrow_binop_feeding_and]>; 1339 1340def phi_combines : GICombineGroup<[extend_through_phis]>; 1341 1342def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp, 1343 match_selects]>; 1344 1345def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd, 1346 mul_by_neg_one, idempotent_prop]>; 1347 1348def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma, 1349 combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma, 1350 combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma, 1351 combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma, 1352 combine_fsub_fpext_fneg_fmul_to_fmad_or_fma]>; 1353 1354def constant_fold_binops : GICombineGroup<[constant_fold_binop, 1355 constant_fold_fp_binop]>; 1356 1357def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines, 1358 extract_vec_elt_combines, combines_for_extload, combine_extracted_vector_load, 1359 undef_combines, identity_combines, phi_combines, 1360 simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands, shifts_too_big, 1361 reassocs, ptr_add_immed_chain, 1362 shl_ashr_to_sext_inreg, sext_inreg_of_load, 1363 width_reduction_combines, select_combines, 1364 known_bits_simplifications, ext_ext_fold, 1365 not_cmp_fold, opt_brcond_by_inverting_cond, 1366 unmerge_merge, unmerge_cst, unmerge_dead_to_trunc, 1367 unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shift, 1368 const_combines, xor_of_and_with_same_reg, ptr_add_with_zero, 1369 shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine, 1370 div_rem_to_divrem, funnel_shift_combines, commute_shift, 1371 form_bitfield_extract, constant_fold_binops, constant_fold_fma, 1372 constant_fold_cast_op, fabs_fneg_fold, 1373 intdiv_combines, mulh_combines, redundant_neg_operands, 1374 and_or_disjoint_mask, fma_combines, fold_binop_into_select, 1375 sub_add_reg, select_to_minmax, redundant_binop_in_equality, 1376 fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors, 1377 combine_concat_vector, double_icmp_zero_and_or_combine]>; 1378 1379// A combine group used to for prelegalizer combiners at -O0. The combines in 1380// this group have been selected based on experiments to balance code size and 1381// compile time performance. 1382def optnone_combines : GICombineGroup<[trivial_combines, 1383 ptr_add_immed_chain, combines_for_extload, 1384 not_cmp_fold, opt_brcond_by_inverting_cond, combine_concat_vector]>; 1385