/external/mesa3d/src/amd/compiler/ |
D | aco_validate.cpp |
    186 if (instr->operands[i].hasRegClass() && instr->operands[i].regClass().is_subdword()) in validate_ir()
    189 if (instr->definitions[0].regClass().is_subdword()) in validate_ir()
    200 …(instr->operands[i].hasRegClass() && instr->operands[i].regClass().is_subdword() && !instr->operan… in validate_ir()
    203 if (instr->definitions[0].regClass().is_subdword() && !instr->definitions[0].isFixed()) in validate_ir()
    224 if (instr->definitions[i].regClass().is_subdword()) in validate_ir()
    284 (op.isTemp() && op.regClass().type() == RegType::sgpr) || in validate_ir()
    288 … (op.isTemp() && op.regClass().type() == RegType::vgpr && op.bytes() <= 4), in validate_ir()
    296 … (op.isTemp() && op.regClass().type() == RegType::vgpr && op.bytes() <= 4), in validate_ir()
    299 (op.isTemp() && op.regClass().type() == RegType::sgpr) || in validate_ir()
    304 if (op.isTemp() && instr->operands[i].regClass().type() == RegType::sgpr) { in validate_ir()
    [all …]
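The validate_ir() hits above all follow one pattern: inspect an operand's or definition's register class before asserting a structural rule (here, that sub-dword classes only appear where the instruction format allows them). Below is a minimal sketch of that pattern with simplified stand-in types; RegClass, Op, and Instr are hypothetical reductions, not the real ACO classes.

    #include <cassert>
    #include <vector>

    enum class RegType { sgpr, vgpr };

    struct RegClass {
       RegType type_;
       unsigned bytes_;
       bool subdword_;
       RegType type() const { return type_; }
       unsigned bytes() const { return bytes_; }
       bool is_subdword() const { return subdword_; }
    };

    struct Op {
       bool has_rc;
       RegClass rc;
       bool hasRegClass() const { return has_rc; }
       RegClass regClass() const { return rc; }
    };

    struct Instr {
       std::vector<Op> operands;
       std::vector<Op> definitions;
    };

    // Mirrors the shape of the checks in validate_ir(): sub-dword operands and
    // definitions are only legal on formats that can address sub-dword
    // registers (reduced here to a single boolean).
    void validate_ir(const Instr& instr, bool can_use_subdword)
    {
       for (const Op& op : instr.operands) {
          if (op.hasRegClass() && op.regClass().is_subdword())
             assert(can_use_subdword && "subdword operand on a non-subdword format");
       }
       for (const Op& def : instr.definitions) {
          if (def.regClass().is_subdword())
             assert(can_use_subdword && "subdword definition on a non-subdword format");
       }
    }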
|
D | aco_lower_to_hw_instr.cpp |
    194 assert(instr->definitions[1].regClass() == bld.lm); in emit_vadd32()
    522 if (src.regClass() == v1b) { in emit_reduction()
    544 } else if (src.regClass() == v2b) { in emit_reduction()
    783 if (reduction_needs_last_op && dst.regClass().type() == RegType::vgpr) { in emit_reduction()
    796 if (dst.regClass().type() == RegType::sgpr) { in emit_reduction()
    829 assert(dst.regClass() == v1); in emit_gfx10_wave64_bpermute()
    830 assert(tmp_exec.regClass() == bld.lm); in emit_gfx10_wave64_bpermute()
    832 assert(same_half.regClass() == bld.lm); in emit_gfx10_wave64_bpermute()
    833 assert(index_x4.regClass() == v1); in emit_gfx10_wave64_bpermute()
    834 assert(input_data.regClass().type() == RegType::vgpr); in emit_gfx10_wave64_bpermute()
    [all …]
|
D | aco_lower_phis.cpp |
    228 assert(cur.regClass() == bld.lm); in lower_divergent_bool_phi()
    231 assert(new_cur.regClass() == bld.lm); in lower_divergent_bool_phi()
    260 if (phi->operands[i].regClass() == phi->definitions[0].regClass()) in lower_subdword_phis()
    267 assert(phi_src.regClass().type() == RegType::sgpr); in lower_subdword_phis()
    270 Temp new_phi_src = bld.tmp(phi->definitions[0].regClass()); in lower_subdword_phis()
    287 …ssert(program->wave_size == 64 ? phi->definitions[0].regClass() != s1 : phi->definitions[0].regCla… in lower_phis()
    288 if (phi->definitions[0].regClass() == program->lane_mask) in lower_phis()
    290 else if (phi->definitions[0].regClass().is_subdword()) in lower_phis()
|
D | aco_instruction_selection.cpp |
    133 assert(mask.isUndefined() || mask.regClass() == bld.lm); in emit_mbcnt()
    165 dst = bld.tmp(src.regClass()); in emit_wqm()
    184 if (index.regClass() == s1) in emit_bpermute()
    321 if (src.regClass() == dst_rc) { in emit_extract_vector()
    329 if (it != ctx->allocated_vec.end() && dst_rc.bytes() == it->second[idx].regClass().bytes()) { in emit_extract_vector()
    330 if (it->second[idx].regClass() == dst_rc) { in emit_extract_vector()
    495 if (vec.regClass() == dst.regClass()) { in byte_align_vector()
    538 assert(val.regClass() == s1); in bool_to_vector_condition()
    539 assert(dst.regClass() == bld.lm); in bool_to_vector_condition()
    550 assert(val.regClass() == bld.lm); in bool_to_scalar_condition()
    [all …]
|
D | aco_register_allocation.cpp |
    205 if (op.regClass().is_subdword()) in fill()
    212 clear(op.physReg(), op.regClass()); in clear()
    216 if (def.regClass().is_subdword()) in fill()
    223 clear(def.physReg(), def.regClass()); in clear()
    492 RegClass rc = instr->definitions[idx].regClass(); in add_subdword_definition()
    570 copy.second.setTemp(ctx.program->allocateTmp(copy.second.regClass())); in update_renames()
    571 ctx.assignments.emplace_back(copy.second.physReg(), copy.second.regClass()); in update_renames()
    832 Definition pc_def = Definition(res.first, pc_op.regClass()); in get_regs_for_copies()
    924 Definition pc_def = Definition(PhysReg{reg_lo}, pc_op.regClass()); in get_regs_for_copies()
    956 reg_file.block(instr->operands[j].physReg(), instr->operands[j].regClass()); in get_reg_impl()
    [all …]
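The fill()/clear() lines above reflect the allocator's bookkeeping: an assignment occupies regClass().bytes() bytes starting at its physical register, and sub-dword classes cover only part of a dword. A minimal sketch of that idea, under the assumption of a byte-granular register file; RegFile, PhysReg, and RegClass here are hypothetical stand-ins, not ACO's real data structures.

    #include <cstdint>
    #include <map>

    struct PhysReg { unsigned reg_b; }; // byte address of the register

    struct RegClass {
       unsigned bytes_;
       bool subdword_;
       unsigned bytes() const { return bytes_; }
       bool is_subdword() const { return subdword_; }
    };

    // Hypothetical simplified register file: maps each occupied byte to the
    // id of the temporary living there.
    struct RegFile {
       std::map<unsigned, uint32_t> byte_to_tmp;

       void fill(PhysReg start, RegClass rc, uint32_t tmp_id) {
          // Sub-dword classes span bytes() < 4 bytes; dword classes span
          // multiples of 4. Either way the occupied range is bytes() long.
          for (unsigned i = 0; i < rc.bytes(); i++)
             byte_to_tmp[start.reg_b + i] = tmp_id;
       }

       void clear(PhysReg start, RegClass rc) {
          for (unsigned i = 0; i < rc.bytes(); i++)
             byte_to_tmp.erase(start.reg_b + i);
       }
    };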
|
D | aco_lower_to_cssa.cpp |
    115 } else if (op.regClass() != phi->definitions[0].regClass()) { in collect_phi_info()
    127 … if (!phi->operands[j].isTemp() || phi->operands[j].regClass() != phi->definitions[0].regClass()) in collect_phi_info()
    149 Temp new_tmp = ctx.program->allocateTmp(phi->definitions[0].regClass()); in collect_phi_info()
|
D | aco_opt_value_numbering.cpp |
    160 if (a->definitions[i].regClass() != b->definitions[i].regClass()) in operator ()()
    395 instr->operands[0].regClass() == instr->definitions[0].regClass()) { in process_block()
    411 assert(instr->definitions[i].regClass() == orig_instr->definitions[i].regClass()); in process_block()
|
D | aco_ir.h |
    348 constexpr RegClass regClass() const noexcept { return (RegClass::RC)reg_class; } in regClass() function
    350 constexpr unsigned bytes() const noexcept { return regClass().bytes(); } in bytes()
    351 constexpr unsigned size() const noexcept { return regClass().size(); } in size()
    352 constexpr RegType type() const noexcept { return regClass().type(); } in type()
    353 constexpr bool is_linear() const noexcept { return regClass().is_linear(); } in is_linear()
    582 constexpr RegClass regClass() const noexcept in regClass() function
    584 return data_.temp.regClass(); in regClass()
    680 return hasRegClass() && regClass().type() == type; in isOfType()
    745 return other.isUndefined() && other.regClass() == regClass();
    817 constexpr RegClass regClass() const noexcept in regClass() function
    [all …]
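These hits show the delegation pattern in aco_ir.h: Temp, Operand, and Definition store only a RegClass, and every derived query (bytes, size, type) goes through regClass(). A sketch of that pattern follows; the packed encoding below is invented for illustration and is not ACO's real bit layout.

    #include <cstdint>

    enum class RegType { sgpr, vgpr };

    class RegClass {
       uint8_t rc; // illustrative encoding: low bits = size in dwords, high bit = vgpr
    public:
       constexpr RegClass(RegType t, unsigned dwords)
          : rc(static_cast<uint8_t>(dwords | (t == RegType::vgpr ? 0x80 : 0))) {}
       constexpr RegType type() const noexcept { return (rc & 0x80) ? RegType::vgpr : RegType::sgpr; }
       constexpr unsigned size() const noexcept { return rc & 0x7f; } // in dwords
       constexpr unsigned bytes() const noexcept { return size() * 4; }
    };

    class Temp {
       uint32_t id_;
       RegClass rc_;
    public:
       constexpr Temp(uint32_t id, RegClass rc) : id_(id), rc_(rc) {}
       constexpr RegClass regClass() const noexcept { return rc_; }
       // Everything else delegates through regClass(), as in the header above.
       constexpr unsigned bytes() const noexcept { return regClass().bytes(); }
       constexpr unsigned size() const noexcept { return regClass().size(); }
       constexpr RegType type() const noexcept { return regClass().type(); }
    };

Keeping the class as the single source of truth means a temporary's width and register file are never stored twice, so they cannot drift apart.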
|
D | aco_reduce_assign.cpp |
    98 reduceTmp = program->allocateTmp(reduceTmp.regClass()); in setup_reduce_temp()
    138 vtmp = program->allocateTmp(vtmp.regClass()); in setup_reduce_temp()
|
D | aco_spill.cpp |
    173 if (op.regClass().type() == RegType::vgpr && op.regClass().is_linear()) in next_uses_per_block()
    346 if (op.regClass().type() == RegType::vgpr && op.regClass().is_linear()) in local_next_uses()
    419 spill_id = ctx.allocate_spill_id(to_spill.regClass()); in init_live_in_vars()
    446 spill_id = ctx.allocate_spill_id(to_spill.regClass()); in init_live_in_vars()
    489 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass()); in init_live_in_vars()
    505 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass()); in init_live_in_vars()
    617 …[block_idx][phi->definitions[0].getTemp()] = ctx.allocate_spill_id(phi->definitions[0].regClass()); in init_live_in_vars()
    678 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass()); in init_live_in_vars()
    701 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass()); in init_live_in_vars()
    755 Temp new_name = ctx.program->allocateTmp(live.first.regClass()); in add_coupling_code()
    [all …]
|
D | aco_optimizer.cpp |
    698 if (op.hasRegClass() && op.regClass().type() == RegType::sgpr) { in check_vop3_operands()
    838 instr->operands[i] = Operand(instr->operands[i].regClass()); in label_instruction()
    840 if (info.is_temp() && info.temp.regClass() == instr->operands[i].getTemp().regClass()) { in label_instruction()
    851 … [] (const Definition& def) { return def.regClass().is_subdword();}); in label_instruction()
    991 base.regClass() == v1 && mubuf->offset + offset < 4096) { in label_instruction()
    997 base.regClass() == s1 && mubuf->offset + offset < 4096) { in label_instruction()
    1013 base.regClass() == instr->operands[i].regClass() && in label_instruction()
    1049 …e_offset(ctx, instr.get(), i, &base, &offset, prevent_overflow) && base.regClass() == s1 && offset… in label_instruction()
    1101 instr->operands[0].regClass() == instr->definitions[0].regClass(); in label_instruction()
    1108 bool accept_subdword = instr->definitions[0].regClass().type() == RegType::vgpr && in label_instruction()
    [all …]
|
D | aco_insert_waitcnt.cpp |
    418 …format == Format::MIMG && !instr->operands[1].isUndefined() && instr->operands[1].regClass() == s4; in check_instr()
    739 insert_wait_entry(ctx, op.physReg(), op.regClass(), event, false, has_sampler); in insert_wait_entry()
    744 insert_wait_entry(ctx, def.physReg(), def.regClass(), event, true, has_sampler); in insert_wait_entry()
    821 …format == Format::MIMG && !instr->operands[1].isUndefined() && instr->operands[1].regClass() == s4; in gen()
    834 instr->operands[1].regClass().type() == RegType::vgpr) { in gen()
|
D | aco_insert_NOPs.cpp |
    370 if (def.regClass().type() != RegType::sgpr) { in handle_instruction_gfx6()
    400 if (!op.isConstant() && !op.isUndefined() && op.regClass().type() == RegType::sgpr) in handle_instruction_gfx6()
    462 if (def.regClass().type() == RegType::sgpr) { in handle_instruction_gfx6()
    500 instr->operands[1].regClass().type() == RegType::vgpr && in handle_instruction_gfx6()
|
D | aco_print_ir.cpp |
    166 print_reg_class(operand->regClass(), output); in print_operand()
    181 print_reg_class(definition->regClass(), output); in print_definition()
|
D | aco_assembler.cpp |
    454 } else if (instr->operands[1].regClass().type() == RegType::vgpr) { in emit_instruction()
    458 if (instr->operands[1].regClass().type() == RegType::sgpr) in emit_instruction()
|
/external/llvm-project/llvm/lib/Target/AMDGPU/ |
D | FLATInstructions.td |
    140 class FLAT_Load_Pseudo <string opName, RegisterClass regClass,
    144 (outs regClass:$vdst),
    152 !if(HasTiedOutput, (ins GLC:$glc, SLC:$slc, DLC:$dlc, regClass:$vdst_in),
    185 multiclass FLAT_Global_Load_Pseudo<string opName, RegisterClass regClass, bit HasTiedInput = 0> {
    187 def "" : FLAT_Load_Pseudo<opName, regClass, HasTiedInput, 1>,
    189 def _SADDR : FLAT_Load_Pseudo<opName, regClass, HasTiedInput, 1, 1>,
    194 class FLAT_Global_Load_AddTid_Pseudo <string opName, RegisterClass regClass,
    197 (outs regClass:$vdst),
    199 !if(HasTiedOutput, (ins regClass:$vdst_in), (ins))),
    213 multiclass FLAT_Global_Store_Pseudo<string opName, RegisterClass regClass> {
    [all …]
|
D | SIWholeQuadMode.cpp |
    944 const TargetRegisterClass *regClass = in lowerCopyInstrs() local
    947 regClass = TRI->getSubRegClass(regClass, SubReg); in lowerCopyInstrs()
    949 const unsigned MovOp = TII->getMovOpcode(regClass); in lowerCopyInstrs()
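lowerCopyInstrs() narrows the register class when the COPY writes a subregister, then asks for a mov opcode that fits the class. A reduced sketch of that control flow follows, with hypothetical stub types standing in for the real TRI/TII interfaces; only the shape of the two calls is taken from the source above.

    struct TargetRegisterClass { unsigned id; };

    struct SIRegisterInfoStub {
       // Stands in for TRI->getSubRegClass(RC, SubReg) in the real pass.
       const TargetRegisterClass* getSubRegClass(const TargetRegisterClass* RC,
                                                 unsigned /*SubReg*/) const {
          return RC; // placeholder; the real TRI narrows RC to the subregister's class
       }
    };

    struct SIInstrInfoStub {
       // Stands in for TII->getMovOpcode(RC), which picks a scalar or vector mov.
       unsigned getMovOpcode(const TargetRegisterClass* RC) const {
          return RC->id; // placeholder opcode choice
       }
    };

    unsigned pickMovOpcode(const SIRegisterInfoStub* TRI, const SIInstrInfoStub* TII,
                           const TargetRegisterClass* regClass, unsigned SubReg)
    {
       if (SubReg) // a COPY to a subregister only moves part of the register
          regClass = TRI->getSubRegClass(regClass, SubReg);
       return TII->getMovOpcode(regClass);
    }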
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | FLATInstructions.td |
    136 class FLAT_Load_Pseudo <string opName, RegisterClass regClass,
    140 (outs regClass:$vdst),
    146 !if(HasTiedOutput, (ins regClass:$vdst_in), (ins))),
    177 multiclass FLAT_Global_Load_Pseudo<string opName, RegisterClass regClass, bit HasTiedInput = 0> {
    179 def "" : FLAT_Load_Pseudo<opName, regClass, HasTiedInput, 1>,
    181 def _SADDR : FLAT_Load_Pseudo<opName, regClass, HasTiedInput, 1, 1>,
    186 multiclass FLAT_Global_Store_Pseudo<string opName, RegisterClass regClass> {
    188 def "" : FLAT_Store_Pseudo<opName, regClass, 1>,
    190 def _SADDR : FLAT_Store_Pseudo<opName, regClass, 1, 1>,
    195 class FLAT_Scratch_Load_Pseudo <string opName, RegisterClass regClass,
    [all …]
|
D | SIWholeQuadMode.cpp |
    862 const TargetRegisterClass *regClass = Register::isVirtualRegister(Reg) in lowerCopyInstrs() local
    866 const unsigned MovOp = TII->getMovOpcode(regClass); in lowerCopyInstrs()
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/PowerPC/ |
D | PPCInstrInfo.h |
    467 int16_t regClass = Desc.OpInfo[OpNo].RegClass; in getRegNumForOperand() local
    468 switch (regClass) { in getRegNumForOperand()
|
/external/llvm-project/llvm/lib/Target/PowerPC/ |
D | PPCInstrInfo.h |
    639 int16_t regClass = Desc.OpInfo[OpNo].RegClass; in getRegNumForOperand() local
    640 switch (regClass) { in getRegNumForOperand()
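Both PowerPC copies read the operand's register-class id out of the instruction descriptor and switch on it to remap the register number. A reduced sketch of that shape; the class id and the +32 remap below are invented for illustration, and the real switch handles PowerPC-specific classes.

    #include <cstdint>

    struct OperandInfo { int16_t RegClass; };          // per-operand register-class id
    struct InstrDesc   { const OperandInfo* OpInfo; }; // simplified instruction descriptor

    unsigned getRegNumForOperand(const InstrDesc& Desc, unsigned Reg, unsigned OpNo)
    {
       int16_t regClass = Desc.OpInfo[OpNo].RegClass;
       switch (regClass) {
       case 1:           // hypothetical class whose encoding needs an offset
          return Reg + 32;
       default:          // most classes encode the register number unchanged
          return Reg;
       }
    }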
|
/external/llvm/lib/Target/AMDGPU/ |
D | SIInstrInfo.td |
    2855 RegisterClass regClass> : MTBUF_m <
    2857 (ins regClass:$vdata, u16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc,
    2869 RegisterClass regClass> : MTBUF_m <
    2870 op, opName, (outs regClass:$dst),
    3131 multiclass MUBUF_Load_Helper <mubuf op, string name, RegisterClass regClass,
    3137 defm _OFFSET : MUBUF_m <op, name#"_offset", (outs regClass:$vdata),
    3147 defm _OFFEN : MUBUF_m <op, name#"_offen", (outs regClass:$vdata),
    3155 defm _IDXEN : MUBUF_m <op, name#"_idxen", (outs regClass:$vdata),
    3163 defm _BOTHEN : MUBUF_m <op, name#"_bothen", (outs regClass:$vdata),
    3170 defm _ADDR64 : MUBUFAddr64_m <op, name#"_addr64", (outs regClass:$vdata),
    [all …]
|
/external/mesa3d/docs/relnotes/ |
D | 20.1.0.rst |
    1197 - aco: refactor regClass setup for subdword VGPRs
|