
Searched full:reg (Results 1 – 25 of 191) sorted by relevance


/arkcompiler/runtime_core/compiler/optimizer/code_generator/target/aarch64/
target.h  117 static inline vixl::aarch64::Register VixlReg(Reg reg) in VixlReg() argument
119 ASSERT(reg.IsValid()); in VixlReg()
120 if (reg.IsScalar()) { in VixlReg()
121 size_t reg_size = reg.GetSize(); in VixlReg()
128 auto vixl_reg = vixl::aarch64::Register(reg.GetId(), reg_size); in VixlReg()
132 if (reg.GetId() == vixl::aarch64::sp.GetCode()) { in VixlReg()
141 static inline vixl::aarch64::Register VixlReg(Reg reg, const uint8_t SIZE) in VixlReg() argument
143 ASSERT(reg.IsValid()); in VixlReg()
144 if (reg.IsScalar()) { in VixlReg()
145 auto vixl_reg = vixl::aarch64::Register(reg.GetId(), (SIZE < WORD_SIZE ? WORD_SIZE : SIZE)); in VixlReg()
[all …]
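A note on the width clamp visible in VixlReg() above: scalars narrower than a machine word are held in a full 32-bit w-register, so the requested size is clamped up before the vixl::aarch64::Register is constructed. A minimal self-contained sketch of just that rule (WORD_SIZE and the clamp come from the snippet; the rest is illustrative):

    #include <cassert>

    constexpr unsigned WORD_SIZE = 32;

    // Sub-word (8/16-bit) scalars are widened to a full w-register width.
    unsigned ClampScalarWidth(unsigned reg_size)
    {
        return reg_size < WORD_SIZE ? WORD_SIZE : reg_size;
    }

    int main()
    {
        assert(ClampScalarWidth(8) == 32);
        assert(ClampScalarWidth(16) == 32);
        assert(ClampScalarWidth(32) == 32);
        assert(ClampScalarWidth(64) == 64);
    }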
encode.cpp  39 static inline Reg Promote(Reg reg) in Promote() argument
41 if (reg.GetType() == INT8_TYPE) { in Promote()
42 return Reg(reg.GetId(), INT16_TYPE); in Promote()
44 return reg; in Promote()
58 // We enable LR tmp reg by default in Aarch64 in Aarch64Encoder()
109 void Aarch64Encoder::EncodeJump(LabelHolder::LabelId id, Reg src0, Reg src1, Condition cc) in EncodeJump()
126 void Aarch64Encoder::EncodeJump(LabelHolder::LabelId id, Reg src, Imm imm, Condition cc) in EncodeJump()
145 void Aarch64Encoder::EncodeJumpTest(LabelHolder::LabelId id, Reg src0, Reg src1, Condition cc) in EncodeJumpTest()
154 void Aarch64Encoder::EncodeJumpTest(LabelHolder::LabelId id, Reg src, Imm imm, Condition cc) in EncodeJumpTest()
166 void Aarch64Encoder::EncodeJump(LabelHolder::LabelId id, Reg src, Condition cc) in EncodeJump()
[all …]
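Promote() above widens 8-bit operands to 16 bits and passes everything else through unchanged. A hedged re-creation with stand-in types (the real Reg and type constants live in the encoder; only the shape of the rule is shown):

    // Stand-ins for the compiler's Reg and numeric type tags.
    enum class RegType { INT8, INT16, INT32, INT64 };

    struct Reg {
        unsigned id;
        RegType type;
    };

    // 8-bit values get promoted to 16 bits before encoding.
    Reg Promote(Reg reg)
    {
        if (reg.type == RegType::INT8) {
            return Reg{reg.id, RegType::INT16};
        }
        return reg;
    }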
regfile.cpp  31 bool Aarch64RegisterDescription::IsRegUsed(ArenaVector<Reg> vec_reg, Reg reg) in IsRegUsed() argument
33 …auto equality = [reg](Reg in) { return (reg.GetId() == in.GetId()) && (reg.GetType() == in.GetType… in IsRegUsed()
37 ArenaVector<Reg> Aarch64RegisterDescription::GetCalleeSaved() in GetCalleeSaved()
39 ArenaVector<Reg> out(GetAllocator()->Adapter()); in GetCalleeSaved()
42 out.emplace_back(Reg(i, FLOAT64_TYPE)); in GetCalleeSaved()
48 out.emplace_back(Reg(i, INT64_TYPE)); in GetCalleeSaved()
54 void Aarch64RegisterDescription::SetCalleeSaved(const ArenaVector<Reg> &regs) in SetCalleeSaved()
60 bool vector_used = IsRegUsed(regs, Reg(i, FLOAT64_TYPE)); in SetCalleeSaved()
66 bool scalar_used = IsRegUsed(regs, Reg(i, INT64_TYPE)); in SetCalleeSaved()
83 void Aarch64RegisterDescription::SetUsedRegs(const ArenaVector<Reg> &regs) in SetUsedRegs()
[all …]
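IsRegUsed() treats a register as "used" only when both the id and the type match an entry of the vector. The same pattern with std::find_if and a stand-in Reg (the real code uses ArenaVector and GetId()/GetType() accessors):

    #include <algorithm>
    #include <vector>

    struct Reg {
        unsigned id;
        int type;
    };

    bool IsRegUsed(const std::vector<Reg> &vec_reg, Reg reg)
    {
        // Id alone is not enough: the same id appears for both the scalar
        // (INT64_TYPE) and vector (FLOAT64_TYPE) entries above.
        auto equality = [reg](Reg in) { return reg.id == in.id && reg.type == in.type; };
        return std::find_if(vec_reg.begin(), vec_reg.end(), equality) != vec_reg.end();
    }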
/arkcompiler/runtime_core/compiler/optimizer/code_generator/
encode.h  144 virtual void Encode##opc(Reg, Reg) \
152 virtual void Encode##opc(Reg, Reg, Reg) \
156 virtual void Encode##opc(Reg, Reg, Imm) \
164 virtual void Encode##opc(Reg, Reg, Shift) \
186 …oid EncodeAddOverflow([[maybe_unused]] compiler::LabelHolder::LabelId id, [[maybe_unused]] Reg dst, in EncodeAddOverflow()
187 … [[maybe_unused]] Reg src0, [[maybe_unused]] Reg src1, [[maybe_unused]] Condition cc) in EncodeAddOverflow()
192 …oid EncodeSubOverflow([[maybe_unused]] compiler::LabelHolder::LabelId id, [[maybe_unused]] Reg dst, in EncodeSubOverflow()
193 … [[maybe_unused]] Reg src0, [[maybe_unused]] Reg src1, [[maybe_unused]] Condition cc) in EncodeSubOverflow()
198 …oid EncodeMulOverflow([[maybe_unused]] compiler::LabelHolder::LabelId id, [[maybe_unused]] Reg dst, in EncodeMulOverflow()
199 … [[maybe_unused]] Reg src0, [[maybe_unused]] Reg src1, [[maybe_unused]] Condition cc) in EncodeMulOverflow()
[all …]
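encode.h evidently stamps out whole families of virtual Encode##opc overloads via token pasting, one macro invocation per opcode. A compilable sketch of the technique; the opcode list and the Unsupported() fallback body are invented for illustration:

    struct Reg {};
    struct Imm {};

    // Each invocation pastes the opcode name into a set of virtual methods,
    // so backends override only the shapes they actually support.
    #define BINARY_OPERATION(opc)                                                \
        virtual void Encode##opc(Reg dst, Reg src0, Reg src1) { Unsupported(); } \
        virtual void Encode##opc(Reg dst, Reg src0, Imm imm) { Unsupported(); }

    class Encoder {
    public:
        virtual ~Encoder() = default;
        BINARY_OPERATION(Add)
        BINARY_OPERATION(Sub)
        BINARY_OPERATION(Mul)

    protected:
        virtual void Unsupported() {}
    };

    #undef BINARY_OPERATION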
target_info.h  27 // caller reg mask: 0000111111000111 and
28 // callee reg mask: 1111000000001000
49 #define DEFINE_NUMERIC_REGISTERS(REG) \ argument
50 REG(0) \
51 REG(1) \
52 REG(2) \
53 REG(3) \
54 REG(4) \
55 REG(5) \
56 REG(6) \
[all …]
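DEFINE_NUMERIC_REGISTERS is a classic X-macro: one register list, several expansions. A sketch under that assumption (the 8-entry list and the two expansions are illustrative, not the real target_info.h contents):

    // The list macro takes another macro and applies it to every register id.
    #define DEFINE_NUMERIC_REGISTERS(REG) \
        REG(0) REG(1) REG(2) REG(3) REG(4) REG(5) REG(6) REG(7)

    // Expansion 1: an enum of register names.
    #define DECLARE_REG(n) R##n,
    enum RegName { DEFINE_NUMERIC_REGISTERS(DECLARE_REG) REG_COUNT };
    #undef DECLARE_REG

    // Expansion 2: a mask with every listed register set.
    #define SET_BIT(n) | (1U << (n))
    constexpr unsigned ALL_REGS_MASK = 0U DEFINE_NUMERIC_REGISTERS(SET_BIT);
    #undef SET_BIT

    static_assert(REG_COUNT == 8, "one list drives both expansions");
    static_assert(ALL_REGS_MASK == 0xFFU, "one bit per listed register");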
slow_path.h  139 void SetTmpReg(Reg reg) in SetTmpReg() argument
141 tmp_reg_ = reg; in SetTmpReg()
143 Reg GetTmpReg() const in GetTmpReg()
149 Reg tmp_reg_ {INVALID_REGISTER};
158 void SetDstReg(Reg reg) in SetDstReg() argument
160 dst_reg_ = reg; in SetDstReg()
163 void SetAddrReg(Reg reg) in SetAddrReg() argument
165 addr_reg_ = reg; in SetAddrReg()
179 Reg dst_reg_ {INVALID_REGISTER};
180 Reg addr_reg_ {INVALID_REGISTER};
[all …]
registers_description.h  33 // caller reg mask: 0000111111000111 and
34 // callee reg mask: 1111000000001000
57 virtual ArenaVector<Reg> GetCalleeSaved() = 0;
58 virtual void SetCalleeSaved(const ArenaVector<Reg> &) = 0;
60 virtual void SetUsedRegs(const ArenaVector<Reg> &) = 0;
62 virtual Reg GetZeroReg() const = 0;
63 virtual bool IsZeroReg(Reg reg) const = 0;
64 virtual Reg::RegIDType GetTempReg() = 0;
65 virtual Reg::RegIDType GetTempVReg() = 0;
74 virtual bool IsCalleeRegister(Reg reg) = 0;
[all …]
codegen.h  133 void Convert(ArenaVector<Reg> *regs_usage, const ArenaVector<bool> *mask, TypeInfo type_info);
135 Reg ConvertRegister(Register ref, DataType::Type type = DataType::Type::INT64);
203 auto reg = ConvertRegister(li->GetReg(), li->GetType()); in GetLiveRegisters()
204 GetEncoder()->SetRegister(&live_regs, &live_fp_regs, reg); in GetLiveRegisters()
231 void InsertTrace(std::initializer_list<std::variant<Reg, Imm>> params);
237 …void CallRuntime(Inst *inst, EntrypointId id, Reg dst_reg, std::initializer_list<std::variant<Reg,…
241 …void CallRuntimeWithMethod(Inst *inst, void *method, EntrypointId eid, Reg dst_reg, Args &&... par… in CallRuntimeWithMethod()
261 void LoadClassFromObject(Reg class_reg, Reg obj_reg);
265 void CreateUnresolvedVirtualMethodLoad(CallInst *vcall, Reg method);
277 void CreatePostWRB(Inst *inst, MemRef mem, Reg reg1, Reg reg2 = INVALID_REGISTER);
[all …]
encoder.md  38 | alignment reg | (push one reg for padding)
42 callee | + optional align-reg| |
44 | + optional align-reg| |
53 caller | + optional align-reg| |
55 | + optional align-reg| |
93 Class **Reg** contains the register number (id) and **TypeInfo**.
98 auto reg = Reg(0, TypeInfo(INT32)); // scalar word register
99 …ASSERT(reg->GetId() == 0 && reg->GetType() == TypeInfo(INT32) && reg->GetSize() == 32 && reg->IsSc…
117 Class **MemRef** contains base **Reg**, index **Reg**, scale **Imm** and disp **Imm**.
124 auto base_reg = Reg(5, TypeInfo(INT64));
[all …]
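Per the encoder.md excerpt, a MemRef operand is (base Reg, index Reg, scale Imm, disp Imm). A sketch of the address such an operand denotes; whether scale is a raw multiplier or a power-of-two shift is not visible in the excerpt, so a shift is assumed here:

    #include <cstdint>

    // Hypothetical effective-address computation for a (base, index, scale,
    // disp) memory operand, assuming scale is a left-shift amount.
    uint64_t EffectiveAddress(uint64_t base, uint64_t index, unsigned scale, int64_t disp)
    {
        return base + (index << scale) + static_cast<uint64_t>(disp);
    }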
/arkcompiler/runtime_core/compiler/optimizer/code_generator/target/amd64/
target.h  201 static inline asmjit::x86::Gp ArchReg(Reg reg, uint8_t size = 0)
203 ASSERT(reg.IsValid());
204 if (reg.IsScalar()) {
205 size_t reg_size = size == 0 ? reg.GetSize() : size;
206 auto arch_id = ConvertRegNumber(reg.GetId());
230 if (reg.GetId() == ConvertRegNumber(asmjit::x86::rsp.id())) {
239 static inline asmjit::x86::Xmm ArchVReg(Reg reg) in ArchVReg() argument
241 ASSERT(reg.IsValid() && reg.IsFloat()); in ArchVReg()
242 auto arch_vreg = asmjit::x86::xmm(reg.GetId()); in ArchVReg()
399 * | AMD64 Reg | Panda Reg |
[all …]
regfile.cpp  31 bool Amd64RegisterDescription::IsRegUsed(ArenaVector<Reg> vec_reg, Reg reg) in IsRegUsed() argument
33 …auto equality = [reg](Reg in) { return (reg.GetId() == in.GetId()) && (reg.GetType() == in.GetType… in IsRegUsed()
37 ArenaVector<Reg> Amd64RegisterDescription::GetCalleeSaved() in GetCalleeSaved()
39 ArenaVector<Reg> out(GetAllocator()->Adapter()); in GetCalleeSaved()
42 out.emplace_back(Reg(i, INT64_TYPE)); in GetCalleeSaved()
45 out.emplace_back(Reg(i, FLOAT64_TYPE)); in GetCalleeSaved()
51 void Amd64RegisterDescription::SetCalleeSaved(const ArenaVector<Reg> &regs) in SetCalleeSaved()
57 bool scalar_used = IsRegUsed(regs, Reg(i, INT64_TYPE)); in SetCalleeSaved()
63 bool vector_used = IsRegUsed(regs, Reg(i, FLOAT64_TYPE)); in SetCalleeSaved()
74 void Amd64RegisterDescription::SetUsedRegs(const ArenaVector<Reg> &regs) in SetUsedRegs()
[all …]
encode.cpp  137 void Amd64Encoder::EncodeJump(LabelHolder::LabelId id, Reg src0, Reg src1, Condition cc) in EncodeJump()
175 void Amd64Encoder::EncodeJump(LabelHolder::LabelId id, Reg src, Imm imm, Condition cc) in EncodeJump()
197 void Amd64Encoder::EncodeJumpTest(LabelHolder::LabelId id, Reg src0, Reg src1, Condition cc) in EncodeJumpTest()
216 void Amd64Encoder::EncodeJumpTest(LabelHolder::LabelId id, Reg src, Imm imm, Condition cc) in EncodeJumpTest()
233 void Amd64Encoder::EncodeJump(LabelHolder::LabelId id, Reg src, Condition cc) in EncodeJump()
252 void Amd64Encoder::EncodeJump(Reg dst) in EncodeJump()
273 void Amd64Encoder::EncodeBitTestAndBranch(LabelHolder::LabelId id, compiler::Reg reg, uint32_t bit_… in EncodeBitTestAndBranch() argument
275 ASSERT(reg.IsScalar() && reg.GetSize() > bit_pos); in EncodeBitTestAndBranch()
277 if (reg.GetSize() == DOUBLE_WORD_SIZE) { in EncodeBitTestAndBranch()
280 GetMasm()->test(ArchReg(reg), ArchReg(tmp_reg)); in EncodeBitTestAndBranch()
[all …]
/arkcompiler/runtime_core/compiler/optimizer/code_generator/target/aarch32/
target.h  119 static inline vixl::aarch32::Register VixlReg(Reg reg) in VixlReg() argument
121 ASSERT(reg.IsValid()); in VixlReg()
122 if (reg.IsScalar()) { in VixlReg()
123 auto vixl_reg = vixl::aarch32::Register(reg.GetId()); in VixlReg()
133 static inline vixl::aarch32::Register VixlRegU(Reg reg) in VixlRegU() argument
135 ASSERT(reg.IsValid()); in VixlRegU()
136 if (reg.IsScalar()) { in VixlRegU()
137 auto vixl_reg = vixl::aarch32::Register(reg.GetId() + 1); in VixlRegU()
138 ASSERT(reg.GetId() <= AVAILABLE_DOUBLE_WORD_REGISTERS * 2U); in VixlRegU()
147 static inline vixl::aarch32::VRegister VixlVReg(Reg reg) in VixlVReg() argument
[all …]
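VixlRegU() above returns the upper word of a 64-bit value that aarch32 holds in the register pair (id, id + 1). A tiny sketch of that rule with the bound check from the snippet (the constant's value here is a stand-in):

    #include <cassert>

    constexpr unsigned AVAILABLE_DOUBLE_WORD_REGISTERS = 6; // illustrative value

    // A 64-bit scalar occupies (id, id + 1); the upper half is id + 1, and
    // only the pairable part of the register file may be used.
    unsigned UpperWordRegId(unsigned id)
    {
        assert(id <= AVAILABLE_DOUBLE_WORD_REGISTERS * 2U);
        return id + 1;
    }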
regfile.cpp  42 aarch32_reg_list_.emplace_back(Reg(i, INT32_TYPE)); in Aarch32RegisterDescription()
43 aarch32_reg_list_.emplace_back(Reg(i, FLOAT32_TYPE)); in Aarch32RegisterDescription()
56 bool Aarch32RegisterDescription::IsRegUsed(ArenaVector<Reg> vec_reg, Reg reg) in IsRegUsed() argument
58 …auto equality = [reg](Reg in) { return (reg.GetId() == in.GetId()) && (reg.GetType() == in.GetType… in IsRegUsed()
63 bool Aarch32RegisterDescription::IsTmp(Reg reg) in IsTmp() argument
65 if (reg.IsScalar()) { in IsTmp()
67 if (it == reg.GetId()) { in IsTmp()
73 ASSERT(reg.IsFloat()); in IsTmp()
75 if (it == reg.GetId()) { in IsTmp()
82 ArenaVector<Reg> Aarch32RegisterDescription::GetCalleeSaved() in GetCalleeSaved()
[all …]
encode.cpp  97 void Aarch32Encoder::EncodeJump(LabelHolder::LabelId id, Reg src0, Reg src1, Condition cc) in EncodeJump()
104 void Aarch32Encoder::EncodeJumpTest(LabelHolder::LabelId id, Reg src0, Reg src1, Condition cc) in EncodeJumpTest()
111 void Aarch32Encoder::EncodeBitTestAndBranch(LabelHolder::LabelId id, Reg reg, uint32_t bit_pos, boo… in EncodeBitTestAndBranch() argument
113 ASSERT(reg.IsScalar() && reg.GetSize() > bit_pos); in EncodeBitTestAndBranch()
115 if (reg.GetSize() == DOUBLE_WORD_SIZE) { in EncodeBitTestAndBranch()
117 GetMasm()->tst(VixlReg(reg), VixlImm(1U << bit_pos)); in EncodeBitTestAndBranch()
119 GetMasm()->tst(VixlRegU(reg), VixlImm(1U << (bit_pos - WORD_SIZE))); in EncodeBitTestAndBranch()
122 GetMasm()->tst(VixlReg(reg), VixlImm(1U << bit_pos)); in EncodeBitTestAndBranch()
131 bool Aarch32Encoder::CompareImmHelper(Reg src, Imm imm, Condition *cc) in CompareImmHelper()
142 void Aarch32Encoder::TestImmHelper(Reg src, Imm imm, [[maybe_unused]] Condition cc) in TestImmHelper()
[all …]
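The aarch32 EncodeBitTestAndBranch() splits a 64-bit bit test across the register pair: bits below WORD_SIZE are tested in the low word, the rest in the high word with the position rebased. A self-contained sketch of exactly that selection logic:

    #include <cstdint>

    constexpr unsigned WORD_SIZE = 32;

    // low/high are the two words of a 64-bit value held in a register pair.
    bool TestBit(uint32_t low, uint32_t high, unsigned bit_pos)
    {
        if (bit_pos < WORD_SIZE) {
            return (low & (1U << bit_pos)) != 0;
        }
        return (high & (1U << (bit_pos - WORD_SIZE))) != 0;
    }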
/arkcompiler/runtime_core/compiler/tests/aarch32/
callconv32_test.cpp  98 // std::variant<Reg, uint8_t> GetNativeParam(const TypeInfo& type) in TEST_F()
105 EXPECT_TRUE(std::holds_alternative<Reg>(ret)); in TEST_F()
106 EXPECT_EQ(std::get<Reg>(ret).GetId(), 0); in TEST_F()
107 EXPECT_EQ(std::get<Reg>(ret), Reg(0, INT8_TYPE)); in TEST_F()
111 EXPECT_TRUE(std::holds_alternative<Reg>(ret)); in TEST_F()
112 EXPECT_EQ(std::get<Reg>(ret).GetId(), i); in TEST_F()
113 EXPECT_EQ(std::get<Reg>(ret), Reg(i, INT8_TYPE)); in TEST_F()
121 EXPECT_TRUE(std::holds_alternative<Reg>(ret)); in TEST_F()
122 EXPECT_EQ(std::get<Reg>(ret).GetId(), 0); in TEST_F()
123 EXPECT_EQ(std::get<Reg>(ret), Reg(0, INT32_TYPE)); in TEST_F()
[all …]
/arkcompiler/runtime_core/compiler/optimizer/optimizations/regalloc/
reg_map.cpp  26 for (size_t reg = priority_reg; reg < reg_mask.GetSize(); ++reg) { in SetMask() local
27 if (!reg_mask.IsSet(reg)) { in SetMask()
28 codegen_reg_map_.push_back(reg); in SetMask()
34 for (size_t reg = 0; reg < priority_reg; ++reg) { in SetMask() local
35 if (!reg_mask.IsSet(reg)) { in SetMask()
36 codegen_reg_map_.push_back(reg); in SetMask()
42 for (size_t reg = 0; reg < reg_mask.GetSize(); ++reg) { in SetMask() local
43 if (reg_mask.IsSet(reg)) { in SetMask()
44 codegen_reg_map_.push_back(reg); in SetMask()
54 for (size_t reg = 0; reg < first_callee_reg; ++reg) { in SetCallerFirstMask() local
[all …]
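SetMask() builds the register-allocation order in three passes: free registers from priority_reg upward, then the free ones below priority_reg, then every blocked (mask-set) register last. A sketch with the mask simplified to std::vector<bool>:

    #include <cstddef>
    #include <vector>

    // Returns register ids ordered as the allocator will try them:
    // free ids wrapping around from priority_reg first, blocked ids last.
    std::vector<std::size_t> BuildRegMap(const std::vector<bool> &reg_mask,
                                         std::size_t priority_reg)
    {
        std::vector<std::size_t> map;
        for (std::size_t reg = priority_reg; reg < reg_mask.size(); ++reg) {
            if (!reg_mask[reg]) map.push_back(reg);
        }
        for (std::size_t reg = 0; reg < priority_reg; ++reg) {
            if (!reg_mask[reg]) map.push_back(reg);
        }
        for (std::size_t reg = 0; reg < reg_mask.size(); ++reg) {
            if (reg_mask[reg]) map.push_back(reg);
        }
        return map;
    }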
/arkcompiler/runtime_core/compiler/tests/aarch64/
callconv64_test.cpp  90 // std::variant<Reg, uint8_t> GetNativeParam(const ArenaVector<TypeInfo>& reg_list, in TEST_F()
97 EXPECT_TRUE(std::holds_alternative<Reg>(ret)); in TEST_F()
98 EXPECT_EQ(std::get<Reg>(ret).GetId(), 0); in TEST_F()
99 EXPECT_EQ(std::get<Reg>(ret), Reg(0, INT8_TYPE)); in TEST_F()
103 EXPECT_TRUE(std::holds_alternative<Reg>(ret)); in TEST_F()
104 EXPECT_EQ(std::get<Reg>(ret).GetId(), i); in TEST_F()
105 EXPECT_EQ(std::get<Reg>(ret), Reg(i, INT8_TYPE)); in TEST_F()
113 EXPECT_TRUE(std::holds_alternative<Reg>(ret)); in TEST_F()
114 EXPECT_EQ(std::get<Reg>(ret).GetId(), 0); in TEST_F()
115 EXPECT_EQ(std::get<Reg>(ret), Reg(0, INT32_TYPE)); in TEST_F()
[all …]
/arkcompiler/runtime_core/compiler/tests/amd64/
callconv64_test.cpp  90 // std::variant<Reg, uint8_t> GetNativeParam(const ArenaVector<TypeInfo>& reg_list, in TEST_F()
98 EXPECT_TRUE(std::holds_alternative<Reg>(ret)); in TEST_F()
99 EXPECT_EQ(std::get<Reg>(ret).GetId(), target.GetParamRegId(0)); in TEST_F()
100 EXPECT_EQ(std::get<Reg>(ret), Reg(target.GetParamRegId(0), INT8_TYPE)); in TEST_F()
104 EXPECT_TRUE(std::holds_alternative<Reg>(ret)); in TEST_F()
105 EXPECT_EQ(std::get<Reg>(ret).GetId(), target.GetParamRegId(i)); in TEST_F()
106 EXPECT_EQ(std::get<Reg>(ret), Reg(target.GetParamRegId(i), INT8_TYPE)); in TEST_F()
114 EXPECT_TRUE(std::holds_alternative<Reg>(ret)); in TEST_F()
115 EXPECT_EQ(std::get<Reg>(ret).GetId(), target.GetParamRegId(0)); in TEST_F()
116 EXPECT_EQ(std::get<Reg>(ret), Reg(target.GetParamRegId(0), INT32_TYPE)); in TEST_F()
[all …]
/arkcompiler/runtime_core/runtime/arch/
asm_support.h  54 #define CFI_DEF_CFA(reg, offset) .cfi_def_cfa reg, (offset) argument
58 #define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg argument
60 #define CFI_REL_OFFSET(reg, offset) .cfi_rel_offset reg, (offset) argument
62 #define CFI_OFFSET(reg, offset) .cfi_offset reg, (offset) argument
68 #define CFI_RESTORE(reg) .cfi_restore reg argument
70 #define CFI_REGISTER(reg, old_reg) .cfi_register reg, old_reg argument
79 #define CFI_DEF_CFA(reg, offset) argument
83 #define CFI_DEF_CFA_REGISTER(reg) argument
85 #define CFI_REL_OFFSET(reg, offset) argument
87 #define CFI_OFFSET(reg, offset) argument
[all …]
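Both expansions of the CFI macros are visible in the excerpt: real .cfi_* assembler directives in one build configuration, empty bodies in the other, so shared .S files never need per-site #ifdefs. A sketch of the pattern for one macro (PANDA_EMIT_CFI is a hypothetical guard name, not the real one):

    // In a header included from assembly sources:
    #ifdef PANDA_EMIT_CFI
    #define CFI_RESTORE(reg) .cfi_restore reg
    #else
    #define CFI_RESTORE(reg)
    #endif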
/arkcompiler/ets_runtime/ecmascript/compiler/assembler/x64/
Dassembler_x64.h181 void EmitRexPrefix(Register reg, Register rm) in EmitRexPrefix() argument
184 // 2: Extension to the MODRM.reg field R in EmitRexPrefix()
185 EmitU8(REX_PREFIX_W | (HighBit(reg) << 2) | HighBit(rm)); in EmitRexPrefix()
188 void EmitRexPrefixl(Register reg, Register rm) in EmitRexPrefixl() argument
191 if (HighBit(reg) != 0 || HighBit(rm) != 0) { in EmitRexPrefixl()
192 // 2: Extension to the MODRM.reg field R in EmitRexPrefixl()
193 EmitU8(REX_PREFIX_FIXED_BITS | (HighBit(reg) << 2) | HighBit(rm)); in EmitRexPrefixl()
197 void EmitRexPrefix(Register reg, Operand rm) in EmitRexPrefix() argument
200 // 2: Extension to the MODRM.reg field R in EmitRexPrefix()
201 EmitU8(REX_PREFIX_W | (HighBit(reg) << 2) | rm.rex_); in EmitRexPrefix()
[all …]
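EmitRexPrefix() composes the x86-64 REX byte, which has the layout 0100WRXB: W selects 64-bit operand size, R extends ModRM.reg, B extends ModRM.rm. The 32-bit variant emits a REX only when an extended register (r8-r15) appears. A sketch with the standard constants (names mirror the snippet; values follow the architecture):

    #include <cstdint>

    constexpr uint8_t REX_PREFIX_FIXED_BITS = 0x40; // 0100_0000
    constexpr uint8_t REX_PREFIX_W = 0x48;          // 0100_1000 = fixed bits + W

    uint8_t HighBit(uint8_t reg_id) { return reg_id >> 3; } // 1 for r8-r15

    // 64-bit operands: REX is mandatory because W must be set.
    uint8_t RexPrefix64(uint8_t reg, uint8_t rm)
    {
        return REX_PREFIX_W | (HighBit(reg) << 2) | HighBit(rm);
    }

    // 32-bit operands: REX is only needed for extended registers.
    bool NeedsRex32(uint8_t reg, uint8_t rm)
    {
        return HighBit(reg) != 0 || HighBit(rm) != 0;
    }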
/arkcompiler/runtime_core/runtime/bridge/arch/aarch64/
handle_call_imm16_v16_aarch64.S  25 // ABI arg reg 1 (x1) <- num_args
29 // ABI arg reg 0 (x0) <- panda::Method*
44 // ABI arg reg 2 (r2) <- boxed arg0 from user's code
49 // ABI arg reg 3 (r3) <- boxed arg1 from user's code
54 // ABI arg reg 4 (r4) <- boxed arg2 from user's code
59 // ABI arg reg 5 (r5) <- boxed arg3 from user's code
64 // ABI arg reg 6 (r6) <- boxed arg4 from user's code
69 // ABI arg reg 7 (r7) <- boxed arg5 from user's code
handle_call_imm4_v4_v4_v4_aarch64.S  21 // ABI arg reg 0 (x0) <- panda::Method*
24 // ABI arg reg 1 (x1/w1) <- num_args
30 // ABI arg reg 2 (x2) <- boxed arg0 from user's code
41 // ABI arg reg 3 (x3) <- boxed arg1 from user's code
48 // ABI arg reg 4 (x4) <- boxed arg2 from user's code
59 // ABI arg reg 5 (x5) <- boxed arg3 from user's code
66 // ABI arg reg 6 (x6) <- boxed arg4 from user's code
/arkcompiler/ets_frontend/es2panda/compiler/templates/
Disa.h.erb119 % map['reg'].push("#{name}_")
188 % registers = op_map['reg'].map {|reg| "&#{reg}"}.join(", ")
202 % for reg in op_map['reg']
203 (*regs)[<%= reg_cnt %>] = &<%= reg %>;
212 % for reg in op_map['reg']
213 (*regs)[<%= reg_cnt %>] = &<%= reg %>;
222 % if op_map['reg'].length != 0
223 ins->regs.reserve(<%= op_map['reg'].length %>);
231 % for reg in op_map['reg']
232 ins->regs.emplace_back(<%= reg %>);
/arkcompiler/ets_runtime/ecmascript/deoptimizer/
calleeReg.cpp  55 int CalleeReg::FindCallRegOrder(const DwarfRegType reg) const in FindCallRegOrder()
57 auto it = reg2Location_.find(static_cast<DwarfReg>(reg)); in FindCallRegOrder()
61 LOG_FULL(FATAL) << "reg:" << std::dec << reg; in FindCallRegOrder()
66 int CalleeReg::FindCallRegOrder(const DwarfReg reg) const in FindCallRegOrder()
68 auto order = FindCallRegOrder(static_cast<DwarfRegType>(reg)); in FindCallRegOrder()
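FindCallRegOrder() is a guarded map lookup from a DWARF register number to its save-slot order, with a fatal log on a miss. A sketch that throws where the runtime logs FATAL (the map type and its contents are stand-ins):

    #include <cstdint>
    #include <stdexcept>
    #include <unordered_map>

    using DwarfRegType = uint16_t;

    int FindCallRegOrder(const std::unordered_map<DwarfRegType, int> &reg2Location,
                         DwarfRegType reg)
    {
        auto it = reg2Location.find(reg);
        if (it == reg2Location.end()) {
            // The real code logs LOG_FULL(FATAL) here instead of throwing.
            throw std::runtime_error("unknown callee-saved register");
        }
        return it->second;
    }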
