/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_INL_H_
#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_INL_H_

#include "mir_to_lir.h"

#include "dex/compiler_internals.h"

namespace art {

/* Mark a temp register as dead.  Does not affect allocation state. */
inline void Mir2Lir::ClobberBody(RegisterInfo* p) {
  if (p->is_temp) {
    DCHECK(!(p->live && p->dirty)) << "Live & dirty temp in clobber";
    p->live = false;
    p->s_reg = INVALID_SREG;
    p->def_start = NULL;
    p->def_end = NULL;
    if (p->pair) {
      // This register is half of a wide pair; clobber the other half as well.
      p->pair = false;
      Clobber(p->partner);
    }
  }
}

inline LIR* Mir2Lir::RawLIR(int dalvik_offset, int opcode, int op0,
                            int op1, int op2, int op3, int op4, LIR* target) {
  LIR* insn = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
  insn->dalvik_offset = dalvik_offset;
  insn->opcode = opcode;
  insn->operands[0] = op0;
  insn->operands[1] = op1;
  insn->operands[2] = op2;
  insn->operands[3] = op3;
  insn->operands[4] = op4;
  insn->target = target;
  SetupResourceMasks(insn);
  if ((opcode == kPseudoTargetLabel) || (opcode == kPseudoSafepointPC) ||
      (opcode == kPseudoExportedPC)) {
    // Always make labels scheduling barriers
    insn->use_mask = insn->def_mask = ENCODE_ALL;
  }
  return insn;
}
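
/*
 * Illustrative sketch (not part of the original file): a pseudo label built
 * through RawLIR gets ENCODE_ALL for both masks, so later passes cannot move
 * instructions across it.  Something along these lines:
 *
 *   LIR* label = RawLIR(current_dalvik_offset_, kPseudoTargetLabel);
 *   // label->use_mask == ENCODE_ALL && label->def_mask == ENCODE_ALL
 *   AppendLIR(label);
 */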

/*
 * The following are building blocks to construct low-level IRs with 0 - 5
 * operands.
 */
inline LIR* Mir2Lir::NewLIR0(int opcode) {
  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & NO_OPERAND))
      << GetTargetInstName(opcode) << " " << opcode << " "
      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
      << current_dalvik_offset_;
  LIR* insn = RawLIR(current_dalvik_offset_, opcode);
  AppendLIR(insn);
  return insn;
}

inline LIR* Mir2Lir::NewLIR1(int opcode, int dest) {
  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_UNARY_OP))
      << GetTargetInstName(opcode) << " " << opcode << " "
      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
      << current_dalvik_offset_;
  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest);
  AppendLIR(insn);
  return insn;
}

inline LIR* Mir2Lir::NewLIR2(int opcode, int dest, int src1) {
  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_BINARY_OP))
      << GetTargetInstName(opcode) << " " << opcode << " "
      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
      << current_dalvik_offset_;
  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1);
  AppendLIR(insn);
  return insn;
}

inline LIR* Mir2Lir::NewLIR3(int opcode, int dest, int src1, int src2) {
  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_TERTIARY_OP))
      << GetTargetInstName(opcode) << " " << opcode << " "
      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
      << current_dalvik_offset_;
  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2);
  AppendLIR(insn);
  return insn;
}

inline LIR* Mir2Lir::NewLIR4(int opcode, int dest, int src1, int src2, int info) {
  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUAD_OP))
      << GetTargetInstName(opcode) << " " << opcode << " "
      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
      << current_dalvik_offset_;
  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2, info);
  AppendLIR(insn);
  return insn;
}

inline LIR* Mir2Lir::NewLIR5(int opcode, int dest, int src1, int src2, int info1,
                             int info2) {
  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUIN_OP))
      << GetTargetInstName(opcode) << " " << opcode << " "
      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
      << current_dalvik_offset_;
  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2, info1, info2);
  AppendLIR(insn);
  return insn;
}
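
/*
 * Usage sketch (illustrative, not from the original source): target backends
 * emit machine instructions through these helpers.  For example, a
 * two-operand register move for some target opcode kFooMovRR (hypothetical
 * name, with hypothetical register operands r_dest and r_src) would be
 * appended to the LIR stream as:
 *
 *   LIR* mov = NewLIR2(kFooMovRR, r_dest, r_src);
 *
 * The DCHECK in each helper verifies that the opcode's operand-count flag
 * (NO_OPERAND, IS_UNARY_OP, ..., IS_QUIN_OP) matches the helper used.
 */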

/*
 * Mark the corresponding bit(s).
 */
inline void Mir2Lir::SetupRegMask(uint64_t* mask, int reg) {
  *mask |= GetRegMaskCommon(reg);
}

/*
 * Set up the proper fields in the resource mask.
 */
inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
  int opcode = lir->opcode;

  if (opcode <= 0) {
    lir->use_mask = lir->def_mask = 0;
    return;
  }

  uint64_t flags = GetTargetInstFlags(opcode);

  if (flags & NEEDS_FIXUP) {
    lir->flags.pcRelFixup = true;
  }

  /* Get the starting size of the instruction's template */
  lir->flags.size = GetInsnSize(lir);

  /* Set up the mask for resources that are updated */
  if (flags & (IS_LOAD | IS_STORE)) {
    /* Default to heap - will catch specialized classes later */
    SetMemRefType(lir, flags & IS_LOAD, kHeapRef);
  }

  /*
   * Conservatively assume the branch here will call out to a function that in
   * turn will trash everything.
   */
  if (flags & IS_BRANCH) {
    lir->def_mask = lir->use_mask = ENCODE_ALL;
    return;
  }

  if (flags & REG_DEF0) {
    SetupRegMask(&lir->def_mask, lir->operands[0]);
  }

  if (flags & REG_DEF1) {
    SetupRegMask(&lir->def_mask, lir->operands[1]);
  }

  if (flags & SETS_CCODES) {
    lir->def_mask |= ENCODE_CCODE;
  }

  if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
    for (int i = 0; i < 4; i++) {
      if (flags & (1 << (kRegUse0 + i))) {
        SetupRegMask(&lir->use_mask, lir->operands[i]);
      }
    }
  }

  if (flags & USES_CCODES) {
    lir->use_mask |= ENCODE_CCODE;
  }

  // Handle target-specific actions
  SetupTargetResourceMasks(lir);
}
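
/*
 * Worked example (illustrative, not from the original source): for a target
 * opcode whose flags include REG_DEF0 | REG_USE1 | SETS_CCODES,
 * SetupResourceMasks() sets the bit for operands[0] in def_mask, the bit for
 * operands[1] in use_mask, and ORs ENCODE_CCODE into def_mask.  A branch
 * (IS_BRANCH) instead gets ENCODE_ALL for both masks, since it may call into
 * code that clobbers every resource.
 */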

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_INL_H_