/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
/*
 * This file contains codegen and support common to all supported
 * ARM variants.  It is included by:
 *
 *        Codegen-$(TARGET_ARCH_VARIANT).c
 *
 * which combines this common code with specific support found in the
 * applicable directory below this one.
 */
26
27 #include "compiler/Loop.h"
28
29 /* Array holding the entry offset of each template relative to the first one */
30 static intptr_t templateEntryOffsets[TEMPLATE_LAST_MARK];
31
32 /* Track exercised opcodes */
33 static int opcodeCoverage[256];
34
/*
 * Tag a load/store LIR with the kind of memory it touches, by rewriting
 * the memory-reference bits of its use mask (loads) or def mask (stores).
 */
static void setMemRefType(ArmLIR *lir, bool isLoad, int memType)
{
    u8 *maskPtr = isLoad ? &lir->useMask : &lir->defMask;
    u8 memBits = isLoad ? ENCODE_MEM_USE : ENCODE_MEM_DEF;

    assert(EncodingMap[lir->opCode].flags & (IS_LOAD | IS_STORE));

    /* Drop the generic memref bits, then set the specific kind */
    *maskPtr &= ~memBits;
    switch (memType) {
        case kLiteral:
            assert(isLoad);
            *maskPtr |= (ENCODE_LITERAL | ENCODE_LITPOOL_REF);
            break;
        case kDalvikReg:
            *maskPtr |= (ENCODE_DALVIK_REG | ENCODE_FRAME_REF);
            break;
        case kHeapRef:
            *maskPtr |= ENCODE_HEAP_REF;
            break;
        default:
            LOGE("Jit: invalid memref kind - %d", memType);
            assert(0);  /* Bail if debug build, set worst-case in the field */
            *maskPtr |= ENCODE_ALL;
            break;
    }
}
67
68 /*
69 * Mark load/store instructions that access Dalvik registers through rFP +
70 * offset.
71 */
/*
 * Mark a load/store that accesses a Dalvik register through rFP + offset,
 * recording which register it touches in aliasInfo.  The MSB of aliasInfo
 * is set for a 64-bit (double-register) access.
 */
static void annotateDalvikRegAccess(ArmLIR *lir, int regId, bool isLoad)
{
    setMemRefType(lir, isLoad, kDalvikReg);

    /* aliasInfo holds the Dalvik register id; MSB flags a wide access */
    lir->aliasInfo = regId;
    if (DOUBLEREG(lir->operands[0])) {
        lir->aliasInfo |= 0x80000000;
    }
}
85
86 /*
87 * Decode the register id and mark the corresponding bit(s).
88 */
/*
 * Decode a register id and set the corresponding bit(s) in *mask.
 * FP registers start at bit position kFPReg0; a double-precision
 * register covers a pair of consecutive single-precision bits.
 */
static inline void setupRegMask(u8 *mask, int reg)
{
    int regId = reg & 0x1f;
    /* A double register is equal to a pair of single-precision registers */
    u8 seed = DOUBLEREG(reg) ? 3 : 1;
    /* Base the shift at kFPReg0 for FP registers, then add the reg id */
    int shift = (FPREG(reg) ? kFPReg0 : 0) + regId;

    *mask |= seed << shift;
}
105
106 /*
107 * Set up the proper fields in the resource mask
108 */
/*
 * Populate the def/use resource masks of a LIR from the flags of its
 * encoding-table entry.  Pseudo opcodes (opCode <= 0) touch no resources.
 *
 * Changes from the original: reuse the opCode local for the EncodingMap
 * lookup instead of re-reading lir->opCode, and drop the second,
 * redundant IS_BRANCH check (the first already ORs ENCODE_REG_PC into
 * both masks; |= is idempotent, so behavior is unchanged).
 */
static void setupResourceMasks(ArmLIR *lir)
{
    int opCode = lir->opCode;
    int flags;

    if (opCode <= 0) {
        lir->useMask = lir->defMask = 0;
        return;
    }

    flags = EncodingMap[opCode].flags;

    /* Set up the mask for resources that are updated */
    if (flags & (IS_LOAD | IS_STORE)) {
        /* Default to heap - will catch specialized classes later */
        setMemRefType(lir, flags & IS_LOAD, kHeapRef);
    }

    /* Branches both read and write the PC */
    if (flags & IS_BRANCH) {
        lir->defMask |= ENCODE_REG_PC;
        lir->useMask |= ENCODE_REG_PC;
    }

    if (flags & REG_DEF0) {
        setupRegMask(&lir->defMask, lir->operands[0]);
    }

    if (flags & REG_DEF1) {
        setupRegMask(&lir->defMask, lir->operands[1]);
    }

    if (flags & REG_DEF_SP) {
        lir->defMask |= ENCODE_REG_SP;
    }

    if (flags & REG_DEF_LR) {
        lir->defMask |= ENCODE_REG_LR;
    }

    if (flags & REG_DEF_LIST0) {
        lir->defMask |= ENCODE_REG_LIST(lir->operands[0]);
    }

    if (flags & REG_DEF_LIST1) {
        lir->defMask |= ENCODE_REG_LIST(lir->operands[1]);
    }

    if (flags & SETS_CCODES) {
        lir->defMask |= ENCODE_CCODE;
    }

    /* Conservatively treat the IT block as clobbering everything */
    if (flags & IS_IT) {
        lir->defMask = ENCODE_ALL;
    }

    /* Set up the mask for resources that are used */
    if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
        int i;

        for (i = 0; i < 4; i++) {
            if (flags & (1 << (kRegUse0 + i))) {
                setupRegMask(&lir->useMask, lir->operands[i]);
            }
        }
    }

    if (flags & REG_USE_PC) {
        lir->useMask |= ENCODE_REG_PC;
    }

    if (flags & REG_USE_SP) {
        lir->useMask |= ENCODE_REG_SP;
    }

    if (flags & REG_USE_LIST0) {
        lir->useMask |= ENCODE_REG_LIST(lir->operands[0]);
    }

    if (flags & REG_USE_LIST1) {
        lir->useMask |= ENCODE_REG_LIST(lir->operands[1]);
    }

    if (flags & USES_CCODES) {
        lir->useMask |= ENCODE_CCODE;
    }
}
200
201 /*
202 * The following are building blocks to construct low-level IRs with 0 - 4
203 * operands.
204 */
newLIR0(CompilationUnit * cUnit,ArmOpCode opCode)205 static ArmLIR *newLIR0(CompilationUnit *cUnit, ArmOpCode opCode)
206 {
207 ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
208 assert(isPseudoOpCode(opCode) || (EncodingMap[opCode].flags & NO_OPERAND));
209 insn->opCode = opCode;
210 setupResourceMasks(insn);
211 dvmCompilerAppendLIR(cUnit, (LIR *) insn);
212 return insn;
213 }
214
newLIR1(CompilationUnit * cUnit,ArmOpCode opCode,int dest)215 static ArmLIR *newLIR1(CompilationUnit *cUnit, ArmOpCode opCode,
216 int dest)
217 {
218 ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
219 assert(isPseudoOpCode(opCode) || (EncodingMap[opCode].flags & IS_UNARY_OP));
220 insn->opCode = opCode;
221 insn->operands[0] = dest;
222 setupResourceMasks(insn);
223 dvmCompilerAppendLIR(cUnit, (LIR *) insn);
224 return insn;
225 }
226
newLIR2(CompilationUnit * cUnit,ArmOpCode opCode,int dest,int src1)227 static ArmLIR *newLIR2(CompilationUnit *cUnit, ArmOpCode opCode,
228 int dest, int src1)
229 {
230 ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
231 assert(isPseudoOpCode(opCode) ||
232 (EncodingMap[opCode].flags & IS_BINARY_OP));
233 insn->opCode = opCode;
234 insn->operands[0] = dest;
235 insn->operands[1] = src1;
236 setupResourceMasks(insn);
237 dvmCompilerAppendLIR(cUnit, (LIR *) insn);
238 return insn;
239 }
240
newLIR3(CompilationUnit * cUnit,ArmOpCode opCode,int dest,int src1,int src2)241 static ArmLIR *newLIR3(CompilationUnit *cUnit, ArmOpCode opCode,
242 int dest, int src1, int src2)
243 {
244 ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
245 if (!(EncodingMap[opCode].flags & IS_TERTIARY_OP)) {
246 LOGE("Bad LIR3: %s[%d]",EncodingMap[opCode].name,opCode);
247 }
248 assert(isPseudoOpCode(opCode) ||
249 (EncodingMap[opCode].flags & IS_TERTIARY_OP));
250 insn->opCode = opCode;
251 insn->operands[0] = dest;
252 insn->operands[1] = src1;
253 insn->operands[2] = src2;
254 setupResourceMasks(insn);
255 dvmCompilerAppendLIR(cUnit, (LIR *) insn);
256 return insn;
257 }
258
#if defined(_ARMV7_A) || defined(_ARMV7_A_NEON)
/*
 * Allocate a four-operand (quad) LIR, fill in its resource masks, and
 * append it to the code stream of cUnit.  Returns the new instruction.
 * Only used by the ARMv7-A variants.
 */
static ArmLIR *newLIR4(CompilationUnit *cUnit, ArmOpCode opCode,
                           int dest, int src1, int src2, int info)
{
    ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);

    assert(isPseudoOpCode(opCode) ||
           (EncodingMap[opCode].flags & IS_QUAD_OP));
    insn->opCode = opCode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    insn->operands[2] = src2;
    insn->operands[3] = info;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}
#endif
276
277 /*
278 * If the next instruction is a move-result or move-result-long,
279 * return the target Dalvik sReg[s] and convert the next to a
280 * nop. Otherwise, return INVALID_SREG. Used to optimize method inlining.
281 */
inlinedTarget(CompilationUnit * cUnit,MIR * mir,bool fpHint)282 static RegLocation inlinedTarget(CompilationUnit *cUnit, MIR *mir,
283 bool fpHint)
284 {
285 if (mir->next &&
286 ((mir->next->dalvikInsn.opCode == OP_MOVE_RESULT) ||
287 (mir->next->dalvikInsn.opCode == OP_MOVE_RESULT_OBJECT))) {
288 mir->next->dalvikInsn.opCode = OP_NOP;
289 return dvmCompilerGetDest(cUnit, mir->next, 0);
290 } else {
291 RegLocation res = LOC_DALVIK_RETURN_VAL;
292 res.fp = fpHint;
293 return res;
294 }
295 }
296
297 /*
298 * Search the existing constants in the literal pool for an exact or close match
299 * within specified delta (greater or equal to 0).
300 */
scanLiteralPool(CompilationUnit * cUnit,int value,unsigned int delta)301 static ArmLIR *scanLiteralPool(CompilationUnit *cUnit, int value,
302 unsigned int delta)
303 {
304 LIR *dataTarget = cUnit->wordList;
305 while (dataTarget) {
306 if (((unsigned) (value - ((ArmLIR *) dataTarget)->operands[0])) <=
307 delta)
308 return (ArmLIR *) dataTarget;
309 dataTarget = dataTarget->next;
310 }
311 return NULL;
312 }
313
314 /*
315 * The following are building blocks to insert constants into the pool or
316 * instruction streams.
317 */
318
319 /* Add a 32-bit constant either in the constant pool or mixed with code */
addWordData(CompilationUnit * cUnit,int value,bool inPlace)320 static ArmLIR *addWordData(CompilationUnit *cUnit, int value, bool inPlace)
321 {
322 /* Add the constant to the literal pool */
323 if (!inPlace) {
324 ArmLIR *newValue = dvmCompilerNew(sizeof(ArmLIR), true);
325 newValue->operands[0] = value;
326 newValue->generic.next = cUnit->wordList;
327 cUnit->wordList = (LIR *) newValue;
328 return newValue;
329 } else {
330 /* Add the constant in the middle of code stream */
331 newLIR1(cUnit, kArm16BitData, (value & 0xffff));
332 newLIR1(cUnit, kArm16BitData, (value >> 16));
333 }
334 return NULL;
335 }
336
inlinedTargetWide(CompilationUnit * cUnit,MIR * mir,bool fpHint)337 static RegLocation inlinedTargetWide(CompilationUnit *cUnit, MIR *mir,
338 bool fpHint)
339 {
340 if (mir->next &&
341 (mir->next->dalvikInsn.opCode == OP_MOVE_RESULT_WIDE)) {
342 mir->next->dalvikInsn.opCode = OP_NOP;
343 return dvmCompilerGetDestWide(cUnit, mir->next, 0, 1);
344 } else {
345 RegLocation res = LOC_DALVIK_RETURN_VAL_WIDE;
346 res.fp = fpHint;
347 return res;
348 }
349 }
350
351
352 /*
353 * Generate an kArmPseudoBarrier marker to indicate the boundary of special
354 * blocks.
355 */
/*
 * Emit a kArmPseudoBarrier marker delimiting the boundary of a special
 * block.  The barrier is treated as clobbering every resource so that
 * no instruction is scheduled across it.
 */
static void genBarrier(CompilationUnit *cUnit)
{
    ArmLIR *barrier = newLIR0(cUnit, kArmPseudoBarrier);

    barrier->defMask = -1;  /* All resources considered clobbered */
}
362
363 /* Create the PC reconstruction slot if not already done */
/*
 * Ensure a PC-reconstruction cell exists for dOffset — creating one and
 * registering it in cUnit->pcReconstructionList when pcrLabel is NULL —
 * then point "branch" at it.  Returns the (possibly newly created) cell.
 */
extern ArmLIR *genCheckCommon(CompilationUnit *cUnit, int dOffset,
                              ArmLIR *branch,
                              ArmLIR *pcrLabel)
{
    /* Forget all def info (because we might rollback here. Bug #2367397 */
    dvmCompilerResetDefTracking(cUnit);

    if (pcrLabel == NULL) {
        /* Build the placeholder that records the Dalvik PC to rebuild */
        int dPC = (int) (cUnit->method->insns + dOffset);

        pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
        pcrLabel->opCode = kArmPseudoPCReconstructionCell;
        pcrLabel->operands[0] = dPC;
        pcrLabel->operands[1] = dOffset;
        /* Register the placeholder in the growable list */
        dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
    }
    /* Route the branch to the PC reconstruction code */
    branch->generic.target = (LIR *) pcrLabel;
    return pcrLabel;
}
385