1 /*
2 * Copyright (C) 2009 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "Dalvik.h"
18 #include "libdex/DexOpcodes.h"
19 #include "libdex/DexCatch.h"
20 #include "interp/Jit.h"
21 #include "CompilerInternals.h"
22 #include "Dataflow.h"
23
contentIsInsn(const u2 * codePtr)24 static inline bool contentIsInsn(const u2 *codePtr) {
25 u2 instr = *codePtr;
26 Opcode opcode = (Opcode)(instr & 0xff);
27
28 /*
29 * Since the low 8-bit in metadata may look like OP_NOP, we need to check
30 * both the low and whole sub-word to determine whether it is code or data.
31 */
32 return (opcode != OP_NOP || instr == 0);
33 }
34
35 /*
36 * Parse an instruction, return the length of the instruction
37 */
parseInsn(const u2 * codePtr,DecodedInstruction * decInsn,bool printMe)38 static inline int parseInsn(const u2 *codePtr, DecodedInstruction *decInsn,
39 bool printMe)
40 {
41 // Don't parse instruction data
42 if (!contentIsInsn(codePtr)) {
43 return 0;
44 }
45
46 u2 instr = *codePtr;
47 Opcode opcode = dexOpcodeFromCodeUnit(instr);
48
49 dexDecodeInstruction(codePtr, decInsn);
50 if (printMe) {
51 char *decodedString = dvmCompilerGetDalvikDisassembly(decInsn, NULL);
52 ALOGD("%p: %#06x %s", codePtr, opcode, decodedString);
53 }
54 return dexGetWidthFromOpcode(opcode);
55 }
56
57 #define UNKNOWN_TARGET 0xffffffff
58
/*
 * Identify block-ending instructions and collect supplemental information
 * regarding the following instructions.
 *
 * Outputs (written only by the relevant opcodes):
 *   *target   - branch destination as a Dalvik code offset, the callee's
 *               first instruction address for statically resolvable
 *               invokes, or UNKNOWN_TARGET for returns/throws.
 *   *isInvoke - set to true for every invoke variant.
 *   *callee   - the resolved target method for super/static/direct
 *               invokes (may be NULL if resolution failed).
 *
 * Returns true if the instruction ends the current basic block.
 */
static inline bool findBlockBoundary(const Method *caller, MIR *insn,
                                     unsigned int curOffset,
                                     unsigned int *target, bool *isInvoke,
                                     const Method **callee)
{
    switch (insn->dalvikInsn.opcode) {
        /* Target is not compile-time constant */
        case OP_RETURN_VOID:
        case OP_RETURN:
        case OP_RETURN_WIDE:
        case OP_RETURN_OBJECT:
        case OP_THROW:
          *target = UNKNOWN_TARGET;
          break;
        /* Receiver type only known at run time - no static target */
        case OP_INVOKE_VIRTUAL:
        case OP_INVOKE_VIRTUAL_RANGE:
        case OP_INVOKE_INTERFACE:
        case OP_INVOKE_INTERFACE_RANGE:
        case OP_INVOKE_VIRTUAL_QUICK:
        case OP_INVOKE_VIRTUAL_QUICK_RANGE:
            *isInvoke = true;
            break;
        case OP_INVOKE_SUPER:
        case OP_INVOKE_SUPER_RANGE: {
            /* Resolve through the superclass vtable using the resolved
             * method's vtable index */
            int mIndex = caller->clazz->pDvmDex->
                pResMethods[insn->dalvikInsn.vB]->methodIndex;
            const Method *calleeMethod =
                caller->clazz->super->vtable[mIndex];

            /* Only non-native callees have Dalvik code to point at.
             * NOTE(review): the cast truncates the insns pointer on 64-bit
             * hosts - presumably fine for this JIT's 32-bit targets. */
            if (calleeMethod && !dvmIsNativeMethod(calleeMethod)) {
                *target = (unsigned int) calleeMethod->insns;
            }
            *isInvoke = true;
            *callee = calleeMethod;
            break;
        }
        case OP_INVOKE_STATIC:
        case OP_INVOKE_STATIC_RANGE: {
            /* Statically resolved through the method table */
            const Method *calleeMethod =
                caller->clazz->pDvmDex->pResMethods[insn->dalvikInsn.vB];

            if (calleeMethod && !dvmIsNativeMethod(calleeMethod)) {
                *target = (unsigned int) calleeMethod->insns;
            }
            *isInvoke = true;
            *callee = calleeMethod;
            break;
        }
        case OP_INVOKE_SUPER_QUICK:
        case OP_INVOKE_SUPER_QUICK_RANGE: {
            /* Quick form: vB is the vtable index directly */
            const Method *calleeMethod =
                caller->clazz->super->vtable[insn->dalvikInsn.vB];

            if (calleeMethod && !dvmIsNativeMethod(calleeMethod)) {
                *target = (unsigned int) calleeMethod->insns;
            }
            *isInvoke = true;
            *callee = calleeMethod;
            break;
        }
        case OP_INVOKE_DIRECT:
        case OP_INVOKE_DIRECT_RANGE: {
            const Method *calleeMethod =
                caller->clazz->pDvmDex->pResMethods[insn->dalvikInsn.vB];
            if (calleeMethod && !dvmIsNativeMethod(calleeMethod)) {
                *target = (unsigned int) calleeMethod->insns;
            }
            *isInvoke = true;
            *callee = calleeMethod;
            break;
        }
        /* Unconditional branches: vA holds the signed offset */
        case OP_GOTO:
        case OP_GOTO_16:
        case OP_GOTO_32:
            *target = curOffset + (int) insn->dalvikInsn.vA;
            break;

        /* Two-register compares: vC holds the signed offset */
        case OP_IF_EQ:
        case OP_IF_NE:
        case OP_IF_LT:
        case OP_IF_GE:
        case OP_IF_GT:
        case OP_IF_LE:
            *target = curOffset + (int) insn->dalvikInsn.vC;
            break;

        /* Compare-to-zero: vB holds the signed offset */
        case OP_IF_EQZ:
        case OP_IF_NEZ:
        case OP_IF_LTZ:
        case OP_IF_GEZ:
        case OP_IF_GTZ:
        case OP_IF_LEZ:
            *target = curOffset + (int) insn->dalvikInsn.vB;
            break;

        default:
            /* Not a block-ending instruction */
            return false;
    }
    return true;
}
163
isGoto(MIR * insn)164 static inline bool isGoto(MIR *insn)
165 {
166 switch (insn->dalvikInsn.opcode) {
167 case OP_GOTO:
168 case OP_GOTO_16:
169 case OP_GOTO_32:
170 return true;
171 default:
172 return false;
173 }
174 }
175
176 /*
177 * Identify unconditional branch instructions
178 */
isUnconditionalBranch(MIR * insn)179 static inline bool isUnconditionalBranch(MIR *insn)
180 {
181 switch (insn->dalvikInsn.opcode) {
182 case OP_RETURN_VOID:
183 case OP_RETURN:
184 case OP_RETURN_WIDE:
185 case OP_RETURN_OBJECT:
186 return true;
187 default:
188 return isGoto(insn);
189 }
190 }
191
192 /*
193 * dvmHashTableLookup() callback
194 */
compareMethod(const CompilerMethodStats * m1,const CompilerMethodStats * m2)195 static int compareMethod(const CompilerMethodStats *m1,
196 const CompilerMethodStats *m2)
197 {
198 return (int) m1->method - (int) m2->method;
199 }
200
201 /*
202 * Analyze the body of the method to collect high-level information regarding
203 * inlining:
204 * - is empty method?
205 * - is getter/setter?
206 * - can throw exception?
207 *
208 * Currently the inliner only handles getters and setters. When its capability
209 * becomes more sophisticated more information will be retrieved here.
210 */
analyzeInlineTarget(DecodedInstruction * dalvikInsn,int attributes,int offset)211 static int analyzeInlineTarget(DecodedInstruction *dalvikInsn, int attributes,
212 int offset)
213 {
214 int flags = dexGetFlagsFromOpcode(dalvikInsn->opcode);
215 int dalvikOpcode = dalvikInsn->opcode;
216
217 if (flags & kInstrInvoke) {
218 attributes &= ~METHOD_IS_LEAF;
219 }
220
221 if (!(flags & kInstrCanReturn)) {
222 if (!(dvmCompilerDataFlowAttributes[dalvikOpcode] &
223 DF_IS_GETTER)) {
224 attributes &= ~METHOD_IS_GETTER;
225 }
226 if (!(dvmCompilerDataFlowAttributes[dalvikOpcode] &
227 DF_IS_SETTER)) {
228 attributes &= ~METHOD_IS_SETTER;
229 }
230 }
231
232 /*
233 * The expected instruction sequence is setter will never return value and
234 * getter will also do. Clear the bits if the behavior is discovered
235 * otherwise.
236 */
237 if (flags & kInstrCanReturn) {
238 if (dalvikOpcode == OP_RETURN_VOID) {
239 attributes &= ~METHOD_IS_GETTER;
240 }
241 else {
242 attributes &= ~METHOD_IS_SETTER;
243 }
244 }
245
246 if (flags & kInstrCanThrow) {
247 attributes &= ~METHOD_IS_THROW_FREE;
248 }
249
250 if (offset == 0 && dalvikOpcode == OP_RETURN_VOID) {
251 attributes |= METHOD_IS_EMPTY;
252 }
253
254 /*
255 * Check if this opcode is selected for single stepping.
256 * If so, don't inline the callee as there is no stack frame for the
257 * interpreter to single-step through the instruction.
258 */
259 if (SINGLE_STEP_OP(dalvikOpcode)) {
260 attributes &= ~(METHOD_IS_GETTER | METHOD_IS_SETTER);
261 }
262
263 return attributes;
264 }
265
266 /*
267 * Analyze each method whose traces are ever compiled. Collect a variety of
268 * statistics like the ratio of exercised vs overall code and code bloat
269 * ratios. If isCallee is true, also analyze each instruction in more details
270 * to see if it is suitable for inlining.
271 */
dvmCompilerAnalyzeMethodBody(const Method * method,bool isCallee)272 CompilerMethodStats *dvmCompilerAnalyzeMethodBody(const Method *method,
273 bool isCallee)
274 {
275 const DexCode *dexCode = dvmGetMethodCode(method);
276 const u2 *codePtr = dexCode->insns;
277 const u2 *codeEnd = dexCode->insns + dexCode->insnsSize;
278 int insnSize = 0;
279 int hashValue = dvmComputeUtf8Hash(method->name);
280
281 CompilerMethodStats dummyMethodEntry; // For hash table lookup
282 CompilerMethodStats *realMethodEntry; // For hash table storage
283
284 /* For lookup only */
285 dummyMethodEntry.method = method;
286 realMethodEntry = (CompilerMethodStats *)
287 dvmHashTableLookup(gDvmJit.methodStatsTable,
288 hashValue,
289 &dummyMethodEntry,
290 (HashCompareFunc) compareMethod,
291 false);
292
293 /* This method has never been analyzed before - create an entry */
294 if (realMethodEntry == NULL) {
295 realMethodEntry =
296 (CompilerMethodStats *) calloc(1, sizeof(CompilerMethodStats));
297 realMethodEntry->method = method;
298
299 dvmHashTableLookup(gDvmJit.methodStatsTable, hashValue,
300 realMethodEntry,
301 (HashCompareFunc) compareMethod,
302 true);
303 }
304
305 /* This method is invoked as a callee and has been analyzed - just return */
306 if ((isCallee == true) && (realMethodEntry->attributes & METHOD_IS_CALLEE))
307 return realMethodEntry;
308
309 /*
310 * Similarly, return if this method has been compiled before as a hot
311 * method already.
312 */
313 if ((isCallee == false) &&
314 (realMethodEntry->attributes & METHOD_IS_HOT))
315 return realMethodEntry;
316
317 int attributes;
318
319 /* Method hasn't been analyzed for the desired purpose yet */
320 if (isCallee) {
321 /* Aggressively set the attributes until proven otherwise */
322 attributes = METHOD_IS_LEAF | METHOD_IS_THROW_FREE | METHOD_IS_CALLEE |
323 METHOD_IS_GETTER | METHOD_IS_SETTER;
324 } else {
325 attributes = METHOD_IS_HOT;
326 }
327
328 /* Count the number of instructions */
329 while (codePtr < codeEnd) {
330 DecodedInstruction dalvikInsn;
331 int width = parseInsn(codePtr, &dalvikInsn, false);
332
333 /* Terminate when the data section is seen */
334 if (width == 0)
335 break;
336
337 if (isCallee) {
338 attributes = analyzeInlineTarget(&dalvikInsn, attributes, insnSize);
339 }
340
341 insnSize += width;
342 codePtr += width;
343 }
344
345 /*
346 * Only handle simple getters/setters with one instruction followed by
347 * return
348 */
349 if ((attributes & (METHOD_IS_GETTER | METHOD_IS_SETTER)) &&
350 (insnSize != 3)) {
351 attributes &= ~(METHOD_IS_GETTER | METHOD_IS_SETTER);
352 }
353
354 realMethodEntry->dalvikSize = insnSize * 2;
355 realMethodEntry->attributes |= attributes;
356
357 #if 0
358 /* Uncomment the following to explore various callee patterns */
359 if (attributes & METHOD_IS_THROW_FREE) {
360 ALOGE("%s%s is inlinable%s", method->clazz->descriptor, method->name,
361 (attributes & METHOD_IS_EMPTY) ? " empty" : "");
362 }
363
364 if (attributes & METHOD_IS_LEAF) {
365 ALOGE("%s%s is leaf %d%s", method->clazz->descriptor, method->name,
366 insnSize, insnSize < 5 ? " (small)" : "");
367 }
368
369 if (attributes & (METHOD_IS_GETTER | METHOD_IS_SETTER)) {
370 ALOGE("%s%s is %s", method->clazz->descriptor, method->name,
371 attributes & METHOD_IS_GETTER ? "getter": "setter");
372 }
373 if (attributes ==
374 (METHOD_IS_LEAF | METHOD_IS_THROW_FREE | METHOD_IS_CALLEE)) {
375 ALOGE("%s%s is inlinable non setter/getter", method->clazz->descriptor,
376 method->name);
377 }
378 #endif
379
380 return realMethodEntry;
381 }
382
383 /*
384 * Crawl the stack of the thread that requesed compilation to see if any of the
385 * ancestors are on the blacklist.
386 */
filterMethodByCallGraph(Thread * thread,const char * curMethodName)387 static bool filterMethodByCallGraph(Thread *thread, const char *curMethodName)
388 {
389 /* Crawl the Dalvik stack frames and compare the method name*/
390 StackSaveArea *ssaPtr = ((StackSaveArea *) thread->interpSave.curFrame) - 1;
391 while (ssaPtr != ((StackSaveArea *) NULL) - 1) {
392 const Method *method = ssaPtr->method;
393 if (method) {
394 int hashValue = dvmComputeUtf8Hash(method->name);
395 bool found =
396 dvmHashTableLookup(gDvmJit.methodTable, hashValue,
397 (char *) method->name,
398 (HashCompareFunc) strcmp, false) !=
399 NULL;
400 if (found) {
401 ALOGD("Method %s (--> %s) found on the JIT %s list",
402 method->name, curMethodName,
403 gDvmJit.includeSelectedMethod ? "white" : "black");
404 return true;
405 }
406
407 }
408 ssaPtr = ((StackSaveArea *) ssaPtr->prevFrame) - 1;
409 };
410 return false;
411 }
412
413 /*
414 * Since we are including instructions from possibly a cold method into the
415 * current trace, we need to make sure that all the associated information
416 * with the callee is properly initialized. If not, we punt on this inline
417 * target.
418 *
419 * TODO: volatile instructions will be handled later.
420 */
dvmCompilerCanIncludeThisInstruction(const Method * method,const DecodedInstruction * insn)421 bool dvmCompilerCanIncludeThisInstruction(const Method *method,
422 const DecodedInstruction *insn)
423 {
424 switch (insn->opcode) {
425 case OP_NEW_INSTANCE:
426 case OP_CHECK_CAST: {
427 ClassObject *classPtr = (ClassObject *)(void*)
428 (method->clazz->pDvmDex->pResClasses[insn->vB]);
429
430 /* Class hasn't been initialized yet */
431 if (classPtr == NULL) {
432 return false;
433 }
434 return true;
435 }
436 case OP_SGET:
437 case OP_SGET_WIDE:
438 case OP_SGET_OBJECT:
439 case OP_SGET_BOOLEAN:
440 case OP_SGET_BYTE:
441 case OP_SGET_CHAR:
442 case OP_SGET_SHORT:
443 case OP_SPUT:
444 case OP_SPUT_WIDE:
445 case OP_SPUT_OBJECT:
446 case OP_SPUT_BOOLEAN:
447 case OP_SPUT_BYTE:
448 case OP_SPUT_CHAR:
449 case OP_SPUT_SHORT: {
450 void *fieldPtr = (void*)
451 (method->clazz->pDvmDex->pResFields[insn->vB]);
452
453 if (fieldPtr == NULL) {
454 return false;
455 }
456 return true;
457 }
458 case OP_INVOKE_SUPER:
459 case OP_INVOKE_SUPER_RANGE: {
460 int mIndex = method->clazz->pDvmDex->
461 pResMethods[insn->vB]->methodIndex;
462 const Method *calleeMethod = method->clazz->super->vtable[mIndex];
463 if (calleeMethod == NULL) {
464 return false;
465 }
466 return true;
467 }
468 case OP_INVOKE_SUPER_QUICK:
469 case OP_INVOKE_SUPER_QUICK_RANGE: {
470 const Method *calleeMethod = method->clazz->super->vtable[insn->vB];
471 if (calleeMethod == NULL) {
472 return false;
473 }
474 return true;
475 }
476 case OP_INVOKE_STATIC:
477 case OP_INVOKE_STATIC_RANGE:
478 case OP_INVOKE_DIRECT:
479 case OP_INVOKE_DIRECT_RANGE: {
480 const Method *calleeMethod =
481 method->clazz->pDvmDex->pResMethods[insn->vB];
482 if (calleeMethod == NULL) {
483 return false;
484 }
485 return true;
486 }
487 case OP_CONST_CLASS: {
488 void *classPtr = (void*)
489 (method->clazz->pDvmDex->pResClasses[insn->vB]);
490
491 if (classPtr == NULL) {
492 return false;
493 }
494 return true;
495 }
496 case OP_CONST_STRING_JUMBO:
497 case OP_CONST_STRING: {
498 void *strPtr = (void*)
499 (method->clazz->pDvmDex->pResStrings[insn->vB]);
500
501 if (strPtr == NULL) {
502 return false;
503 }
504 return true;
505 }
506 default:
507 return true;
508 }
509 }
510
/*
 * Split an existing block from the specified code offset into two.
 *
 * The original block keeps the MIRs before codeOffset; a new "bottom"
 * block takes the MIR at codeOffset and everything after it, inheriting
 * the original's taken/fallthrough/successor edges. Predecessor bit
 * vectors of all affected blocks are updated accordingly. Returns the
 * bottom block. Aborts the VM if no MIR starts exactly at codeOffset.
 */
static BasicBlock *splitBlock(CompilationUnit *cUnit,
                              unsigned int codeOffset,
                              BasicBlock *origBlock,
                              BasicBlock **immedPredBlockP)
{
    /* Locate the MIR that will become the bottom block's first insn */
    MIR *insn = origBlock->firstMIRInsn;
    while (insn) {
        if (insn->offset == codeOffset) break;
        insn = insn->next;
    }
    if (insn == NULL) {
        ALOGE("Break split failed");
        dvmAbort();
    }
    BasicBlock *bottomBlock = dvmCompilerNewBB(kDalvikByteCode,
                                               cUnit->numBlocks++);
    dvmInsertGrowableList(&cUnit->blockList, (intptr_t) bottomBlock);

    bottomBlock->startOffset = codeOffset;
    bottomBlock->firstMIRInsn = insn;
    bottomBlock->lastMIRInsn = origBlock->lastMIRInsn;

    /* Handle the taken path: it now leaves from the bottom block */
    bottomBlock->taken = origBlock->taken;
    if (bottomBlock->taken) {
        origBlock->taken = NULL;
        dvmCompilerClearBit(bottomBlock->taken->predecessors, origBlock->id);
        dvmCompilerSetBit(bottomBlock->taken->predecessors, bottomBlock->id);
    }

    /* Handle the fallthrough path: orig now falls through into bottom */
    bottomBlock->needFallThroughBranch = origBlock->needFallThroughBranch;
    bottomBlock->fallThrough = origBlock->fallThrough;
    origBlock->fallThrough = bottomBlock;
    origBlock->needFallThroughBranch = true;
    dvmCompilerSetBit(bottomBlock->predecessors, origBlock->id);
    if (bottomBlock->fallThrough) {
        dvmCompilerClearBit(bottomBlock->fallThrough->predecessors,
                            origBlock->id);
        dvmCompilerSetBit(bottomBlock->fallThrough->predecessors,
                          bottomBlock->id);
    }

    /* Handle the successor list (switch targets / catch handlers) */
    if (origBlock->successorBlockList.blockListType != kNotUsed) {
        bottomBlock->successorBlockList = origBlock->successorBlockList;
        origBlock->successorBlockList.blockListType = kNotUsed;
        GrowableListIterator iterator;

        /* Re-point every successor's predecessor bit at the bottom block */
        dvmGrowableListIteratorInit(&bottomBlock->successorBlockList.blocks,
                                    &iterator);
        while (true) {
            SuccessorBlockInfo *successorBlockInfo =
                (SuccessorBlockInfo *) dvmGrowableListIteratorNext(&iterator);
            if (successorBlockInfo == NULL) break;
            BasicBlock *bb = successorBlockInfo->block;
            dvmCompilerClearBit(bb->predecessors, origBlock->id);
            dvmCompilerSetBit(bb->predecessors, bottomBlock->id);
        }
    }

    /* Truncate the original block's MIR chain just before the split point */
    origBlock->lastMIRInsn = insn->prev;

    insn->prev->next = NULL;
    insn->prev = NULL;

    /*
     * Update the immediate predecessor block pointer so that outgoing edges
     * can be applied to the proper block.
     */
    if (immedPredBlockP) {
        assert(*immedPredBlockP == origBlock);
        *immedPredBlockP = bottomBlock;
    }
    return bottomBlock;
}
588
589 /*
590 * Given a code offset, find out the block that starts with it. If the offset
591 * is in the middle of an existing block, split it into two. If immedPredBlockP
592 * is non-null and is the block being split, update *immedPredBlockP to point
593 * to the bottom block so that outgoing edges can be setup properly (by the
594 * caller).
595 */
findBlock(CompilationUnit * cUnit,unsigned int codeOffset,bool split,bool create,BasicBlock ** immedPredBlockP)596 static BasicBlock *findBlock(CompilationUnit *cUnit,
597 unsigned int codeOffset,
598 bool split, bool create,
599 BasicBlock **immedPredBlockP)
600 {
601 GrowableList *blockList = &cUnit->blockList;
602 BasicBlock *bb;
603 unsigned int i;
604
605 for (i = 0; i < blockList->numUsed; i++) {
606 bb = (BasicBlock *) blockList->elemList[i];
607 if (bb->blockType != kDalvikByteCode) continue;
608 if (bb->startOffset == codeOffset) return bb;
609 /* Check if a branch jumps into the middle of an existing block */
610 if ((split == true) && (codeOffset > bb->startOffset) &&
611 (bb->lastMIRInsn != NULL) &&
612 (codeOffset <= bb->lastMIRInsn->offset)) {
613 BasicBlock *newBB = splitBlock(cUnit, codeOffset, bb,
614 bb == *immedPredBlockP ?
615 immedPredBlockP : NULL);
616 return newBB;
617 }
618 }
619 if (create) {
620 bb = dvmCompilerNewBB(kDalvikByteCode, cUnit->numBlocks++);
621 dvmInsertGrowableList(&cUnit->blockList, (intptr_t) bb);
622 bb->startOffset = codeOffset;
623 return bb;
624 }
625 return NULL;
626 }
627
/*
 * Dump the CFG into a DOT graph.
 *
 * Writes "<dirPrefix><class><method><signature>_<entryOffset>.dot" (with
 * shell-hostile characters rewritten) describing every reachable block in
 * dfsOrder: block contents, taken/fallthrough edges, switch/catch
 * successor lists, and immediate-dominator edges. Silently returns if the
 * file cannot be opened.
 */
void dvmDumpCFG(CompilationUnit *cUnit, const char *dirPrefix)
{
    const Method *method = cUnit->method;
    FILE *file;
    char *signature = dexProtoCopyMethodDescriptor(&method->prototype);
    /* 80 chars is ample for "_%x" of a 32-bit offset */
    char startOffset[80];
    sprintf(startOffset, "_%x", cUnit->entryBlock->fallThrough->startOffset);
    /* Arena-allocated, so never freed explicitly */
    char *fileName = (char *) dvmCompilerNew(
                                  strlen(dirPrefix) +
                                  strlen(method->clazz->descriptor) +
                                  strlen(method->name) +
                                  strlen(signature) +
                                  strlen(startOffset) +
                                  strlen(".dot") + 1, true);
    sprintf(fileName, "%s%s%s%s%s.dot", dirPrefix,
            method->clazz->descriptor, method->name, signature, startOffset);
    free(signature);

    /*
     * Convert the special characters into a filesystem- and shell-friendly
     * format.
     */
    int i;
    for (i = strlen(dirPrefix); fileName[i]; i++) {
        if (fileName[i] == '/') {
            fileName[i] = '_';
        } else if (fileName[i] == ';') {
            fileName[i] = '#';
        } else if (fileName[i] == '$') {
            fileName[i] = '+';
        } else if (fileName[i] == '(' || fileName[i] == ')') {
            fileName[i] = '@';
        } else if (fileName[i] == '<' || fileName[i] == '>') {
            fileName[i] = '=';
        }
    }
    file = fopen(fileName, "w");
    if (file == NULL) {
        return;
    }
    fprintf(file, "digraph G {\n");

    fprintf(file, "  rankdir=TB\n");

    int numReachableBlocks = cUnit->numReachableBlocks;
    int idx;
    const GrowableList *blockList = &cUnit->blockList;

    /* Emit one node (plus edges) per reachable block, in DFS order */
    for (idx = 0; idx < numReachableBlocks; idx++) {
        int blockIdx = cUnit->dfsOrder.elemList[idx];
        BasicBlock *bb = (BasicBlock *) dvmGrowableListGetElement(blockList,
                                                                  blockIdx);
        if (bb == NULL) break;
        if (bb->blockType == kEntryBlock) {
            fprintf(file, "  entry [shape=Mdiamond];\n");
        } else if (bb->blockType == kExitBlock) {
            fprintf(file, "  exit [shape=Mdiamond];\n");
        } else if (bb->blockType == kDalvikByteCode) {
            /* Record node listing each MIR (disassembled if SSA is ready) */
            fprintf(file, "  block%04x [shape=record,label = \"{ \\\n",
                    bb->startOffset);
            const MIR *mir;
            fprintf(file, "    {block id %d\\l}%s\\\n", bb->id,
                    bb->firstMIRInsn ? " | " : " ");
            for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
                fprintf(file, "    {%04x %s\\l}%s\\\n", mir->offset,
                        mir->ssaRep ?
                            dvmCompilerFullDisassembler(cUnit, mir) :
                            dexGetOpcodeName(mir->dalvikInsn.opcode),
                        mir->next ? " | " : " ");
            }
            fprintf(file, "  }\"];\n\n");
        } else if (bb->blockType == kExceptionHandling) {
            char blockName[BLOCK_NAME_LEN];

            dvmGetBlockName(bb, blockName);
            fprintf(file, "  %s [shape=invhouse];\n", blockName);
        }

        char blockName1[BLOCK_NAME_LEN], blockName2[BLOCK_NAME_LEN];

        /* Taken edges are drawn dotted, fallthrough edges solid */
        if (bb->taken) {
            dvmGetBlockName(bb, blockName1);
            dvmGetBlockName(bb->taken, blockName2);
            fprintf(file, "  %s:s -> %s:n [style=dotted]\n",
                    blockName1, blockName2);
        }
        if (bb->fallThrough) {
            dvmGetBlockName(bb, blockName1);
            dvmGetBlockName(bb->fallThrough, blockName2);
            fprintf(file, "  %s:s -> %s:n\n", blockName1, blockName2);
        }

        /* Switch targets / catch handlers get an intermediate succ node */
        if (bb->successorBlockList.blockListType != kNotUsed) {
            fprintf(file, "  succ%04x [shape=%s,label = \"{ \\\n",
                    bb->startOffset,
                    (bb->successorBlockList.blockListType == kCatch) ?
                        "Mrecord" : "record");
            GrowableListIterator iterator;
            dvmGrowableListIteratorInit(&bb->successorBlockList.blocks,
                                        &iterator);
            SuccessorBlockInfo *successorBlockInfo =
                (SuccessorBlockInfo *) dvmGrowableListIteratorNext(&iterator);

            int succId = 0;
            while (true) {
                if (successorBlockInfo == NULL) break;

                BasicBlock *destBlock = successorBlockInfo->block;
                SuccessorBlockInfo *nextSuccessorBlockInfo =
                  (SuccessorBlockInfo *) dvmGrowableListIteratorNext(&iterator);

                fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
                        succId++,
                        successorBlockInfo->key,
                        destBlock->startOffset,
                        (nextSuccessorBlockInfo != NULL) ? " | " : " ");

                successorBlockInfo = nextSuccessorBlockInfo;
            }
            fprintf(file, "  }\"];\n\n");

            dvmGetBlockName(bb, blockName1);
            fprintf(file, "  %s:s -> succ%04x:n [style=dashed]\n",
                    blockName1, bb->startOffset);

            /* For switches, also draw each case edge to its target block */
            if (bb->successorBlockList.blockListType == kPackedSwitch ||
                bb->successorBlockList.blockListType == kSparseSwitch) {

                dvmGrowableListIteratorInit(&bb->successorBlockList.blocks,
                                            &iterator);

                succId = 0;
                while (true) {
                    SuccessorBlockInfo *successorBlockInfo =
                        (SuccessorBlockInfo *)
                            dvmGrowableListIteratorNext(&iterator);
                    if (successorBlockInfo == NULL) break;

                    BasicBlock *destBlock = successorBlockInfo->block;

                    dvmGetBlockName(destBlock, blockName2);
                    fprintf(file, "  succ%04x:f%d:e -> %s:n\n",
                            bb->startOffset, succId++,
                            blockName2);
                }
            }
        }
        fprintf(file, "\n");

        /*
         * If we need to debug the dominator tree, uncomment the following code
         * (NOTE(review): currently compiled in - the #if is 1, not 0)
         */
#if 1
        /* Draw the immediate-dominator tree as a parallel "cfg" graph */
        dvmGetBlockName(bb, blockName1);
        fprintf(file, "  cfg%s [label=\"%s\", shape=none];\n",
                blockName1, blockName1);
        if (bb->iDom) {
            dvmGetBlockName(bb->iDom, blockName2);
            fprintf(file, "  cfg%s:s -> cfg%s:n\n\n",
                    blockName2, blockName1);
        }
#endif
    }
    fprintf(file, "}\n");
    fclose(file);
}
795
796 /* Verify if all the successor is connected with all the claimed predecessors */
verifyPredInfo(CompilationUnit * cUnit,BasicBlock * bb)797 static bool verifyPredInfo(CompilationUnit *cUnit, BasicBlock *bb)
798 {
799 BitVectorIterator bvIterator;
800
801 dvmBitVectorIteratorInit(bb->predecessors, &bvIterator);
802 while (true) {
803 int blockIdx = dvmBitVectorIteratorNext(&bvIterator);
804 if (blockIdx == -1) break;
805 BasicBlock *predBB = (BasicBlock *)
806 dvmGrowableListGetElement(&cUnit->blockList, blockIdx);
807 bool found = false;
808 if (predBB->taken == bb) {
809 found = true;
810 } else if (predBB->fallThrough == bb) {
811 found = true;
812 } else if (predBB->successorBlockList.blockListType != kNotUsed) {
813 GrowableListIterator iterator;
814 dvmGrowableListIteratorInit(&predBB->successorBlockList.blocks,
815 &iterator);
816 while (true) {
817 SuccessorBlockInfo *successorBlockInfo =
818 (SuccessorBlockInfo *)
819 dvmGrowableListIteratorNext(&iterator);
820 if (successorBlockInfo == NULL) break;
821 BasicBlock *succBB = successorBlockInfo->block;
822 if (succBB == bb) {
823 found = true;
824 break;
825 }
826 }
827 }
828 if (found == false) {
829 char blockName1[BLOCK_NAME_LEN], blockName2[BLOCK_NAME_LEN];
830 dvmGetBlockName(bb, blockName1);
831 dvmGetBlockName(predBB, blockName2);
832 dvmDumpCFG(cUnit, "/sdcard/cfg/");
833 ALOGE("Successor %s not found from %s",
834 blockName1, blockName2);
835 dvmAbort();
836 }
837 }
838 return true;
839 }
840
/*
 * Identify code ranges in try blocks and set up the empty catch blocks.
 *
 * Marks every 16-bit code-unit offset covered by a try item in
 * cUnit->tryBlockAddr, then pre-creates an (empty) basic block for each
 * catch handler address so later parsing can attach edges to them.
 */
static void processTryCatchBlocks(CompilationUnit *cUnit)
{
    const Method *meth = cUnit->method;
    const DexCode *pCode = dvmGetMethodCode(meth);
    int triesSize = pCode->triesSize;
    int i;
    int offset;

    /* Nothing to do for methods without try items */
    if (triesSize == 0) {
        return;
    }

    const DexTry *pTries = dexGetTries(pCode);
    BitVector *tryBlockAddr = cUnit->tryBlockAddr;

    /* Mark all the insn offsets in Try blocks */
    for (i = 0; i < triesSize; i++) {
        const DexTry* pTry = &pTries[i];
        /* all in 16-bit units */
        int startOffset = pTry->startAddr;
        int endOffset = startOffset + pTry->insnCount;

        for (offset = startOffset; offset < endOffset; offset++) {
            dvmCompilerSetBit(tryBlockAddr, offset);
        }
    }

    /* Iterate over each of the handlers to enqueue the empty Catch blocks */
    offset = dexGetFirstHandlerOffset(pCode);
    int handlersSize = dexGetHandlersSize(pCode);

    for (i = 0; i < handlersSize; i++) {
        DexCatchIterator iterator;
        dexCatchIteratorInit(&iterator, pCode, offset);

        for (;;) {
            DexCatchHandler* handler = dexCatchIteratorNext(&iterator);

            if (handler == NULL) {
                break;
            }

            /*
             * Create dummy catch blocks first. Since these are created before
             * other blocks are processed, "split" is specified as false.
             */
            findBlock(cUnit, handler->address,
                      /* split */
                      false,
                      /* create */
                      true,
                      /* immedPredBlockP */
                      NULL);
        }

        /* Advance to the next handler list */
        offset = dexCatchIteratorGetEndOffset(&iterator, pCode);
    }
}
900
/*
 * Process instructions with the kInstrCanBranch flag.
 *
 * Resolves the branch target (splitting an existing block if the target
 * lands mid-block), wires curBlock's taken edge to it, and - for
 * conditional branches - also wires the fallthrough edge. Note that
 * findBlock may split curBlock itself, which is why &curBlock is passed
 * as immedPredBlockP. Aborts on an opcode that should not carry the flag.
 */
static void processCanBranch(CompilationUnit *cUnit, BasicBlock *curBlock,
                             MIR *insn, int curOffset, int width, int flags,
                             const u2* codePtr, const u2* codeEnd)
{
    /* The signed branch offset lives in a different vreg field per format */
    int target = curOffset;
    switch (insn->dalvikInsn.opcode) {
        case OP_GOTO:
        case OP_GOTO_16:
        case OP_GOTO_32:
            target += (int) insn->dalvikInsn.vA;
            break;
        case OP_IF_EQ:
        case OP_IF_NE:
        case OP_IF_LT:
        case OP_IF_GE:
        case OP_IF_GT:
        case OP_IF_LE:
            target += (int) insn->dalvikInsn.vC;
            break;
        case OP_IF_EQZ:
        case OP_IF_NEZ:
        case OP_IF_LTZ:
        case OP_IF_GEZ:
        case OP_IF_GTZ:
        case OP_IF_LEZ:
            target += (int) insn->dalvikInsn.vB;
            break;
        default:
            ALOGE("Unexpected opcode(%d) with kInstrCanBranch set",
                  insn->dalvikInsn.opcode);
            dvmAbort();
    }
    BasicBlock *takenBlock = findBlock(cUnit, target,
                                       /* split */
                                       true,
                                       /* create */
                                       true,
                                       /* immedPredBlockP */
                                       &curBlock);
    curBlock->taken = takenBlock;
    dvmCompilerSetBit(takenBlock->predecessors, curBlock->id);

    /* Always terminate the current block for conditional branches */
    if (flags & kInstrCanContinue) {
        BasicBlock *fallthroughBlock = findBlock(cUnit,
                                                 curOffset + width,
                                                 /*
                                                  * If the method is processed
                                                  * in sequential order from the
                                                  * beginning, we don't need to
                                                  * specify split for continue
                                                  * blocks. However, this
                                                  * routine can be called by
                                                  * compileLoop, which starts
                                                  * parsing the method from an
                                                  * arbitrary address in the
                                                  * method body.
                                                  */
                                                 true,
                                                 /* create */
                                                 true,
                                                 /* immedPredBlockP */
                                                 &curBlock);
        curBlock->fallThrough = fallthroughBlock;
        dvmCompilerSetBit(fallthroughBlock->predecessors, curBlock->id);
    } else if (codePtr < codeEnd) {
        /* Create a fallthrough block for real instructions (incl. OP_NOP) */
        if (contentIsInsn(codePtr)) {
            findBlock(cUnit, curOffset + width,
                      /* split */
                      false,
                      /* create */
                      true,
                      /* immedPredBlockP */
                      NULL);
        }
    }
}
980
/*
 * Process instructions with the kInstrCanSwitch flag.
 *
 * Decodes the packed/sparse switch payload referenced by the instruction,
 * creates (or splits to find) a basic block for every case target plus the
 * fallthrough, and records them in curBlock's successor list along with
 * each case key. Aborts if curBlock already has a successor list.
 */
static void processCanSwitch(CompilationUnit *cUnit, BasicBlock *curBlock,
                             MIR *insn, int curOffset, int width, int flags)
{
    /* vB is the payload offset relative to the switch opcode */
    u2 *switchData= (u2 *) (cUnit->method->insns + curOffset +
                            insn->dalvikInsn.vB);
    int size;
    int *keyTable;
    int *targetTable;
    int i;
    int firstKey;

    /*
     * Packed switch data format:
     *  ushort ident = 0x0100   magic value
     *  ushort size             number of entries in the table
     *  int first_key           first (and lowest) switch case value
     *  int targets[size]       branch targets, relative to switch opcode
     *
     * Total size is (4+size*2) 16-bit code units.
     */
    if (insn->dalvikInsn.opcode == OP_PACKED_SWITCH) {
        assert(switchData[0] == kPackedSwitchSignature);
        size = switchData[1];
        /* first_key is a 32-bit int stored as two 16-bit units */
        firstKey = switchData[2] | (switchData[3] << 16);
        targetTable = (int *) &switchData[4];
        keyTable = NULL;        // Make the compiler happy
    /*
     * Sparse switch data format:
     *  ushort ident = 0x0200   magic value
     *  ushort size             number of entries in the table; > 0
     *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
     *  int targets[size]       branch targets, relative to switch opcode
     *
     * Total size is (2+size*4) 16-bit code units.
     */
    } else {
        assert(switchData[0] == kSparseSwitchSignature);
        size = switchData[1];
        keyTable = (int *) &switchData[2];
        targetTable = (int *) &switchData[2 + size*2];
        firstKey = 0;           // To make the compiler happy
    }

    if (curBlock->successorBlockList.blockListType != kNotUsed) {
        ALOGE("Successor block list already in use: %d",
              curBlock->successorBlockList.blockListType);
        dvmAbort();
    }
    curBlock->successorBlockList.blockListType =
        (insn->dalvikInsn.opcode == OP_PACKED_SWITCH) ?
        kPackedSwitch : kSparseSwitch;
    dvmInitGrowableList(&curBlock->successorBlockList.blocks, size);

    /* One successor entry per case, keyed by the case value */
    for (i = 0; i < size; i++) {
        BasicBlock *caseBlock = findBlock(cUnit, curOffset + targetTable[i],
                                          /* split */
                                          true,
                                          /* create */
                                          true,
                                          /* immedPredBlockP */
                                          &curBlock);
        SuccessorBlockInfo *successorBlockInfo =
            (SuccessorBlockInfo *) dvmCompilerNew(sizeof(SuccessorBlockInfo),
                                                  false);
        successorBlockInfo->block = caseBlock;
        /* Packed cases are consecutive from firstKey; sparse read keyTable */
        successorBlockInfo->key = (insn->dalvikInsn.opcode == OP_PACKED_SWITCH)?
                                  firstKey + i : keyTable[i];
        dvmInsertGrowableList(&curBlock->successorBlockList.blocks,
                              (intptr_t) successorBlockInfo);
        dvmCompilerSetBit(caseBlock->predecessors, curBlock->id);
    }

    /* Fall-through case */
    BasicBlock *fallthroughBlock = findBlock(cUnit,
                                             curOffset +  width,
                                             /* split */
                                             false,
                                             /* create */
                                             true,
                                             /* immedPredBlockP */
                                             NULL);
    curBlock->fallThrough = fallthroughBlock;
    dvmCompilerSetBit(fallthroughBlock->predecessors, curBlock->id);
}
1066
1067 /* Process instructions with the kInstrCanThrow flag */
processCanThrow(CompilationUnit * cUnit,BasicBlock * curBlock,MIR * insn,int curOffset,int width,int flags,BitVector * tryBlockAddr,const u2 * codePtr,const u2 * codeEnd)1068 static void processCanThrow(CompilationUnit *cUnit, BasicBlock *curBlock,
1069 MIR *insn, int curOffset, int width, int flags,
1070 BitVector *tryBlockAddr, const u2 *codePtr,
1071 const u2* codeEnd)
1072 {
1073 const Method *method = cUnit->method;
1074 const DexCode *dexCode = dvmGetMethodCode(method);
1075
1076 /* In try block */
1077 if (dvmIsBitSet(tryBlockAddr, curOffset)) {
1078 DexCatchIterator iterator;
1079
1080 if (!dexFindCatchHandler(&iterator, dexCode, curOffset)) {
1081 ALOGE("Catch block not found in dexfile for insn %x in %s",
1082 curOffset, method->name);
1083 dvmAbort();
1084
1085 }
1086 if (curBlock->successorBlockList.blockListType != kNotUsed) {
1087 ALOGE("Successor block list already in use: %d",
1088 curBlock->successorBlockList.blockListType);
1089 dvmAbort();
1090 }
1091 curBlock->successorBlockList.blockListType = kCatch;
1092 dvmInitGrowableList(&curBlock->successorBlockList.blocks, 2);
1093
1094 for (;;) {
1095 DexCatchHandler* handler = dexCatchIteratorNext(&iterator);
1096
1097 if (handler == NULL) {
1098 break;
1099 }
1100
1101 BasicBlock *catchBlock = findBlock(cUnit, handler->address,
1102 /* split */
1103 false,
1104 /* create */
1105 false,
1106 /* immedPredBlockP */
1107 NULL);
1108
1109 SuccessorBlockInfo *successorBlockInfo =
1110 (SuccessorBlockInfo *) dvmCompilerNew(sizeof(SuccessorBlockInfo),
1111 false);
1112 successorBlockInfo->block = catchBlock;
1113 successorBlockInfo->key = handler->typeIdx;
1114 dvmInsertGrowableList(&curBlock->successorBlockList.blocks,
1115 (intptr_t) successorBlockInfo);
1116 dvmCompilerSetBit(catchBlock->predecessors, curBlock->id);
1117 }
1118 } else {
1119 BasicBlock *ehBlock = dvmCompilerNewBB(kExceptionHandling,
1120 cUnit->numBlocks++);
1121 curBlock->taken = ehBlock;
1122 dvmInsertGrowableList(&cUnit->blockList, (intptr_t) ehBlock);
1123 ehBlock->startOffset = curOffset;
1124 dvmCompilerSetBit(ehBlock->predecessors, curBlock->id);
1125 }
1126
1127 /*
1128 * Force the current block to terminate.
1129 *
1130 * Data may be present before codeEnd, so we need to parse it to know
1131 * whether it is code or data.
1132 */
1133 if (codePtr < codeEnd) {
1134 /* Create a fallthrough block for real instructions (incl. OP_NOP) */
1135 if (contentIsInsn(codePtr)) {
1136 BasicBlock *fallthroughBlock = findBlock(cUnit,
1137 curOffset + width,
1138 /* split */
1139 false,
1140 /* create */
1141 true,
1142 /* immedPredBlockP */
1143 NULL);
1144 /*
1145 * OP_THROW and OP_THROW_VERIFICATION_ERROR are unconditional
1146 * branches.
1147 */
1148 if (insn->dalvikInsn.opcode != OP_THROW_VERIFICATION_ERROR &&
1149 insn->dalvikInsn.opcode != OP_THROW) {
1150 curBlock->fallThrough = fallthroughBlock;
1151 dvmCompilerSetBit(fallthroughBlock->predecessors, curBlock->id);
1152 }
1153 }
1154 }
1155 }
1156
/*
 * Similar to dvmCompileTrace, but the entity processed here is the whole
 * method.
 *
 * Builds the method-wide CFG (entry/exit blocks plus one block per
 * bytecode region), runs SSA transformation and register allocation,
 * then lowers to LIR and assembles.
 *
 * NOTE: both exit paths return false - success is communicated solely
 * through dvmJitSetCodeAddr / the info structure.
 *
 * TODO: implementation will be revisited when the trace builder can provide
 * whole-method traces.
 */
bool dvmCompileMethod(const Method *method, JitTranslationInfo *info)
{
    CompilationUnit cUnit;
    const DexCode *dexCode = dvmGetMethodCode(method);
    const u2 *codePtr = dexCode->insns;
    const u2 *codeEnd = dexCode->insns + dexCode->insnsSize;
    int numBlocks = 0;
    unsigned int curOffset = 0;

    /*
     * Method already compiled - clear codeAddress so the caller won't
     * clobber the existing entry, and bail.
     */
    if (dvmJitGetMethodAddr(codePtr)) {
        info->codeAddress = NULL;
        return false;
    }

    memset(&cUnit, 0, sizeof(cUnit));
    cUnit.method = method;

    cUnit.jitMode = kJitMethod;

    /* Initialize the block list */
    dvmInitGrowableList(&cUnit.blockList, 4);

    /*
     * FIXME - PC reconstruction list won't be needed after the codegen routines
     * are enhanced to true method mode.
     */
    /* Initialize the PC reconstruction list */
    dvmInitGrowableList(&cUnit.pcReconstructionList, 8);

    /* Allocate the bit-vector to track the beginning of basic blocks */
    BitVector *tryBlockAddr = dvmCompilerAllocBitVector(dexCode->insnsSize,
                                                        true /* expandable */);
    cUnit.tryBlockAddr = tryBlockAddr;

    /* Create the default entry and exit blocks and enter them to the list */
    BasicBlock *entryBlock = dvmCompilerNewBB(kEntryBlock, numBlocks++);
    BasicBlock *exitBlock = dvmCompilerNewBB(kExitBlock, numBlocks++);

    cUnit.entryBlock = entryBlock;
    cUnit.exitBlock = exitBlock;

    dvmInsertGrowableList(&cUnit.blockList, (intptr_t) entryBlock);
    dvmInsertGrowableList(&cUnit.blockList, (intptr_t) exitBlock);

    /* Current block to record parsed instructions */
    BasicBlock *curBlock = dvmCompilerNewBB(kDalvikByteCode, numBlocks++);
    curBlock->startOffset = 0;
    dvmInsertGrowableList(&cUnit.blockList, (intptr_t) curBlock);
    entryBlock->fallThrough = curBlock;
    dvmCompilerSetBit(curBlock->predecessors, entryBlock->id);

    /*
     * Store back the number of blocks since new blocks may be created when
     * accessing cUnit below (processTryCatchBlocks / the helpers bump
     * cUnit.numBlocks directly).
     */
    cUnit.numBlocks = numBlocks;

    /* Identify code range in try blocks and set up the empty catch blocks */
    processTryCatchBlocks(&cUnit);

    /* Parse all instructions and put them into containing basic blocks */
    while (codePtr < codeEnd) {
        MIR *insn = (MIR *) dvmCompilerNew(sizeof(MIR), true);
        insn->offset = curOffset;
        int width = parseInsn(codePtr, &insn->dalvikInsn, false);
        insn->width = width;

        /* Terminate when the data section is seen (parseInsn returns 0) */
        if (width == 0)
            break;

        dvmCompilerAppendMIR(curBlock, insn);

        codePtr += width;
        int flags = dexGetFlagsFromOpcode(insn->dalvikInsn.opcode);

        if (flags & kInstrCanBranch) {
            processCanBranch(&cUnit, curBlock, insn, curOffset, width, flags,
                             codePtr, codeEnd);
        } else if (flags & kInstrCanReturn) {
            /* Returns always flow to the synthetic exit block */
            curBlock->fallThrough = exitBlock;
            dvmCompilerSetBit(exitBlock->predecessors, curBlock->id);
            /*
             * Terminate the current block if there are instructions
             * afterwards.
             */
            if (codePtr < codeEnd) {
                /*
                 * Create a fallthrough block for real instructions
                 * (incl. OP_NOP).
                 */
                if (contentIsInsn(codePtr)) {
                    findBlock(&cUnit, curOffset + width,
                              /* split */
                              false,
                              /* create */
                              true,
                              /* immedPredBlockP */
                              NULL);
                }
            }
        } else if (flags & kInstrCanThrow) {
            processCanThrow(&cUnit, curBlock, insn, curOffset, width, flags,
                            tryBlockAddr, codePtr, codeEnd);
        } else if (flags & kInstrCanSwitch) {
            processCanSwitch(&cUnit, curBlock, insn, curOffset, width, flags);
        }
        curOffset += width;
        BasicBlock *nextBlock = findBlock(&cUnit, curOffset,
                                          /* split */
                                          false,
                                          /* create */
                                          false,
                                          /* immedPredBlockP */
                                          NULL);
        if (nextBlock) {
            /*
             * The next instruction could be the target of a previously parsed
             * forward branch so a block is already created. If the current
             * instruction is not an unconditional branch, connect them through
             * the fall-through link.
             */
            assert(curBlock->fallThrough == NULL ||
                   curBlock->fallThrough == nextBlock ||
                   curBlock->fallThrough == exitBlock);

            if ((curBlock->fallThrough == NULL) &&
                (flags & kInstrCanContinue)) {
                curBlock->fallThrough = nextBlock;
                dvmCompilerSetBit(nextBlock->predecessors, curBlock->id);
            }
            curBlock = nextBlock;
        }
    }

    if (cUnit.printMe) {
        dvmCompilerDumpCompilationUnit(&cUnit);
    }

    /* Adjust this value accordingly once inlining is performed */
    cUnit.numDalvikRegisters = cUnit.method->registersSize;

    /* Verify if all blocks are connected as claimed */
    /* FIXME - to be disabled in the future */
    dvmCompilerDataFlowAnalysisDispatcher(&cUnit, verifyPredInfo,
                                          kAllNodes,
                                          false /* isIterative */);


    /* Perform SSA transformation for the whole method */
    dvmCompilerMethodSSATransformation(&cUnit);

#ifndef ARCH_IA32
    dvmCompilerInitializeRegAlloc(&cUnit);  // Needs to happen after SSA naming

    /* Allocate Registers using simple local allocation scheme */
    dvmCompilerLocalRegAlloc(&cUnit);
#endif

    /* Convert MIR to LIR, etc. */
    dvmCompilerMethodMIR2LIR(&cUnit);

    // Debugging only
    //dvmDumpCFG(&cUnit, "/sdcard/cfg/");

    /* Method is not empty */
    if (cUnit.firstLIRInsn) {
        /* Convert LIR into machine code. Loop for recoverable retries */
        do {
            dvmCompilerAssembleLIR(&cUnit, info);
            cUnit.assemblerRetries++;
            if (cUnit.printMe && cUnit.assemblerStatus != kSuccess)
                ALOGD("Assembler abort #%d on %d",cUnit.assemblerRetries,
                      cUnit.assemblerStatus);
        } while (cUnit.assemblerStatus == kRetryAll);

        if (cUnit.printMe) {
            dvmCompilerCodegenDump(&cUnit);
        }

        if (info->codeAddress) {
            dvmJitSetCodeAddr(dexCode->insns, info->codeAddress,
                              info->instructionSet, true, 0);
            /*
             * Clear the codeAddress for the enclosing trace to reuse the info
             */
            info->codeAddress = NULL;
        }
    }

    return false;
}
1357
/*
 * Extending the trace by crawling the code from curBlock.
 *
 * Returns true when this call parsed new instructions into the trace
 * (so the caller's fixed-point loop should run again); returns false
 * when the block was already visited, or is the synthetic entry/exit
 * block, or only re-crawled already-parsed successors that added nothing.
 */
static bool exhaustTrace(CompilationUnit *cUnit, BasicBlock *curBlock)
{
    unsigned int curOffset = curBlock->startOffset;
    const u2 *codePtr = cUnit->method->insns + curOffset;

    /* Already crawled in this pass - nothing new to add */
    if (curBlock->visited == true) return false;

    curBlock->visited = true;

    /* Entry/exit blocks carry no bytecode to parse */
    if (curBlock->blockType == kEntryBlock ||
        curBlock->blockType == kExitBlock) {
        return false;
    }

    /*
     * Block has been parsed - check the taken/fallThrough in case it is a split
     * block.
     */
    if (curBlock->firstMIRInsn != NULL) {
        bool changed = false;
        if (curBlock->taken)
            changed |= exhaustTrace(cUnit, curBlock->taken);
        if (curBlock->fallThrough)
            changed |= exhaustTrace(cUnit, curBlock->fallThrough);
        return changed;
    }
    while (true) {
        MIR *insn = (MIR *) dvmCompilerNew(sizeof(MIR), true);
        insn->offset = curOffset;
        int width = parseInsn(codePtr, &insn->dalvikInsn, false);
        insn->width = width;

        /* Terminate when the data section is seen (parseInsn returns 0) */
        if (width == 0)
            break;

        dvmCompilerAppendMIR(curBlock, insn);

        codePtr += width;
        int flags = dexGetFlagsFromOpcode(insn->dalvikInsn.opcode);

        /* Stop extending the trace after seeing these instructions */
        if (flags & (kInstrCanReturn | kInstrCanSwitch | kInstrInvoke)) {
            curBlock->fallThrough = cUnit->exitBlock;
            dvmCompilerSetBit(cUnit->exitBlock->predecessors, curBlock->id);
            break;
        } else if (flags & kInstrCanBranch) {
            /* codeEnd is passed as NULL here, unlike dvmCompileMethod */
            processCanBranch(cUnit, curBlock, insn, curOffset, width, flags,
                             codePtr, NULL);
            /* Recursively crawl both outgoing edges of the branch */
            if (curBlock->taken) {
                exhaustTrace(cUnit, curBlock->taken);
            }
            if (curBlock->fallThrough) {
                exhaustTrace(cUnit, curBlock->fallThrough);
            }
            break;
        }
        curOffset += width;
        BasicBlock *nextBlock = findBlock(cUnit, curOffset,
                                          /* split */
                                          false,
                                          /* create */
                                          false,
                                          /* immedPredBlockP */
                                          NULL);
        if (nextBlock) {
            /*
             * The next instruction could be the target of a previously parsed
             * forward branch so a block is already created. If the current
             * instruction is not an unconditional branch, connect them through
             * the fall-through link.
             */
            assert(curBlock->fallThrough == NULL ||
                   curBlock->fallThrough == nextBlock ||
                   curBlock->fallThrough == cUnit->exitBlock);

            if ((curBlock->fallThrough == NULL) &&
                (flags & kInstrCanContinue)) {
                curBlock->needFallThroughBranch = true;
                curBlock->fallThrough = nextBlock;
                dvmCompilerSetBit(nextBlock->predecessors, curBlock->id);
            }
            /* Block has been visited - no more parsing needed */
            if (nextBlock->visited == true) {
                return true;
            }
            curBlock = nextBlock;
        }
    }
    return true;
}
1450
/*
 * Compile a loop.
 *
 * Rebuilds the CFG by crawling from the loop head until a fixed point
 * (exhaustTrace), attaches the backward-chaining / PC-reconstruction /
 * punt blocks, runs loop formation and optimization, then lowers and
 * assembles. Any bail condition falls back to dvmCompileTrace with
 * JIT_OPT_NO_LOOP set so the trace is recompiled without loop mode.
 */
static bool compileLoop(CompilationUnit *cUnit, unsigned int startOffset,
                        JitTraceDescription *desc, int numMaxInsts,
                        JitTranslationInfo *info, jmp_buf *bailPtr,
                        int optHints)
{
    int numBlocks = 0;
    unsigned int curOffset = startOffset;
    bool changed;
    BasicBlock *bb;
#if defined(WITH_JIT_TUNING)
    CompilerMethodStats *methodStats;
#endif

    cUnit->jitMode = kJitLoop;

    /* Initialize the block list */
    dvmInitGrowableList(&cUnit->blockList, 4);

    /* Initialize the PC reconstruction list */
    dvmInitGrowableList(&cUnit->pcReconstructionList, 8);

    /* Create the default entry and exit blocks and enter them to the list */
    BasicBlock *entryBlock = dvmCompilerNewBB(kEntryBlock, numBlocks++);
    entryBlock->startOffset = curOffset;
    BasicBlock *exitBlock = dvmCompilerNewBB(kExitBlock, numBlocks++);

    cUnit->entryBlock = entryBlock;
    cUnit->exitBlock = exitBlock;

    dvmInsertGrowableList(&cUnit->blockList, (intptr_t) entryBlock);
    dvmInsertGrowableList(&cUnit->blockList, (intptr_t) exitBlock);

    /* Current block to record parsed instructions */
    BasicBlock *curBlock = dvmCompilerNewBB(kDalvikByteCode, numBlocks++);
    curBlock->startOffset = curOffset;

    dvmInsertGrowableList(&cUnit->blockList, (intptr_t) curBlock);
    entryBlock->fallThrough = curBlock;
    dvmCompilerSetBit(curBlock->predecessors, entryBlock->id);

    /*
     * Store back the number of blocks since new blocks may be created when
     * accessing cUnit (exhaustTrace and helpers bump cUnit->numBlocks).
     */
    cUnit->numBlocks = numBlocks;

    /* Crawl the code until no pass adds anything new (fixed point) */
    do {
        dvmCompilerDataFlowAnalysisDispatcher(cUnit,
                                              dvmCompilerClearVisitedFlag,
                                              kAllNodes,
                                              false /* isIterative */);
        changed = exhaustTrace(cUnit, curBlock);
    } while (changed);

    /* Backward chaining block */
    bb = dvmCompilerNewBB(kChainingCellBackwardBranch, cUnit->numBlocks++);
    dvmInsertGrowableList(&cUnit->blockList, (intptr_t) bb);
    cUnit->backChainBlock = bb;

    /* A special block to host PC reconstruction code */
    bb = dvmCompilerNewBB(kPCReconstruction, cUnit->numBlocks++);
    dvmInsertGrowableList(&cUnit->blockList, (intptr_t) bb);

    /* And one final block that publishes the PC and raises the exception */
    bb = dvmCompilerNewBB(kExceptionHandling, cUnit->numBlocks++);
    dvmInsertGrowableList(&cUnit->blockList, (intptr_t) bb);
    cUnit->puntBlock = bb;

    cUnit->numDalvikRegisters = cUnit->method->registersSize;

    /* Verify if all blocks are connected as claimed */
    /* FIXME - to be disabled in the future */
    dvmCompilerDataFlowAnalysisDispatcher(cUnit, verifyPredInfo,
                                          kAllNodes,
                                          false /* isIterative */);


    /* Try to identify a loop */
    if (!dvmCompilerBuildLoop(cUnit))
        goto bail;

    dvmCompilerLoopOpt(cUnit);

    /*
     * Change the backward branch to the backward chaining cell after dataflow
     * analysis/optimizations are done.
     */
    dvmCompilerInsertBackwardChaining(cUnit);

#if defined(ARCH_IA32)
    /* Convert MIR to LIR, etc. */
    dvmCompilerMIR2LIR(cUnit, info);
#else
    dvmCompilerInitializeRegAlloc(cUnit);

    /* Allocate Registers using simple local allocation scheme */
    dvmCompilerLocalRegAlloc(cUnit);

    /* Convert MIR to LIR, etc. */
    dvmCompilerMIR2LIR(cUnit);
#endif

    /* Loop contains never executed blocks / heavy instructions */
    if (cUnit->quitLoopMode) {
        if (cUnit->printMe || gDvmJit.receivedSIGUSR2) {
            ALOGD("Loop trace @ offset %04x aborted due to unresolved code info",
                 cUnit->entryBlock->startOffset);
        }
        goto bail;
    }

    /* Convert LIR into machine code. Loop for recoverable retries */
    do {
        dvmCompilerAssembleLIR(cUnit, info);
        cUnit->assemblerRetries++;
        if (cUnit->printMe && cUnit->assemblerStatus != kSuccess)
            ALOGD("Assembler abort #%d on %d", cUnit->assemblerRetries,
                  cUnit->assemblerStatus);
    } while (cUnit->assemblerStatus == kRetryAll);

    /* Loop is too big - bail out */
    if (cUnit->assemblerStatus == kRetryHalve) {
        goto bail;
    }

    if (cUnit->printMe || gDvmJit.receivedSIGUSR2) {
        ALOGD("Loop trace @ offset %04x", cUnit->entryBlock->startOffset);
        dvmCompilerCodegenDump(cUnit);
    }

    /*
     * If this trace uses class objects as constants,
     * dvmJitInstallClassObjectPointers will switch the thread state
     * to running and look up the class pointers using the descriptor/loader
     * tuple stored in the callsite info structure. We need to make this window
     * as short as possible since it is blocking GC.
     */
    if (cUnit->hasClassLiterals && info->codeAddress) {
        dvmJitInstallClassObjectPointers(cUnit, (char *) info->codeAddress);
    }

    /*
     * Since callsiteinfo is allocated from the arena, delay the reset until
     * class pointers are resolved.
     */
    dvmCompilerArenaReset();

    assert(cUnit->assemblerStatus == kSuccess);
#if defined(WITH_JIT_TUNING)
    /* Locate the entry to store compilation statistics for this method */
    methodStats = dvmCompilerAnalyzeMethodBody(desc->method, false);
    methodStats->nativeSize += cUnit->totalSize;
#endif
    return info->codeAddress != NULL;

bail:
    /* Retry the original trace with JIT_OPT_NO_LOOP set (loop mode off) */
    dvmCompilerArenaReset();
    return dvmCompileTrace(desc, numMaxInsts, info, bailPtr,
                           optHints | JIT_OPT_NO_LOOP);
}
1613
searchClassTablePrefix(const Method * method)1614 static bool searchClassTablePrefix(const Method* method) {
1615 if (gDvmJit.classTable == NULL) {
1616 return false;
1617 }
1618 HashIter iter;
1619 HashTable* pTab = gDvmJit.classTable;
1620 for (dvmHashIterBegin(pTab, &iter); !dvmHashIterDone(&iter);
1621 dvmHashIterNext(&iter))
1622 {
1623 const char* str = (const char*) dvmHashIterData(&iter);
1624 if (strncmp(method->clazz->descriptor, str, strlen(str)) == 0) {
1625 return true;
1626 }
1627 }
1628 return false;
1629 }
1630
1631 /*
1632 * Main entry point to start trace compilation. Basic blocks are constructed
1633 * first and they will be passed to the codegen routines to convert Dalvik
1634 * bytecode into machine code.
1635 */
dvmCompileTrace(JitTraceDescription * desc,int numMaxInsts,JitTranslationInfo * info,jmp_buf * bailPtr,int optHints)1636 bool dvmCompileTrace(JitTraceDescription *desc, int numMaxInsts,
1637 JitTranslationInfo *info, jmp_buf *bailPtr,
1638 int optHints)
1639 {
1640 const DexCode *dexCode = dvmGetMethodCode(desc->method);
1641 const JitTraceRun* currRun = &desc->trace[0];
1642 unsigned int curOffset = currRun->info.frag.startOffset;
1643 unsigned int startOffset = curOffset;
1644 unsigned int numInsts = currRun->info.frag.numInsts;
1645 const u2 *codePtr = dexCode->insns + curOffset;
1646 int traceSize = 0; // # of half-words
1647 const u2 *startCodePtr = codePtr;
1648 BasicBlock *curBB, *entryCodeBB;
1649 int numBlocks = 0;
1650 static int compilationId;
1651 CompilationUnit cUnit;
1652 GrowableList *blockList;
1653 #if defined(WITH_JIT_TUNING)
1654 CompilerMethodStats *methodStats;
1655 #endif
1656
1657 /* If we've already compiled this trace, just return success */
1658 if (dvmJitGetTraceAddr(startCodePtr) && !info->discardResult) {
1659 /*
1660 * Make sure the codeAddress is NULL so that it won't clobber the
1661 * existing entry.
1662 */
1663 info->codeAddress = NULL;
1664 return true;
1665 }
1666
1667 /* If the work order is stale, discard it */
1668 if (info->cacheVersion != gDvmJit.cacheVersion) {
1669 return false;
1670 }
1671
1672 compilationId++;
1673 memset(&cUnit, 0, sizeof(CompilationUnit));
1674
1675 #if defined(WITH_JIT_TUNING)
1676 /* Locate the entry to store compilation statistics for this method */
1677 methodStats = dvmCompilerAnalyzeMethodBody(desc->method, false);
1678 #endif
1679
1680 /* Set the recover buffer pointer */
1681 cUnit.bailPtr = bailPtr;
1682
1683 /* Initialize the printMe flag */
1684 cUnit.printMe = gDvmJit.printMe;
1685
1686 /* Setup the method */
1687 cUnit.method = desc->method;
1688
1689 /* Store the trace descriptor and set the initial mode */
1690 cUnit.traceDesc = desc;
1691 cUnit.jitMode = kJitTrace;
1692
1693 /* Initialize the PC reconstruction list */
1694 dvmInitGrowableList(&cUnit.pcReconstructionList, 8);
1695
1696 /* Initialize the basic block list */
1697 blockList = &cUnit.blockList;
1698 dvmInitGrowableList(blockList, 8);
1699
1700 /* Identify traces that we don't want to compile */
1701 if (gDvmJit.classTable) {
1702 bool classFound = searchClassTablePrefix(desc->method);
1703 if (gDvmJit.classTable && gDvmJit.includeSelectedMethod != classFound) {
1704 return false;
1705 }
1706 }
1707 if (gDvmJit.methodTable) {
1708 int len = strlen(desc->method->clazz->descriptor) +
1709 strlen(desc->method->name) + 1;
1710 char *fullSignature = (char *)dvmCompilerNew(len, true);
1711 strcpy(fullSignature, desc->method->clazz->descriptor);
1712 strcat(fullSignature, desc->method->name);
1713
1714 int hashValue = dvmComputeUtf8Hash(fullSignature);
1715
1716 /*
1717 * Doing three levels of screening to see whether we want to skip
1718 * compiling this method
1719 */
1720
1721 /* First, check the full "class;method" signature */
1722 bool methodFound =
1723 dvmHashTableLookup(gDvmJit.methodTable, hashValue,
1724 fullSignature, (HashCompareFunc) strcmp,
1725 false) !=
1726 NULL;
1727
1728 /* Full signature not found - check the enclosing class */
1729 if (methodFound == false) {
1730 int hashValue = dvmComputeUtf8Hash(desc->method->clazz->descriptor);
1731 methodFound =
1732 dvmHashTableLookup(gDvmJit.methodTable, hashValue,
1733 (char *) desc->method->clazz->descriptor,
1734 (HashCompareFunc) strcmp, false) !=
1735 NULL;
1736 /* Enclosing class not found - check the method name */
1737 if (methodFound == false) {
1738 int hashValue = dvmComputeUtf8Hash(desc->method->name);
1739 methodFound =
1740 dvmHashTableLookup(gDvmJit.methodTable, hashValue,
1741 (char *) desc->method->name,
1742 (HashCompareFunc) strcmp, false) !=
1743 NULL;
1744
1745 /*
1746 * Debug by call-graph is enabled. Check if the debug list
1747 * covers any methods on the VM stack.
1748 */
1749 if (methodFound == false && gDvmJit.checkCallGraph == true) {
1750 methodFound =
1751 filterMethodByCallGraph(info->requestingThread,
1752 desc->method->name);
1753 }
1754 }
1755 }
1756
1757 /*
1758 * Under the following conditions, the trace will be *conservatively*
1759 * compiled by only containing single-step instructions to and from the
1760 * interpreter.
1761 * 1) If includeSelectedMethod == false, the method matches the full or
1762 * partial signature stored in the hash table.
1763 *
1764 * 2) If includeSelectedMethod == true, the method does not match the
1765 * full and partial signature stored in the hash table.
1766 */
1767 if (gDvmJit.methodTable && gDvmJit.includeSelectedMethod != methodFound) {
1768 #ifdef ARCH_IA32
1769 return false;
1770 #else
1771 cUnit.allSingleStep = true;
1772 #endif
1773 } else {
1774 /* Compile the trace as normal */
1775
1776 /* Print the method we cherry picked */
1777 if (gDvmJit.includeSelectedMethod == true) {
1778 cUnit.printMe = true;
1779 }
1780 }
1781 }
1782
1783 // Each pair is a range, check whether curOffset falls into a range.
1784 bool includeOffset = (gDvmJit.num_entries_pcTable < 2);
1785 for (int pcOff = 0; pcOff < gDvmJit.num_entries_pcTable; ) {
1786 if (pcOff+1 >= gDvmJit.num_entries_pcTable) {
1787 break;
1788 }
1789 if (curOffset >= gDvmJit.pcTable[pcOff] && curOffset <= gDvmJit.pcTable[pcOff+1]) {
1790 includeOffset = true;
1791 break;
1792 }
1793 pcOff += 2;
1794 }
1795 if (!includeOffset) {
1796 return false;
1797 }
1798
1799 /* Allocate the entry block */
1800 curBB = dvmCompilerNewBB(kEntryBlock, numBlocks++);
1801 dvmInsertGrowableList(blockList, (intptr_t) curBB);
1802 curBB->startOffset = curOffset;
1803
1804 entryCodeBB = dvmCompilerNewBB(kDalvikByteCode, numBlocks++);
1805 dvmInsertGrowableList(blockList, (intptr_t) entryCodeBB);
1806 entryCodeBB->startOffset = curOffset;
1807 curBB->fallThrough = entryCodeBB;
1808 curBB = entryCodeBB;
1809
1810 if (cUnit.printMe) {
1811 ALOGD("--------\nCompiler: Building trace for %s, offset %#x",
1812 desc->method->name, curOffset);
1813 }
1814
1815 /*
1816 * Analyze the trace descriptor and include up to the maximal number
1817 * of Dalvik instructions into the IR.
1818 */
1819 while (1) {
1820 MIR *insn;
1821 int width;
1822 insn = (MIR *)dvmCompilerNew(sizeof(MIR), true);
1823 insn->offset = curOffset;
1824 width = parseInsn(codePtr, &insn->dalvikInsn, cUnit.printMe);
1825
1826 /* The trace should never incude instruction data */
1827 assert(width);
1828 insn->width = width;
1829 traceSize += width;
1830 dvmCompilerAppendMIR(curBB, insn);
1831 cUnit.numInsts++;
1832
1833 int flags = dexGetFlagsFromOpcode(insn->dalvikInsn.opcode);
1834
1835 if (flags & kInstrInvoke) {
1836 const Method *calleeMethod = (const Method *)
1837 currRun[JIT_TRACE_CUR_METHOD].info.meta;
1838 assert(numInsts == 1);
1839 CallsiteInfo *callsiteInfo =
1840 (CallsiteInfo *)dvmCompilerNew(sizeof(CallsiteInfo), true);
1841 callsiteInfo->classDescriptor = (const char *)
1842 currRun[JIT_TRACE_CLASS_DESC].info.meta;
1843 callsiteInfo->classLoader = (Object *)
1844 currRun[JIT_TRACE_CLASS_LOADER].info.meta;
1845 callsiteInfo->method = calleeMethod;
1846 insn->meta.callsiteInfo = callsiteInfo;
1847 }
1848
1849 /* Instruction limit reached - terminate the trace here */
1850 if (cUnit.numInsts >= numMaxInsts) {
1851 break;
1852 }
1853 if (--numInsts == 0) {
1854 if (currRun->info.frag.runEnd) {
1855 break;
1856 } else {
1857 /* Advance to the next trace description (ie non-meta info) */
1858 do {
1859 currRun++;
1860 } while (!currRun->isCode);
1861
1862 /* Dummy end-of-run marker seen */
1863 if (currRun->info.frag.numInsts == 0) {
1864 break;
1865 }
1866
1867 curBB = dvmCompilerNewBB(kDalvikByteCode, numBlocks++);
1868 dvmInsertGrowableList(blockList, (intptr_t) curBB);
1869 curOffset = currRun->info.frag.startOffset;
1870 numInsts = currRun->info.frag.numInsts;
1871 curBB->startOffset = curOffset;
1872 codePtr = dexCode->insns + curOffset;
1873 }
1874 } else {
1875 curOffset += width;
1876 codePtr += width;
1877 }
1878 }
1879
1880 #if defined(WITH_JIT_TUNING)
1881 /* Convert # of half-word to bytes */
1882 methodStats->compiledDalvikSize += traceSize * 2;
1883 #endif
1884
1885 /*
1886 * Now scan basic blocks containing real code to connect the
1887 * taken/fallthrough links. Also create chaining cells for code not included
1888 * in the trace.
1889 */
1890 size_t blockId;
1891 for (blockId = 0; blockId < blockList->numUsed; blockId++) {
1892 curBB = (BasicBlock *) dvmGrowableListGetElement(blockList, blockId);
1893 MIR *lastInsn = curBB->lastMIRInsn;
1894 /* Skip empty blocks */
1895 if (lastInsn == NULL) {
1896 continue;
1897 }
1898 curOffset = lastInsn->offset;
1899 unsigned int targetOffset = curOffset;
1900 unsigned int fallThroughOffset = curOffset + lastInsn->width;
1901 bool isInvoke = false;
1902 const Method *callee = NULL;
1903
1904 findBlockBoundary(desc->method, curBB->lastMIRInsn, curOffset,
1905 &targetOffset, &isInvoke, &callee);
1906
1907 /* Link the taken and fallthrough blocks */
1908 BasicBlock *searchBB;
1909
1910 int flags = dexGetFlagsFromOpcode(lastInsn->dalvikInsn.opcode);
1911
1912 if (flags & kInstrInvoke) {
1913 cUnit.hasInvoke = true;
1914 }
1915
1916 /* Backward branch seen */
1917 if (isInvoke == false &&
1918 (flags & kInstrCanBranch) != 0 &&
1919 targetOffset < curOffset &&
1920 (optHints & JIT_OPT_NO_LOOP) == 0) {
1921 dvmCompilerArenaReset();
1922 return compileLoop(&cUnit, startOffset, desc, numMaxInsts,
1923 info, bailPtr, optHints);
1924 }
1925
1926 /* No backward branch in the trace - start searching the next BB */
1927 size_t searchBlockId;
1928 for (searchBlockId = blockId+1; searchBlockId < blockList->numUsed;
1929 searchBlockId++) {
1930 searchBB = (BasicBlock *) dvmGrowableListGetElement(blockList,
1931 searchBlockId);
1932 if (targetOffset == searchBB->startOffset) {
1933 curBB->taken = searchBB;
1934 dvmCompilerSetBit(searchBB->predecessors, curBB->id);
1935 }
1936 if (fallThroughOffset == searchBB->startOffset) {
1937 curBB->fallThrough = searchBB;
1938 dvmCompilerSetBit(searchBB->predecessors, curBB->id);
1939
1940 /*
1941 * Fallthrough block of an invoke instruction needs to be
1942 * aligned to 4-byte boundary (alignment instruction to be
1943 * inserted later.
1944 */
1945 if (flags & kInstrInvoke) {
1946 searchBB->isFallThroughFromInvoke = true;
1947 }
1948 }
1949 }
1950
1951 /*
1952 * Some blocks are ended by non-control-flow-change instructions,
1953 * currently only due to trace length constraint. In this case we need
1954 * to generate an explicit branch at the end of the block to jump to
1955 * the chaining cell.
1956 */
1957 curBB->needFallThroughBranch =
1958 ((flags & (kInstrCanBranch | kInstrCanSwitch | kInstrCanReturn |
1959 kInstrInvoke)) == 0);
1960 if (lastInsn->dalvikInsn.opcode == OP_PACKED_SWITCH ||
1961 lastInsn->dalvikInsn.opcode == OP_SPARSE_SWITCH) {
1962 int i;
1963 const u2 *switchData = desc->method->insns + lastInsn->offset +
1964 lastInsn->dalvikInsn.vB;
1965 int size = switchData[1];
1966 int maxChains = MIN(size, MAX_CHAINED_SWITCH_CASES);
1967
1968 /*
1969 * Generate the landing pad for cases whose ranks are higher than
1970 * MAX_CHAINED_SWITCH_CASES. The code will re-enter the interpreter
1971 * through the NoChain point.
1972 */
1973 if (maxChains != size) {
1974 cUnit.switchOverflowPad =
1975 desc->method->insns + lastInsn->offset;
1976 }
1977
1978 s4 *targets = (s4 *) (switchData + 2 +
1979 (lastInsn->dalvikInsn.opcode == OP_PACKED_SWITCH ?
1980 2 : size * 2));
1981
1982 /* One chaining cell for the first MAX_CHAINED_SWITCH_CASES cases */
1983 for (i = 0; i < maxChains; i++) {
1984 BasicBlock *caseChain = dvmCompilerNewBB(kChainingCellNormal,
1985 numBlocks++);
1986 dvmInsertGrowableList(blockList, (intptr_t) caseChain);
1987 caseChain->startOffset = lastInsn->offset + targets[i];
1988 }
1989
1990 /* One more chaining cell for the default case */
1991 BasicBlock *caseChain = dvmCompilerNewBB(kChainingCellNormal,
1992 numBlocks++);
1993 dvmInsertGrowableList(blockList, (intptr_t) caseChain);
1994 caseChain->startOffset = lastInsn->offset + lastInsn->width;
1995 /* Fallthrough block not included in the trace */
1996 } else if (!isUnconditionalBranch(lastInsn) &&
1997 curBB->fallThrough == NULL) {
1998 BasicBlock *fallThroughBB;
1999 /*
2000 * If the chaining cell is after an invoke or
2001 * instruction that cannot change the control flow, request a hot
2002 * chaining cell.
2003 */
2004 if (isInvoke || curBB->needFallThroughBranch) {
2005 fallThroughBB = dvmCompilerNewBB(kChainingCellHot, numBlocks++);
2006 } else {
2007 fallThroughBB = dvmCompilerNewBB(kChainingCellNormal,
2008 numBlocks++);
2009 }
2010 dvmInsertGrowableList(blockList, (intptr_t) fallThroughBB);
2011 fallThroughBB->startOffset = fallThroughOffset;
2012 curBB->fallThrough = fallThroughBB;
2013 dvmCompilerSetBit(fallThroughBB->predecessors, curBB->id);
2014 }
2015 /* Target block not included in the trace */
2016 if (curBB->taken == NULL &&
2017 (isGoto(lastInsn) || isInvoke ||
2018 (targetOffset != UNKNOWN_TARGET && targetOffset != curOffset))) {
2019 BasicBlock *newBB = NULL;
2020 if (isInvoke) {
2021 /* Monomorphic callee */
2022 if (callee) {
2023 /* JNI call doesn't need a chaining cell */
2024 if (!dvmIsNativeMethod(callee)) {
2025 newBB = dvmCompilerNewBB(kChainingCellInvokeSingleton,
2026 numBlocks++);
2027 newBB->startOffset = 0;
2028 newBB->containingMethod = callee;
2029 }
2030 /* Will resolve at runtime */
2031 } else {
2032 newBB = dvmCompilerNewBB(kChainingCellInvokePredicted,
2033 numBlocks++);
2034 newBB->startOffset = 0;
2035 }
2036 /* For unconditional branches, request a hot chaining cell */
2037 } else {
2038 #if !defined(WITH_SELF_VERIFICATION)
2039 newBB = dvmCompilerNewBB(dexIsGoto(flags) ?
2040 kChainingCellHot :
2041 kChainingCellNormal,
2042 numBlocks++);
2043 newBB->startOffset = targetOffset;
2044 #else
2045 /* Handle branches that branch back into the block */
2046 if (targetOffset >= curBB->firstMIRInsn->offset &&
2047 targetOffset <= curBB->lastMIRInsn->offset) {
2048 newBB = dvmCompilerNewBB(kChainingCellBackwardBranch,
2049 numBlocks++);
2050 } else {
2051 newBB = dvmCompilerNewBB(dexIsGoto(flags) ?
2052 kChainingCellHot :
2053 kChainingCellNormal,
2054 numBlocks++);
2055 }
2056 newBB->startOffset = targetOffset;
2057 #endif
2058 }
2059 if (newBB) {
2060 curBB->taken = newBB;
2061 dvmCompilerSetBit(newBB->predecessors, curBB->id);
2062 dvmInsertGrowableList(blockList, (intptr_t) newBB);
2063 }
2064 }
2065 }
2066
2067 /* Now create a special block to host PC reconstruction code */
2068 curBB = dvmCompilerNewBB(kPCReconstruction, numBlocks++);
2069 dvmInsertGrowableList(blockList, (intptr_t) curBB);
2070
    /* And one final block that publishes the PC and raises the exception */
2072 curBB = dvmCompilerNewBB(kExceptionHandling, numBlocks++);
2073 dvmInsertGrowableList(blockList, (intptr_t) curBB);
2074 cUnit.puntBlock = curBB;
2075
2076 if (cUnit.printMe) {
2077 char* signature =
2078 dexProtoCopyMethodDescriptor(&desc->method->prototype);
2079 ALOGD("TRACEINFO (%d): 0x%08x %s%s.%s %#x %d of %d, %d blocks",
2080 compilationId,
2081 (intptr_t) desc->method->insns,
2082 desc->method->clazz->descriptor,
2083 desc->method->name,
2084 signature,
2085 desc->trace[0].info.frag.startOffset,
2086 traceSize,
2087 dexCode->insnsSize,
2088 numBlocks);
2089 free(signature);
2090 }
2091
2092 cUnit.numBlocks = numBlocks;
2093
2094 /* Set the instruction set to use (NOTE: later components may change it) */
2095 cUnit.instructionSet = dvmCompilerInstructionSet();
2096
2097 /* Inline transformation @ the MIR level */
2098 if (cUnit.hasInvoke && !(gDvmJit.disableOpt & (1 << kMethodInlining))) {
2099 dvmCompilerInlineMIR(&cUnit, info);
2100 }
2101
2102 cUnit.numDalvikRegisters = cUnit.method->registersSize;
2103
2104 /* Preparation for SSA conversion */
2105 dvmInitializeSSAConversion(&cUnit);
2106
2107 dvmCompilerNonLoopAnalysis(&cUnit);
2108
2109 #ifndef ARCH_IA32
2110 dvmCompilerInitializeRegAlloc(&cUnit); // Needs to happen after SSA naming
2111 #endif
2112
2113 if (cUnit.printMe) {
2114 dvmCompilerDumpCompilationUnit(&cUnit);
2115 }
2116
2117 #ifndef ARCH_IA32
2118 /* Allocate Registers using simple local allocation scheme */
2119 dvmCompilerLocalRegAlloc(&cUnit);
2120
2121 /* Convert MIR to LIR, etc. */
2122 dvmCompilerMIR2LIR(&cUnit);
2123 #else /* ARCH_IA32 */
2124 /* Convert MIR to LIR, etc. */
2125 dvmCompilerMIR2LIR(&cUnit, info);
2126 #endif
2127
2128 /* Convert LIR into machine code. Loop for recoverable retries */
2129 do {
2130 dvmCompilerAssembleLIR(&cUnit, info);
2131 cUnit.assemblerRetries++;
2132 if (cUnit.printMe && cUnit.assemblerStatus != kSuccess)
2133 ALOGD("Assembler abort #%d on %d",cUnit.assemblerRetries,
2134 cUnit.assemblerStatus);
2135 } while (cUnit.assemblerStatus == kRetryAll);
2136
2137 if (cUnit.printMe) {
2138 ALOGD("Trace Dalvik PC: %p", startCodePtr);
2139 dvmCompilerCodegenDump(&cUnit);
2140 ALOGD("End %s%s, %d Dalvik instructions",
2141 desc->method->clazz->descriptor, desc->method->name,
2142 cUnit.numInsts);
2143 }
2144
2145 if (cUnit.assemblerStatus == kRetryHalve) {
2146 /* Reset the compiler resource pool before retry */
2147 dvmCompilerArenaReset();
2148
2149 /* Halve the instruction count and start from the top */
2150 return dvmCompileTrace(desc, cUnit.numInsts / 2, info, bailPtr,
2151 optHints);
2152 }
2153
2154 /*
2155 * If this trace uses class objects as constants,
2156 * dvmJitInstallClassObjectPointers will switch the thread state
2157 * to running and look up the class pointers using the descriptor/loader
2158 * tuple stored in the callsite info structure. We need to make this window
2159 * as short as possible since it is blocking GC.
2160 */
2161 if (cUnit.hasClassLiterals && info->codeAddress) {
2162 dvmJitInstallClassObjectPointers(&cUnit, (char *) info->codeAddress);
2163 }
2164
2165 /*
2166 * Since callsiteinfo is allocated from the arena, delay the reset until
2167 * class pointers are resolved.
2168 */
2169 dvmCompilerArenaReset();
2170
2171 assert(cUnit.assemblerStatus == kSuccess);
2172 #if defined(WITH_JIT_TUNING)
2173 methodStats->nativeSize += cUnit.totalSize;
2174 #endif
2175
2176 return info->codeAddress != NULL;
2177 }
2178