/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Dalvik.h"
#include "CompilerInternals.h"
#include "Dataflow.h"
#include "Loop.h"

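/*
 * DEBUG_LOOP(X) is a no-op by default; define it to expand to X to enable the
 * loop debugging dumps (see the routines in the "#if 0" block below).
 */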
#define DEBUG_LOOP(X)

#if 0
/* Debugging routines */
static void dumpConstants(CompilationUnit *cUnit)
{
    int i;
    ALOGE("LOOP starting offset: %x", cUnit->entryBlock->startOffset);
    for (i = 0; i < cUnit->numSSARegs; i++) {
        if (dvmIsBitSet(cUnit->isConstantV, i)) {
            int subNReg = dvmConvertSSARegToDalvik(cUnit, i);
            ALOGE("CONST: s%d(v%d_%d) has %d", i,
                  DECODE_REG(subNReg), DECODE_SUB(subNReg),
                  cUnit->constantValues[i]);
        }
    }
}

static void dumpIVList(CompilationUnit *cUnit)
{
    unsigned int i;
    GrowableList *ivList = cUnit->loopAnalysis->ivList;

    for (i = 0; i < ivList->numUsed; i++) {
        InductionVariableInfo *ivInfo =
            (InductionVariableInfo *) ivList->elemList[i];
        int iv = dvmConvertSSARegToDalvik(cUnit, ivInfo->ssaReg);
        /* Basic IV */
        if (ivInfo->ssaReg == ivInfo->basicSSAReg) {
            ALOGE("BIV %d: s%d(v%d_%d) + %d", i,
                  ivInfo->ssaReg,
                  DECODE_REG(iv), DECODE_SUB(iv),
                  ivInfo->inc);
        /* Dependent IV */
        } else {
            int biv = dvmConvertSSARegToDalvik(cUnit, ivInfo->basicSSAReg);

            ALOGE("DIV %d: s%d(v%d_%d) = %d * s%d(v%d_%d) + %d", i,
                  ivInfo->ssaReg,
                  DECODE_REG(iv), DECODE_SUB(iv),
                  ivInfo->m,
                  ivInfo->basicSSAReg,
                  DECODE_REG(biv), DECODE_SUB(biv),
                  ivInfo->c);
        }
    }
}

static void dumpHoistedChecks(CompilationUnit *cUnit)
{
    LoopAnalysis *loopAnalysis = cUnit->loopAnalysis;
    unsigned int i;

    for (i = 0; i < loopAnalysis->arrayAccessInfo->numUsed; i++) {
        ArrayAccessInfo *arrayAccessInfo =
            GET_ELEM_N(loopAnalysis->arrayAccessInfo,
                       ArrayAccessInfo*, i);
        int arrayReg = DECODE_REG(
            dvmConvertSSARegToDalvik(cUnit, arrayAccessInfo->arrayReg));
        int idxReg = DECODE_REG(
            dvmConvertSSARegToDalvik(cUnit, arrayAccessInfo->ivReg));
        ALOGE("Array access %d", i);
        ALOGE("  arrayReg %d", arrayReg);
        ALOGE("  idxReg %d", idxReg);
        ALOGE("  endReg %d", loopAnalysis->endConditionReg);
        ALOGE("  maxC %d", arrayAccessInfo->maxC);
        ALOGE("  minC %d", arrayAccessInfo->minC);
        ALOGE("  opcode %d", loopAnalysis->loopBranchOpcode);
    }
}

#endif

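/*
 * Find the "natural" predecessor of a loop block: return the sole predecessor
 * when there is exactly one, or the non-entry predecessor for the first loop
 * block (two predecessors, one of which is the trace entry block). Return
 * NULL for any other control-flow shape.
 */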
static BasicBlock *findPredecessorBlock(const CompilationUnit *cUnit,
                                        const BasicBlock *bb)
{
    int numPred = dvmCountSetBits(bb->predecessors);
    BitVectorIterator bvIterator;
    dvmBitVectorIteratorInit(bb->predecessors, &bvIterator);

    if (numPred == 1) {
        int predIdx = dvmBitVectorIteratorNext(&bvIterator);
        return (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList,
                                                        predIdx);
    /* First loop block */
    } else if ((numPred == 2) &&
               dvmIsBitSet(bb->predecessors, cUnit->entryBlock->id)) {
        while (true) {
            int predIdx = dvmBitVectorIteratorNext(&bvIterator);
            if (predIdx == cUnit->entryBlock->id) continue;
            return (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList,
                                                            predIdx);
        }
    /* Doesn't support other shapes of control flow yet */
    } else {
        return NULL;
    }
}

/* Used for normalized loop exit condition checks */
static Opcode negateOpcode(Opcode opcode)
{
    switch (opcode) {
        /* reg/reg cmp */
        case OP_IF_EQ:
            return OP_IF_NE;
        case OP_IF_NE:
            return OP_IF_EQ;
        case OP_IF_LT:
            return OP_IF_GE;
        case OP_IF_GE:
            return OP_IF_LT;
        case OP_IF_GT:
            return OP_IF_LE;
        case OP_IF_LE:
            return OP_IF_GT;
        /* reg/zero cmp */
        case OP_IF_EQZ:
            return OP_IF_NEZ;
        case OP_IF_NEZ:
            return OP_IF_EQZ;
        case OP_IF_LTZ:
            return OP_IF_GEZ;
        case OP_IF_GEZ:
            return OP_IF_LTZ;
        case OP_IF_GTZ:
            return OP_IF_LEZ;
        case OP_IF_LEZ:
            return OP_IF_GTZ;
        default:
            ALOGE("opcode %d cannot be negated", opcode);
            dvmAbort();
            break;
    }
    return (Opcode)-1;  // unreached
}

/*
 * A loop is considered optimizable if:
 * 1) It has one basic induction variable.
 * 2) The loop back branch compares the BIV with a constant.
 * 3) The loop exit condition can be normalized so that the loop is exited
 *    via the taken path.
 * 4) If it is a count-up loop, the condition is GE/GT. Otherwise it is
 *    LE/LT/LEZ/LTZ for a count-down loop.
 *
 * Return false for loops that fail the above tests.
 */
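/*
 * Illustrative example (not taken from any particular trace): a Java loop
 * such as
 *
 *     for (int i = 0; i < n; i++) { sum += a[i]; }
 *
 * passes these tests: "i" is the single basic IV (incremented by a non-zero
 * constant) and the back branch compares it against the loop-invariant "n".
 */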
static bool isSimpleCountedLoop(CompilationUnit *cUnit)
{
    unsigned int i;
    BasicBlock *loopBackBlock = cUnit->entryBlock->fallThrough;
    LoopAnalysis *loopAnalysis = cUnit->loopAnalysis;

    if (loopAnalysis->numBasicIV != 1) return false;
    for (i = 0; i < loopAnalysis->ivList->numUsed; i++) {
        InductionVariableInfo *ivInfo;

        ivInfo = GET_ELEM_N(loopAnalysis->ivList, InductionVariableInfo*, i);
        /* Count up or down loop? */
        if (ivInfo->ssaReg == ivInfo->basicSSAReg) {
            /* Infinite loop */
            if (ivInfo->inc == 0) {
                return false;
            }
            loopAnalysis->isCountUpLoop = ivInfo->inc > 0;
            break;
        }
    }

    /* Find the block that ends with a branch to exit the loop */
    while (true) {
        loopBackBlock = findPredecessorBlock(cUnit, loopBackBlock);
        /* Loop structure not recognized as a counted loop */
        if (loopBackBlock == NULL) {
            return false;
        }
        /* Unconditional goto - continue to trace up the predecessor chain */
        if (loopBackBlock->taken == NULL) {
            continue;
        }
        break;
    }

    MIR *branch = loopBackBlock->lastMIRInsn;
    Opcode opcode = branch->dalvikInsn.opcode;

    /* Last instruction is not a conditional branch - bail */
    if (dexGetFlagsFromOpcode(opcode) != (kInstrCanContinue|kInstrCanBranch)) {
        return false;
    }

    int endSSAReg;
    int endDalvikReg;

    /* reg/reg comparison */
    if (branch->ssaRep->numUses == 2) {
        if (branch->ssaRep->uses[0] == loopAnalysis->ssaBIV) {
            endSSAReg = branch->ssaRep->uses[1];
        } else if (branch->ssaRep->uses[1] == loopAnalysis->ssaBIV) {
            endSSAReg = branch->ssaRep->uses[0];
            opcode = negateOpcode(opcode);
        } else {
            return false;
        }
        endDalvikReg = dvmConvertSSARegToDalvik(cUnit, endSSAReg);
        /*
         * If the comparison is not between the BIV and a loop invariant,
         * return false. endDalvikReg is loop invariant if one of the
         * following is true:
         * - It is not defined in the loop (ie DECODE_SUB returns 0)
         * - It is reloaded with a constant
         */
        if ((DECODE_SUB(endDalvikReg) != 0) &&
            !dvmIsBitSet(cUnit->isConstantV, endSSAReg)) {
            return false;
        }
    /* Compare against zero */
    } else if (branch->ssaRep->numUses == 1) {
        if (branch->ssaRep->uses[0] == loopAnalysis->ssaBIV) {
            /* Keep the compiler happy */
            endDalvikReg = -1;
        } else {
            return false;
        }
    } else {
        return false;
    }

    /* Normalize the loop exit check as "if (iv op end) exit;" */
    if (loopBackBlock->taken->blockType == kDalvikByteCode) {
        opcode = negateOpcode(opcode);
    }

    if (loopAnalysis->isCountUpLoop) {
        /*
         * If the normalized condition op is not > or >=, this is not an
         * optimization candidate.
         */
        switch (opcode) {
            case OP_IF_GT:
            case OP_IF_GE:
                break;
            default:
                return false;
        }
        loopAnalysis->endConditionReg = DECODE_REG(endDalvikReg);
    } else {
        /*
         * If the normalized condition op is not < or <=, this is not an
         * optimization candidate.
         */
        switch (opcode) {
            case OP_IF_LT:
            case OP_IF_LE:
                loopAnalysis->endConditionReg = DECODE_REG(endDalvikReg);
                break;
            case OP_IF_LTZ:
            case OP_IF_LEZ:
                break;
            default:
                return false;
        }
    }
    /*
     * Remember the normalized opcode, which will be used to determine the end
     * value used for the yanked range checks.
     */
    loopAnalysis->loopBranchOpcode = opcode;
    return true;
}

/*
 * Record the upper and lower bound information for range checks for each
 * induction variable. If array A is accessed by index "i+5", the upper and
 * lower bound will be len(A)-5 and -5, respectively.
 */
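/*
 * Illustrative example: if the same loop body accesses both a[i] and a[i+5],
 * the ArrayAccessInfo record for array "a" ends up with maxC == 5 and
 * minC == 0 after both accesses have been visited below.
 */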
static void updateRangeCheckInfo(CompilationUnit *cUnit, int arrayReg,
                                 int idxReg)
{
    InductionVariableInfo *ivInfo;
    LoopAnalysis *loopAnalysis = cUnit->loopAnalysis;
    unsigned int i, j;

    for (i = 0; i < loopAnalysis->ivList->numUsed; i++) {
        ivInfo = GET_ELEM_N(loopAnalysis->ivList, InductionVariableInfo*, i);
        if (ivInfo->ssaReg == idxReg) {
            ArrayAccessInfo *arrayAccessInfo = NULL;
            for (j = 0; j < loopAnalysis->arrayAccessInfo->numUsed; j++) {
                ArrayAccessInfo *existingArrayAccessInfo =
                    GET_ELEM_N(loopAnalysis->arrayAccessInfo,
                               ArrayAccessInfo*,
                               j);
                if (existingArrayAccessInfo->arrayReg == arrayReg) {
                    if (ivInfo->c > existingArrayAccessInfo->maxC) {
                        existingArrayAccessInfo->maxC = ivInfo->c;
                    }
                    if (ivInfo->c < existingArrayAccessInfo->minC) {
                        existingArrayAccessInfo->minC = ivInfo->c;
                    }
                    arrayAccessInfo = existingArrayAccessInfo;
                    break;
                }
            }
            if (arrayAccessInfo == NULL) {
                arrayAccessInfo =
                    (ArrayAccessInfo *)dvmCompilerNew(sizeof(ArrayAccessInfo),
                                                      false);
                arrayAccessInfo->ivReg = ivInfo->basicSSAReg;
                arrayAccessInfo->arrayReg = arrayReg;
                arrayAccessInfo->maxC = (ivInfo->c > 0) ? ivInfo->c : 0;
                arrayAccessInfo->minC = (ivInfo->c < 0) ? ivInfo->c : 0;
                dvmInsertGrowableList(loopAnalysis->arrayAccessInfo,
                                      (intptr_t) arrayAccessInfo);
            }
            break;
        }
    }
}

/* Returns true if the loop body cannot throw any exceptions */
static bool doLoopBodyCodeMotion(CompilationUnit *cUnit)
{
    BasicBlock *loopBody = cUnit->entryBlock->fallThrough;
    MIR *mir;
    bool loopBodyCanThrow = false;

    for (mir = loopBody->firstMIRInsn; mir; mir = mir->next) {
        DecodedInstruction *dInsn = &mir->dalvikInsn;
        int dfAttributes =
            dvmCompilerDataFlowAttributes[mir->dalvikInsn.opcode];

        /* Skip extended MIR instructions */
        if ((u2) dInsn->opcode >= kNumPackedOpcodes) continue;

        int instrFlags = dexGetFlagsFromOpcode(dInsn->opcode);

        /* Instruction is clean */
        if ((instrFlags & kInstrCanThrow) == 0) continue;

        /*
         * Currently we can only optimize away null and range checks. Punt on
         * instructions that can throw due to other exceptions.
         */
        if (!(dfAttributes & DF_HAS_NR_CHECKS)) {
            loopBodyCanThrow = true;
            continue;
        }

        /*
         * This comparison is redundant now, but we will have more than one
         * group of flags to check soon.
         */
        if (dfAttributes & DF_HAS_NR_CHECKS) {
            /*
             * Check whether the null check is applied to a loop-invariant
             * register. If the register's SSA id is less than the number of
             * Dalvik registers, then it is loop invariant.
             */
            int refIdx;
            switch (dfAttributes & DF_HAS_NR_CHECKS) {
                case DF_NULL_N_RANGE_CHECK_0:
                    refIdx = 0;
                    break;
                case DF_NULL_N_RANGE_CHECK_1:
                    refIdx = 1;
                    break;
                case DF_NULL_N_RANGE_CHECK_2:
                    refIdx = 2;
                    break;
                default:
                    refIdx = 0;
                    ALOGE("Jit: bad case in doLoopBodyCodeMotion");
                    dvmCompilerAbort(cUnit);
            }

            int useIdx = refIdx + 1;
            int subNRegArray =
                dvmConvertSSARegToDalvik(cUnit, mir->ssaRep->uses[refIdx]);
            int arraySub = DECODE_SUB(subNRegArray);

            /*
             * If the register is never updated in the loop (ie subscript == 0),
             * it is an optimization candidate.
             */
            if (arraySub != 0) {
                loopBodyCanThrow = true;
                continue;
            }

            /*
             * Then check if the range check can be hoisted out of the loop if
             * the index is a basic or dependent induction variable.
             */
            if (dvmIsBitSet(cUnit->loopAnalysis->isIndVarV,
                            mir->ssaRep->uses[useIdx])) {
                mir->OptimizationFlags |=
                    MIR_IGNORE_RANGE_CHECK | MIR_IGNORE_NULL_CHECK;
                updateRangeCheckInfo(cUnit, mir->ssaRep->uses[refIdx],
                                     mir->ssaRep->uses[useIdx]);
            }
        }
    }

    return !loopBodyCanThrow;
}

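/*
 * Lower the collected array access information into extended MIR instructions
 * in the loop entry block: one null/range check per accessed array
 * (kMirOpNullNRangeUpCheck or kMirOpNullNRangeDownCheck), plus a lower-bound
 * check (kMirOpLowerBound) or an unconditional punt (kMirOpPunt) depending on
 * the loop shape.
 */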
static void genHoistedChecks(CompilationUnit *cUnit)
{
    unsigned int i;
    BasicBlock *entry = cUnit->entryBlock;
    LoopAnalysis *loopAnalysis = cUnit->loopAnalysis;
    int globalMaxC = 0;
    int globalMinC = 0;
    /* Should be loop invariant */
    int idxReg = 0;

    for (i = 0; i < loopAnalysis->arrayAccessInfo->numUsed; i++) {
        ArrayAccessInfo *arrayAccessInfo =
            GET_ELEM_N(loopAnalysis->arrayAccessInfo,
                       ArrayAccessInfo*, i);
        int arrayReg = DECODE_REG(
            dvmConvertSSARegToDalvik(cUnit, arrayAccessInfo->arrayReg));
        idxReg = DECODE_REG(
            dvmConvertSSARegToDalvik(cUnit, arrayAccessInfo->ivReg));

        MIR *rangeCheckMIR = (MIR *)dvmCompilerNew(sizeof(MIR), true);
        rangeCheckMIR->dalvikInsn.opcode = (loopAnalysis->isCountUpLoop) ?
            (Opcode)kMirOpNullNRangeUpCheck : (Opcode)kMirOpNullNRangeDownCheck;
        rangeCheckMIR->dalvikInsn.vA = arrayReg;
        rangeCheckMIR->dalvikInsn.vB = idxReg;
        rangeCheckMIR->dalvikInsn.vC = loopAnalysis->endConditionReg;
        rangeCheckMIR->dalvikInsn.arg[0] = arrayAccessInfo->maxC;
        rangeCheckMIR->dalvikInsn.arg[1] = arrayAccessInfo->minC;
        rangeCheckMIR->dalvikInsn.arg[2] = loopAnalysis->loopBranchOpcode;
        dvmCompilerAppendMIR(entry, rangeCheckMIR);
        if (arrayAccessInfo->maxC > globalMaxC) {
            globalMaxC = arrayAccessInfo->maxC;
        }
        if (arrayAccessInfo->minC < globalMinC) {
            globalMinC = arrayAccessInfo->minC;
        }
    }

    if (loopAnalysis->arrayAccessInfo->numUsed != 0) {
        if (loopAnalysis->isCountUpLoop) {
            MIR *boundCheckMIR = (MIR *)dvmCompilerNew(sizeof(MIR), true);
            boundCheckMIR->dalvikInsn.opcode = (Opcode)kMirOpLowerBound;
            boundCheckMIR->dalvikInsn.vA = idxReg;
            boundCheckMIR->dalvikInsn.vB = globalMinC;
            dvmCompilerAppendMIR(entry, boundCheckMIR);
        } else {
            if (loopAnalysis->loopBranchOpcode == OP_IF_LT ||
                loopAnalysis->loopBranchOpcode == OP_IF_LE) {
                MIR *boundCheckMIR = (MIR *)dvmCompilerNew(sizeof(MIR), true);
                boundCheckMIR->dalvikInsn.opcode = (Opcode)kMirOpLowerBound;
                boundCheckMIR->dalvikInsn.vA = loopAnalysis->endConditionReg;
                boundCheckMIR->dalvikInsn.vB = globalMinC;
                /*
                 * If the end condition is ">" in the source, the check in the
                 * Dalvik bytecode is OP_IF_LE. In this case add 1 back to the
                 * constant field to reflect the fact that the smallest index
                 * value is "endValue + constant + 1".
                 */
                if (loopAnalysis->loopBranchOpcode == OP_IF_LE) {
                    boundCheckMIR->dalvikInsn.vB++;
                }
                dvmCompilerAppendMIR(entry, boundCheckMIR);
            } else if (loopAnalysis->loopBranchOpcode == OP_IF_LTZ) {
                /* Array index will fall below 0 */
                if (globalMinC < 0) {
                    MIR *boundCheckMIR = (MIR *)dvmCompilerNew(sizeof(MIR),
                                                               true);
                    boundCheckMIR->dalvikInsn.opcode = (Opcode)kMirOpPunt;
                    dvmCompilerAppendMIR(entry, boundCheckMIR);
                }
            } else if (loopAnalysis->loopBranchOpcode == OP_IF_LEZ) {
                /* Array index will fall below 0 */
                if (globalMinC < -1) {
                    MIR *boundCheckMIR = (MIR *)dvmCompilerNew(sizeof(MIR),
                                                               true);
                    boundCheckMIR->dalvikInsn.opcode = (Opcode)kMirOpPunt;
                    dvmCompilerAppendMIR(entry, boundCheckMIR);
                }
            } else {
                ALOGE("Jit: bad case in genHoistedChecks");
                dvmCompilerAbort(cUnit);
            }
        }
    }
}

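/*
 * Detach a block from the CFG by clearing its taken/fallThrough edges and its
 * successor block list.
 */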
void resetBlockEdges(BasicBlock *bb)
{
    bb->taken = NULL;
    bb->fallThrough = NULL;
    bb->successorBlockList.blockListType = kNotUsed;
}

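/*
 * Dataflow-dispatcher callback that clears the predecessor bit vector of a
 * block so that predecessor information can be rebuilt for the trimmed loop.
 */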
static bool clearPredecessorVector(struct CompilationUnit *cUnit,
                                   struct BasicBlock *bb)
{
    dvmClearAllBits(bb->predecessors);
    return false;
}

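/*
 * Starting from the trace entry's fall-through block, try to form a single
 * innermost loop: collect the loop blocks, bail out (return false) if the
 * control flow is not a simple loop, hide all non-loop blocks, convert hidden
 * branch targets into normal chaining cells, and rebuild the predecessor
 * information for the remaining blocks.
 */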
bool dvmCompilerFilterLoopBlocks(CompilationUnit *cUnit)
{
    BasicBlock *firstBB = cUnit->entryBlock->fallThrough;

    int numPred = dvmCountSetBits(firstBB->predecessors);
    /*
     * A loop body should have at least two incoming edges.
     */
    if (numPred < 2) return false;

    GrowableList *blockList = &cUnit->blockList;

    /* Record blocks included in the loop */
    dvmClearAllBits(cUnit->tempBlockV);

    dvmCompilerSetBit(cUnit->tempBlockV, cUnit->entryBlock->id);
    dvmCompilerSetBit(cUnit->tempBlockV, firstBB->id);

    BasicBlock *bodyBB = firstBB;

    /*
     * First try to include the fall-through block in the loop, then the taken
     * block. Stop loop formation on the first backward branch that enters the
     * first block (ie only include the inner-most loop).
     */
    while (true) {
        /* Loop formed */
        if (bodyBB->taken == firstBB) {
            /* Check if the fallThrough edge will cause a nested loop */
            if (bodyBB->fallThrough &&
                dvmIsBitSet(cUnit->tempBlockV, bodyBB->fallThrough->id)) {
                return false;
            }
            /* Single loop formed */
            break;
        } else if (bodyBB->fallThrough == firstBB) {
            /* Check if the taken edge will cause a nested loop */
            if (bodyBB->taken &&
                dvmIsBitSet(cUnit->tempBlockV, bodyBB->taken->id)) {
                return false;
            }
            /* Single loop formed */
            break;
        }

        /* Inner loops formed first - quit */
        if (bodyBB->fallThrough &&
            dvmIsBitSet(cUnit->tempBlockV, bodyBB->fallThrough->id)) {
            return false;
        }
        if (bodyBB->taken &&
            dvmIsBitSet(cUnit->tempBlockV, bodyBB->taken->id)) {
            return false;
        }

        if (bodyBB->fallThrough) {
            if (bodyBB->fallThrough->iDom == bodyBB) {
                bodyBB = bodyBB->fallThrough;
                dvmCompilerSetBit(cUnit->tempBlockV, bodyBB->id);
                /*
                 * Loop formation to be detected at the beginning of next
                 * iteration.
                 */
                continue;
            }
        }
        if (bodyBB->taken) {
            if (bodyBB->taken->iDom == bodyBB) {
                bodyBB = bodyBB->taken;
                dvmCompilerSetBit(cUnit->tempBlockV, bodyBB->id);
                /*
                 * Loop formation to be detected at the beginning of next
                 * iteration.
                 */
                continue;
            }
        }
        /*
         * Current block is not the immediate dominator of either the
         * fallthrough or the taken block - bail out of loop formation.
         */
        return false;
    }


    /* Now mark blocks not included in the loop as hidden */
    GrowableListIterator iterator;
    dvmGrowableListIteratorInit(blockList, &iterator);
    while (true) {
        BasicBlock *bb = (BasicBlock *) dvmGrowableListIteratorNext(&iterator);
        if (bb == NULL) break;
        if (!dvmIsBitSet(cUnit->tempBlockV, bb->id)) {
            bb->hidden = true;
            /* Clear the insn list */
            bb->firstMIRInsn = bb->lastMIRInsn = NULL;
            resetBlockEdges(bb);
        }
    }

    dvmCompilerDataFlowAnalysisDispatcher(cUnit, clearPredecessorVector,
                                          kAllNodes, false /* isIterative */);

    dvmGrowableListIteratorInit(blockList, &iterator);
    while (true) {
        BasicBlock *bb = (BasicBlock *) dvmGrowableListIteratorNext(&iterator);
        if (bb == NULL) break;
        if (dvmIsBitSet(cUnit->tempBlockV, bb->id)) {
            if (bb->taken) {
                /*
                 * Hitting the exit block means we ran into control flow that
                 * we don't want to handle.
                 */
                if (bb->taken == cUnit->exitBlock) {
                    return false;
                }
                if (bb->taken->hidden) {
                    bb->taken->blockType = kChainingCellNormal;
                    bb->taken->hidden = false;
                }
                dvmCompilerSetBit(bb->taken->predecessors, bb->id);
            }
            if (bb->fallThrough) {
                /*
                 * Hitting the exit block means we ran into control flow that
                 * we don't want to handle.
                 */
                if (bb->fallThrough == cUnit->exitBlock) {
                    return false;
                }
                if (bb->fallThrough->hidden) {
                    bb->fallThrough->blockType = kChainingCellNormal;
                    bb->fallThrough->hidden = false;
                }
                dvmCompilerSetBit(bb->fallThrough->predecessors, bb->id);
            }
            /* Loop blocks shouldn't contain any successor blocks (yet) */
            assert(bb->successorBlockList.blockListType == kNotUsed);
        }
    }
    return true;
}

/*
 * Main entry point for loop optimization.
 * Return false if sanity checks for loop formation/optimization failed.
 */
bool dvmCompilerLoopOpt(CompilationUnit *cUnit)
{
    LoopAnalysis *loopAnalysis =
        (LoopAnalysis *)dvmCompilerNew(sizeof(LoopAnalysis), true);
    cUnit->loopAnalysis = loopAnalysis;

    /* Constant propagation */
    cUnit->isConstantV = dvmCompilerAllocBitVector(cUnit->numSSARegs, false);
    cUnit->constantValues =
        (int *)dvmCompilerNew(sizeof(int) * cUnit->numSSARegs,
                              true);
    dvmCompilerDataFlowAnalysisDispatcher(cUnit,
                                          dvmCompilerDoConstantPropagation,
                                          kAllNodes,
                                          false /* isIterative */);
    DEBUG_LOOP(dumpConstants(cUnit);)

    /* Find induction variables - basic and dependent */
    loopAnalysis->ivList =
        (GrowableList *)dvmCompilerNew(sizeof(GrowableList), true);
    dvmInitGrowableList(loopAnalysis->ivList, 4);
    loopAnalysis->isIndVarV = dvmCompilerAllocBitVector(cUnit->numSSARegs, false);
    dvmCompilerDataFlowAnalysisDispatcher(cUnit,
                                          dvmCompilerFindInductionVariables,
                                          kAllNodes,
                                          false /* isIterative */);
    DEBUG_LOOP(dumpIVList(cUnit);)

    /* Only optimize array accesses for simple counted loops for now */
    if (!isSimpleCountedLoop(cUnit))
        return false;

    loopAnalysis->arrayAccessInfo =
        (GrowableList *)dvmCompilerNew(sizeof(GrowableList), true);
    dvmInitGrowableList(loopAnalysis->arrayAccessInfo, 4);
    loopAnalysis->bodyIsClean = doLoopBodyCodeMotion(cUnit);
    DEBUG_LOOP(dumpHoistedChecks(cUnit);)

    /*
     * Convert the array access information into extended MIR code in the loop
     * header.
     */
    genHoistedChecks(cUnit);
    return true;
}

/*
 * Select the target block of the backward branch.
 */
void dvmCompilerInsertBackwardChaining(CompilationUnit *cUnit)
{
    /*
     * If we are not in self-verification or profiling mode, the backward
     * branch can go to the entryBlock->fallThrough directly. Suspend polling
     * code will be generated along the backward branch to honor the suspend
     * requests.
     */
#ifndef ARCH_IA32
#if !defined(WITH_SELF_VERIFICATION)
    if (gDvmJit.profileMode != kTraceProfilingContinuous &&
        gDvmJit.profileMode != kTraceProfilingPeriodicOn) {
        return;
    }
#endif
#endif

    /*
     * In self-verification or profiling mode, the backward branch is altered
     * to go to the backward chaining cell. Without using the backward chaining
     * cell we won't be able to do check-pointing on the target PC, or count
     * the number of iterations accurately.
     */
    BasicBlock *firstBB = cUnit->entryBlock->fallThrough;
    BasicBlock *backBranchBB = findPredecessorBlock(cUnit, firstBB);
    if (backBranchBB->taken == firstBB) {
        backBranchBB->taken = cUnit->backChainBlock;
    } else {
        assert(backBranchBB->fallThrough == firstBB);
        backBranchBB->fallThrough = cUnit->backChainBlock;
    }
    cUnit->backChainBlock->startOffset = firstBB->startOffset;
}