/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "compiler_internals.h"
#include "dataflow_iterator-inl.h"

#define NOTVISITED (-1)

namespace art {

void MIRGraph::ClearAllVisitedFlags() {
  AllNodesIterator iter(this, false /* not iterative */);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    bb->visited = false;
  }
}

BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
  if (bb != NULL) {
    if (bb->visited || bb->hidden) {
      bb = NULL;
    }
  }
  return bb;
}

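/*
 * Return the first not-yet-visited successor of bb, checking the fall-through
 * edge, then the taken edge, then any blocks on the successor block list.
 * Returns NULL when every successor has already been visited.
 */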
BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
  BasicBlock* res = NeedsVisit(bb->fall_through);
  if (res == NULL) {
    res = NeedsVisit(bb->taken);
    if (res == NULL) {
      if (bb->successor_block_list.block_list_type != kNotUsed) {
        GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
        while (true) {
          SuccessorBlockInfo *sbi = iterator.Next();
          if (sbi == NULL) {
            break;
          }
          res = NeedsVisit(sbi->block);
          if (res != NULL) {
            break;
          }
        }
      }
    }
  }
  return res;
}

void MIRGraph::MarkPreOrder(BasicBlock* block) {
  block->visited = true;
  /* Enqueue the pre_order block id */
  dfs_order_->Insert(block->id);
}

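/*
 * Non-recursive depth-first traversal using an explicit stack: a block is
 * appended to dfs_order_ (pre-order) when it is first pushed, and to
 * dfs_post_order_ when it is popped with no unvisited successors left. Each
 * block's dfs_id records its position in the post-order list.
 */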
void MIRGraph::RecordDFSOrders(BasicBlock* block) {
  std::vector<BasicBlock*> succ;
  MarkPreOrder(block);
  succ.push_back(block);
  while (!succ.empty()) {
    BasicBlock* curr = succ.back();
    BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
    if (next_successor != NULL) {
      MarkPreOrder(next_successor);
      succ.push_back(next_successor);
      continue;
    }
    curr->dfs_id = dfs_post_order_->Size();
    dfs_post_order_->Insert(curr->id);
    succ.pop_back();
  }
}

/* Compute the depth-first-search pre-order and post-order block lists */
void MIRGraph::ComputeDFSOrders() {
  /* Initialize or reset the DFS pre_order list */
  if (dfs_order_ == NULL) {
    dfs_order_ = new (arena_) GrowableArray<int>(arena_, GetNumBlocks(), kGrowableArrayDfsOrder);
  } else {
    /* Just reset the used length on the counter */
    dfs_order_->Reset();
  }

  /* Initialize or reset the DFS post_order list */
  if (dfs_post_order_ == NULL) {
    dfs_post_order_ = new (arena_) GrowableArray<int>(arena_, GetNumBlocks(), kGrowableArrayDfsPostOrder);
  } else {
    /* Just reset the used length on the counter */
    dfs_post_order_->Reset();
  }

  // Reset the visited flag on all nodes.
  ClearAllVisitedFlags();

  // Record the DFS orders.
  RecordDFSOrders(GetEntryBlock());

  num_reachable_blocks_ = dfs_order_->Size();
}

/*
 * Mark block bit on the per-Dalvik register vector to denote that Dalvik
 * register idx is defined in BasicBlock bb.
 */
bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
  if (bb->data_flow_info == NULL) {
    return false;
  }

  ArenaBitVector::Iterator iterator(bb->data_flow_info->def_v);
  while (true) {
    int idx = iterator.Next();
    if (idx == -1) {
      break;
    }
    /* Block bb defines register idx */
    def_block_matrix_[idx]->SetBit(bb->id);
  }
  return true;
}

void MIRGraph::ComputeDefBlockMatrix() {
  int num_registers = cu_->num_dalvik_registers;
  /* Allocate num_dalvik_registers bit vector pointers */
  def_block_matrix_ = static_cast<ArenaBitVector**>
      (arena_->Alloc(sizeof(ArenaBitVector *) * num_registers,
                     ArenaAllocator::kAllocDFInfo));
  int i;

  /* Initialize num_register vectors with num_blocks bits each */
  for (i = 0; i < num_registers; i++) {
    def_block_matrix_[i] =
        new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapBMatrix);
  }
  AllNodesIterator iter(this, false /* not iterative */);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    FindLocalLiveIn(bb);
  }
  AllNodesIterator iter2(this, false /* not iterative */);
  for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
    FillDefBlockMatrix(bb);
  }

  /*
   * Also set the incoming parameters as defs in the entry block.
   * Only need to handle the parameters for the outer method.
   */
  int num_regs = cu_->num_dalvik_registers;
  int in_reg = num_regs - cu_->num_ins;
  for (; in_reg < num_regs; in_reg++) {
    def_block_matrix_[in_reg]->SetBit(GetEntryBlock()->id);
  }
}

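/*
 * Build dom_post_order_traversal_, a post-order walk of the dominator tree
 * rooted at bb, using an explicit work stack of (block, i_dominated iterator)
 * pairs. As a side effect, a taken edge from a block to one of its own
 * dominators marks the method as containing a loop.
 */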
void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) {
  if (dom_post_order_traversal_ == NULL) {
    // First time - create the array.
    dom_post_order_traversal_ =
        new (arena_) GrowableArray<int>(arena_, num_reachable_blocks_,
                                        kGrowableArrayDomPostOrderTraversal);
  } else {
    dom_post_order_traversal_->Reset();
  }
  ClearAllVisitedFlags();
  std::vector<std::pair<BasicBlock*, ArenaBitVector::Iterator*> > work_stack;
  bb->visited = true;
  work_stack.push_back(std::make_pair(bb, new (arena_) ArenaBitVector::Iterator(bb->i_dominated)));
  while (!work_stack.empty()) {
    std::pair<BasicBlock*, ArenaBitVector::Iterator*> curr = work_stack.back();
    BasicBlock* curr_bb = curr.first;
    ArenaBitVector::Iterator* curr_idom_iter = curr.second;
    int bb_idx = curr_idom_iter->Next();
    while ((bb_idx != -1) && (NeedsVisit(GetBasicBlock(bb_idx)) == NULL)) {
      bb_idx = curr_idom_iter->Next();
    }
    if (bb_idx != -1) {
      BasicBlock* new_bb = GetBasicBlock(bb_idx);
      new_bb->visited = true;
      work_stack.push_back(
          std::make_pair(new_bb, new (arena_) ArenaBitVector::Iterator(new_bb->i_dominated)));
    } else {
      // No unvisited children left - emit this block in post-order.
      dom_post_order_traversal_->Insert(curr_bb->id);
      work_stack.pop_back();

      /* hacky loop detection */
      if (curr_bb->taken && curr_bb->dominators->IsBitSet(curr_bb->taken->id)) {
        attributes_ |= METHOD_HAS_LOOP;
      }
    }
  }
}

void MIRGraph::CheckForDominanceFrontier(BasicBlock* dom_bb,
                                         const BasicBlock* succ_bb) {
  /*
   * TODO - evaluate whether phi will ever need to be inserted into exit
   * blocks.
   */
  if (succ_bb->i_dom != dom_bb &&
      succ_bb->block_type == kDalvikByteCode &&
      succ_bb->hidden == false) {
    dom_bb->dom_frontier->SetBit(succ_bb->id);
  }
}

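/*
 * Dominance frontiers are computed per block in the usual two pieces (this is
 * essentially the Cytron et al. construction): DF_local adds CFG successors
 * that bb does not immediately dominate, and DF_up folds in the frontiers of
 * the blocks bb immediately dominates.
 */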
/* Worker function to compute the dominance frontier */
bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
  /* Calculate DF_local */
  if (bb->taken) {
    CheckForDominanceFrontier(bb, bb->taken);
  }
  if (bb->fall_through) {
    CheckForDominanceFrontier(bb, bb->fall_through);
  }
  if (bb->successor_block_list.block_list_type != kNotUsed) {
    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
    while (true) {
      SuccessorBlockInfo *successor_block_info = iterator.Next();
      if (successor_block_info == NULL) {
        break;
      }
      BasicBlock* succ_bb = successor_block_info->block;
      CheckForDominanceFrontier(bb, succ_bb);
    }
  }

  /* Calculate DF_up */
  ArenaBitVector::Iterator bv_iterator(bb->i_dominated);
  while (true) {
    // TUNING: hot call to BitVectorIteratorNext
    int dominated_idx = bv_iterator.Next();
    if (dominated_idx == -1) {
      break;
    }
    BasicBlock* dominated_bb = GetBasicBlock(dominated_idx);
    ArenaBitVector::Iterator df_iterator(dominated_bb->dom_frontier);
    while (true) {
      // TUNING: hot call to BitVectorIteratorNext
      int df_up_idx = df_iterator.Next();
      if (df_up_idx == -1) {
        break;
      }
      BasicBlock* df_up_block = GetBasicBlock(df_up_idx);
      CheckForDominanceFrontier(bb, df_up_block);
    }
  }

  return true;
}

/* Worker function for initializing domination-related data structures */
void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
  int num_total_blocks = GetBasicBlockListCount();

  if (bb->dominators == NULL) {
    bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
                                                 false /* expandable */, kBitMapDominators);
    bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
                                                  false /* expandable */, kBitMapIDominated);
    bb->dom_frontier = new (arena_) ArenaBitVector(arena_, num_total_blocks,
                                                   false /* expandable */, kBitMapDomFrontier);
  } else {
    bb->dominators->ClearAllBits();
    bb->i_dominated->ClearAllBits();
    bb->dom_frontier->ClearAllBits();
  }
  /* Set all bits in the dominator vector */
  bb->dominators->SetInitialBits(num_total_blocks);

  return;
}

/*
 * Walk up the ordered i_dom_list_ until the two chains reach a common parent.
 * Given the post-order numbering of i_dom_list_, this common parent is the
 * last element of the intersection of the dominator sets of block1 and block2.
 */
int MIRGraph::FindCommonParent(int block1, int block2) {
  while (block1 != block2) {
    while (block1 < block2) {
      block1 = i_dom_list_[block1];
      DCHECK_NE(block1, NOTVISITED);
    }
    while (block2 < block1) {
      block2 = i_dom_list_[block2];
      DCHECK_NE(block2, NOTVISITED);
    }
  }
  return block1;
}

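/*
 * Per-block step of the iterative immediate-dominator computation: take the
 * first already-processed predecessor as the initial guess, then intersect
 * with every other processed predecessor via FindCommonParent. Since
 * i_dom_list_ is indexed by dfs_id (post-order number), this is essentially
 * the iterative scheme described by Cooper, Harvey and Kennedy.
 */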
/* Worker function to compute each block's immediate dominator */
bool MIRGraph::ComputeblockIDom(BasicBlock* bb) {
  /* Special-case entry block */
  if (bb == GetEntryBlock()) {
    return false;
  }

  /* Iterate through the predecessors */
  GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);

  /* Find the first processed predecessor */
  int idom = -1;
  while (true) {
    BasicBlock* pred_bb = iter.Next();
    CHECK(pred_bb != NULL);
    if (i_dom_list_[pred_bb->dfs_id] != NOTVISITED) {
      idom = pred_bb->dfs_id;
      break;
    }
  }

  /* Scan the rest of the predecessors */
  while (true) {
    BasicBlock* pred_bb = iter.Next();
    if (!pred_bb) {
      break;
    }
    if (i_dom_list_[pred_bb->dfs_id] == NOTVISITED) {
      continue;
    } else {
      idom = FindCommonParent(pred_bb->dfs_id, idom);
    }
  }

  DCHECK_NE(idom, NOTVISITED);

  /* Did something change? */
  if (i_dom_list_[bb->dfs_id] != idom) {
    i_dom_list_[bb->dfs_id] = idom;
    return true;
  }
  return false;
}

/* Worker function to compute each block's dominators */
bool MIRGraph::ComputeBlockDominators(BasicBlock* bb) {
  if (bb == GetEntryBlock()) {
    bb->dominators->ClearAllBits();
  } else {
    bb->dominators->Copy(bb->i_dom->dominators);
  }
  bb->dominators->SetBit(bb->id);
  return false;
}

bool MIRGraph::SetDominators(BasicBlock* bb) {
  if (bb != GetEntryBlock()) {
    int idom_dfs_idx = i_dom_list_[bb->dfs_id];
    DCHECK_NE(idom_dfs_idx, NOTVISITED);
    int i_dom_idx = dfs_post_order_->Get(idom_dfs_idx);
    BasicBlock* i_dom = GetBasicBlock(i_dom_idx);
    bb->i_dom = i_dom;
    /* Add bb to the i_dominated set of the immediate dominator block */
    i_dom->i_dominated->SetBit(bb->id);
  }
  return false;
}

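/*
 * Overall flow: compute immediate dominators by iterating ComputeblockIDom
 * over the reverse post-order until nothing changes, translate the resulting
 * post-order indices back into basic blocks (SetDominators), derive the full
 * dominator sets from the idom tree (ComputeBlockDominators), and finally
 * compute dominance frontiers over a post-order walk of the dominator tree.
 */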
/* Compute dominators, immediate dominators, and dominance frontiers */
void MIRGraph::ComputeDominators() {
  int num_reachable_blocks = num_reachable_blocks_;
  int num_total_blocks = GetBasicBlockListCount();

  /* Initialize domination-related data structures */
  ReachableNodesIterator iter(this, false /* not iterative */);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    InitializeDominationInfo(bb);
  }

  /* Initialize and clear i_dom_list_ */
  if (i_dom_list_ == NULL) {
    i_dom_list_ = static_cast<int*>(arena_->Alloc(sizeof(int) * num_reachable_blocks,
                                                  ArenaAllocator::kAllocDFInfo));
  }
  for (int i = 0; i < num_reachable_blocks; i++) {
    i_dom_list_[i] = NOTVISITED;
  }

  /* For post-order, the last block is the entry block. Set its i_dom to itself. */
  DCHECK_EQ(GetEntryBlock()->dfs_id, num_reachable_blocks-1);
  i_dom_list_[GetEntryBlock()->dfs_id] = GetEntryBlock()->dfs_id;

  /* Compute the immediate dominators */
  ReversePostOrderDfsIterator iter2(this, true /* iterative */);
  bool change = false;
  for (BasicBlock* bb = iter2.Next(false); bb != NULL; bb = iter2.Next(change)) {
    change = ComputeblockIDom(bb);
  }

  /* Set the dominator for the root node */
  GetEntryBlock()->dominators->ClearAllBits();
  GetEntryBlock()->dominators->SetBit(GetEntryBlock()->id);

  if (temp_block_v_ == NULL) {
    temp_block_v_ = new (arena_) ArenaBitVector(arena_, num_total_blocks,
                                                false /* expandable */, kBitMapTmpBlockV);
  } else {
    temp_block_v_->ClearAllBits();
  }
  GetEntryBlock()->i_dom = NULL;

  ReachableNodesIterator iter3(this, false /* not iterative */);
  for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
    SetDominators(bb);
  }

  ReversePostOrderDfsIterator iter4(this, false /* not iterative */);
  for (BasicBlock* bb = iter4.Next(); bb != NULL; bb = iter4.Next()) {
    ComputeBlockDominators(bb);
  }

  // Compute the dominance frontier for each block.
  ComputeDomPostOrderTraversal(GetEntryBlock());
  PostOrderDOMIterator iter5(this, false /* not iterative */);
  for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
    ComputeDominanceFrontier(bb);
  }
}

/*
 * Perform dest |= src1 & ~src2 (i.e. dest = dest U (src1 - src2)).
 * This is probably not general enough to be placed in BitVector.[ch].
 */
void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
                                 const ArenaBitVector* src2) {
  if (dest->GetStorageSize() != src1->GetStorageSize() ||
      dest->GetStorageSize() != src2->GetStorageSize() ||
      dest->IsExpandable() != src1->IsExpandable() ||
      dest->IsExpandable() != src2->IsExpandable()) {
    LOG(FATAL) << "Incompatible set properties";
  }

  unsigned int idx;
  for (idx = 0; idx < dest->GetStorageSize(); idx++) {
    dest->GetRawStorage()[idx] |= src1->GetRawStorageWord(idx) & ~(src2->GetRawStorageWord(idx));
  }
}

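/*
 * The propagation below implements, for each successor succ of bb,
 *   live_in(bb) |= live_in(succ) & ~def(bb)
 * seeded with bb's locally computed live-in set and iterated to a fixed
 * point by the caller.
 */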
/*
 * Iterate through all successor blocks and propagate up the live-in sets.
 * The calculated result is used for phi-node pruning - where we only need to
 * insert a phi node if the variable is live-in to the block.
 */
bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
  ArenaBitVector* temp_dalvik_register_v = temp_dalvik_register_v_;

  if (bb->data_flow_info == NULL) {
    return false;
  }
  temp_dalvik_register_v->Copy(bb->data_flow_info->live_in_v);
  if (bb->taken && bb->taken->data_flow_info)
    ComputeSuccLineIn(temp_dalvik_register_v, bb->taken->data_flow_info->live_in_v,
                      bb->data_flow_info->def_v);
  if (bb->fall_through && bb->fall_through->data_flow_info)
    ComputeSuccLineIn(temp_dalvik_register_v, bb->fall_through->data_flow_info->live_in_v,
                      bb->data_flow_info->def_v);
  if (bb->successor_block_list.block_list_type != kNotUsed) {
    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
    while (true) {
      SuccessorBlockInfo *successor_block_info = iterator.Next();
      if (successor_block_info == NULL) {
        break;
      }
      BasicBlock* succ_bb = successor_block_info->block;
      if (succ_bb->data_flow_info) {
        ComputeSuccLineIn(temp_dalvik_register_v, succ_bb->data_flow_info->live_in_v,
                          bb->data_flow_info->def_v);
      }
    }
  }
  if (!temp_dalvik_register_v->Equal(bb->data_flow_info->live_in_v)) {
    bb->data_flow_info->live_in_v->Copy(temp_dalvik_register_v);
    return true;
  }
  return false;
}

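/*
 * Pruned SSA phi placement: for each Dalvik register, compute the iterated
 * dominance frontier of the blocks that define it, then insert a phi only in
 * those frontier blocks where the register is live-in.
 */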
/* Insert phi nodes for each variable into the dominance frontiers */
void MIRGraph::InsertPhiNodes() {
  int dalvik_reg;
  ArenaBitVector* phi_blocks =
      new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapPhi);
  ArenaBitVector* tmp_blocks =
      new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapTmpBlocks);
  ArenaBitVector* input_blocks =
      new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapInputBlocks);

  temp_dalvik_register_v_ =
      new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapRegisterV);

  PostOrderDfsIterator iter(this, true /* iterative */);
  bool change = false;
  for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
    change = ComputeBlockLiveIns(bb);
  }

  /* Iterate through each Dalvik register */
  for (dalvik_reg = cu_->num_dalvik_registers - 1; dalvik_reg >= 0; dalvik_reg--) {
    bool change;

    input_blocks->Copy(def_block_matrix_[dalvik_reg]);
    phi_blocks->ClearAllBits();

    /* Calculate the phi blocks for each Dalvik register */
    do {
      change = false;
      tmp_blocks->ClearAllBits();
      ArenaBitVector::Iterator iterator(input_blocks);

      while (true) {
        int idx = iterator.Next();
        if (idx == -1) {
          break;
        }
        BasicBlock* def_bb = GetBasicBlock(idx);

        /* Merge the dominance frontier to tmp_blocks */
        // TUNING: hot call to Union().
        if (def_bb->dom_frontier != NULL) {
          tmp_blocks->Union(def_bb->dom_frontier);
        }
      }
      if (!phi_blocks->Equal(tmp_blocks)) {
        change = true;
        phi_blocks->Copy(tmp_blocks);

        /*
         * Iterate through the original blocks plus the new ones in
         * the dominance frontier.
         */
        input_blocks->Copy(phi_blocks);
        input_blocks->Union(def_block_matrix_[dalvik_reg]);
      }
    } while (change);

    /*
     * Insert a phi node for dalvik_reg in the phi_blocks if the Dalvik
     * register is in the live-in set.
     */
    ArenaBitVector::Iterator iterator(phi_blocks);
    while (true) {
      int idx = iterator.Next();
      if (idx == -1) {
        break;
      }
      BasicBlock* phi_bb = GetBasicBlock(idx);
      /* Variable will be clobbered before being used - no need for phi */
      if (!phi_bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) {
        continue;
      }
      MIR *phi =
          static_cast<MIR*>(arena_->Alloc(sizeof(MIR), ArenaAllocator::kAllocDFInfo));
      phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
      phi->dalvikInsn.vA = dalvik_reg;
      phi->offset = phi_bb->start_offset;
      phi->m_unit_index = 0;  // Arbitrarily assign all Phi nodes to outermost method.
      PrependMIR(phi_bb, phi);
    }
  }
}

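/*
 * For every phi at the head of bb, collect one use per predecessor: the
 * predecessor's latest SSA name for the phi's Dalvik register. The matching
 * predecessor block ids are stashed in an array pointed to by vB so later
 * phases know which value flows in along which edge.
 */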
/*
 * Worker function to insert phi-operands with latest SSA names from
 * predecessor blocks
 */
bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
  MIR *mir;
  std::vector<int> uses;
  std::vector<int> incoming_arc;

  /* Phi nodes are at the beginning of each block */
  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi))
      return true;
    int ssa_reg = mir->ssa_rep->defs[0];
    DCHECK_GE(ssa_reg, 0);   // Shouldn't see compiler temps here
    int v_reg = SRegToVReg(ssa_reg);

    uses.clear();
    incoming_arc.clear();

    /* Iterate through the predecessors */
    GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
    while (true) {
      BasicBlock* pred_bb = iter.Next();
      if (!pred_bb) {
        break;
      }
      int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg];
      uses.push_back(ssa_reg);
      incoming_arc.push_back(pred_bb->id);
    }

    /* Count the number of SSA registers for a Dalvik register */
    int num_uses = uses.size();
    mir->ssa_rep->num_uses = num_uses;
    mir->ssa_rep->uses =
        static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses, ArenaAllocator::kAllocDFInfo));
    mir->ssa_rep->fp_use =
        static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, ArenaAllocator::kAllocDFInfo));
    int* incoming =
        static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses, ArenaAllocator::kAllocDFInfo));
    // TODO: Ugly, rework (but don't burden each MIR/LIR for Phi-only needs)
    mir->dalvikInsn.vB = reinterpret_cast<uintptr_t>(incoming);

    /* Set the uses array for the phi node */
    int *use_ptr = mir->ssa_rep->uses;
    for (int i = 0; i < num_uses; i++) {
      *use_ptr++ = uses[i];
      *incoming++ = incoming_arc[i];
    }
  }

  return true;
}

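/*
 * Recursive pre-order renaming: convert the block's instructions to SSA form,
 * then visit each successor with a snapshot of vreg_to_ssa_map_ saved before
 * the recursion and restored afterwards, so sibling subtrees start from this
 * block's name map rather than a sibling's.
 */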
void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) {
  if (block->visited || block->hidden) {
    return;
  }
  block->visited = true;

  /* Process this block */
  DoSSAConversion(block);
  int map_size = sizeof(int) * cu_->num_dalvik_registers;

  /* Save SSA map snapshot */
  int* saved_ssa_map =
      static_cast<int*>(arena_->Alloc(map_size, ArenaAllocator::kAllocDalvikToSSAMap));
  memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);

  if (block->fall_through) {
    DoDFSPreOrderSSARename(block->fall_through);
    /* Restore SSA map snapshot */
    memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
  }
  if (block->taken) {
    DoDFSPreOrderSSARename(block->taken);
    /* Restore SSA map snapshot */
    memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
  }
  if (block->successor_block_list.block_list_type != kNotUsed) {
    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(block->successor_block_list.blocks);
    while (true) {
      SuccessorBlockInfo *successor_block_info = iterator.Next();
      if (successor_block_info == NULL) {
        break;
      }
      BasicBlock* succ_bb = successor_block_info->block;
      DoDFSPreOrderSSARename(succ_bb);
      /* Restore SSA map snapshot */
      memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
    }
  }
  vreg_to_ssa_map_ = saved_ssa_map;
  return;
}

/* Perform SSA transformation for the whole method */
void MIRGraph::SSATransformation() {
  /* Compute the DFS order */
  ComputeDFSOrders();

  /* Compute the dominator info */
  ComputeDominators();

  /* Allocate data structures in preparation for SSA conversion */
  CompilerInitializeSSAConversion();

  /* Find out the "Dalvik reg def x block" relation */
  ComputeDefBlockMatrix();

  /* Insert phi nodes into the dominance frontiers for all variables */
  InsertPhiNodes();

  /* Rename registers by local defs and phi nodes */
  ClearAllVisitedFlags();
  DoDFSPreOrderSSARename(GetEntryBlock());

  /*
   * Shared temp bit vector used by each block to count the number of defs
   * from all the predecessor blocks.
   */
  temp_ssa_register_v_ =
      new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false, kBitMapTempSSARegisterV);

  /* Insert phi-operands with latest SSA names from predecessor blocks */
  ReachableNodesIterator iter2(this, false /* not iterative */);
  for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
    InsertPhiNodeOperands(bb);
  }

  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
    DumpCFG("/sdcard/3_post_ssa_cfg/", false);
  }
  if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
    VerifyDataflow();
  }
}

}  // namespace art