1 /*
2  * Copyright (C) 2013 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "base/stl_util.h"
18 #include "compiler_internals.h"
19 #include "dex_file-inl.h"
20 #include "leb128.h"
21 #include "mir_graph.h"
22 
23 namespace art {
24 
25 #define MAX_PATTERN_LEN 5
26 
27 struct CodePattern {
28   const Instruction::Code opcodes[MAX_PATTERN_LEN];
29   const SpecialCaseHandler handler_code;
30 };
31 
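// Short opcode sequences that identify trivially simple methods (empty bodies, constant
// returns, and single field getters/setters); candidates are matched opcode-by-opcode
// while the method is parsed in InlineMethod().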
32 static const CodePattern special_patterns[] = {
33   {{Instruction::RETURN_VOID}, kNullMethod},
34   {{Instruction::CONST, Instruction::RETURN}, kConstFunction},
35   {{Instruction::CONST_4, Instruction::RETURN}, kConstFunction},
36   {{Instruction::CONST_4, Instruction::RETURN_OBJECT}, kConstFunction},
37   {{Instruction::CONST_16, Instruction::RETURN}, kConstFunction},
38   {{Instruction::IGET, Instruction::RETURN}, kIGet},
39   {{Instruction::IGET_BOOLEAN, Instruction::RETURN}, kIGetBoolean},
40   {{Instruction::IGET_OBJECT, Instruction::RETURN_OBJECT}, kIGetObject},
41   {{Instruction::IGET_BYTE, Instruction::RETURN}, kIGetByte},
42   {{Instruction::IGET_CHAR, Instruction::RETURN}, kIGetChar},
43   {{Instruction::IGET_SHORT, Instruction::RETURN}, kIGetShort},
44   {{Instruction::IGET_WIDE, Instruction::RETURN_WIDE}, kIGetWide},
45   {{Instruction::IPUT, Instruction::RETURN_VOID}, kIPut},
46   {{Instruction::IPUT_BOOLEAN, Instruction::RETURN_VOID}, kIPutBoolean},
47   {{Instruction::IPUT_OBJECT, Instruction::RETURN_VOID}, kIPutObject},
48   {{Instruction::IPUT_BYTE, Instruction::RETURN_VOID}, kIPutByte},
49   {{Instruction::IPUT_CHAR, Instruction::RETURN_VOID}, kIPutChar},
50   {{Instruction::IPUT_SHORT, Instruction::RETURN_VOID}, kIPutShort},
51   {{Instruction::IPUT_WIDE, Instruction::RETURN_VOID}, kIPutWide},
52   {{Instruction::RETURN}, kIdentity},
53   {{Instruction::RETURN_OBJECT}, kIdentity},
54   {{Instruction::RETURN_WIDE}, kIdentity},
55 };
56 
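// Printable names for the compiler-internal extended MIR opcodes, indexed by (opcode - kMirOpFirst).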
57 const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = {
58   "Phi",
59   "Copy",
60   "FusedCmplFloat",
61   "FusedCmpgFloat",
62   "FusedCmplDouble",
63   "FusedCmpgDouble",
64   "FusedCmpLong",
65   "Nop",
66   "OpNullCheck",
67   "OpRangeCheck",
68   "OpDivZeroCheck",
69   "Check1",
70   "Check2",
71   "Select",
72 };
73 
74 MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
75     : reg_location_(NULL),
76       compiler_temps_(arena, 6, kGrowableArrayMisc),
77       cu_(cu),
78       ssa_base_vregs_(NULL),
79       ssa_subscripts_(NULL),
80       vreg_to_ssa_map_(NULL),
81       ssa_last_defs_(NULL),
82       is_constant_v_(NULL),
83       constant_values_(NULL),
84       use_counts_(arena, 256, kGrowableArrayMisc),
85       raw_use_counts_(arena, 256, kGrowableArrayMisc),
86       num_reachable_blocks_(0),
87       dfs_order_(NULL),
88       dfs_post_order_(NULL),
89       dom_post_order_traversal_(NULL),
90       i_dom_list_(NULL),
91       def_block_matrix_(NULL),
92       temp_block_v_(NULL),
93       temp_dalvik_register_v_(NULL),
94       temp_ssa_register_v_(NULL),
95       block_list_(arena, 100, kGrowableArrayBlockList),
96       try_block_addr_(NULL),
97       entry_block_(NULL),
98       exit_block_(NULL),
99       cur_block_(NULL),
100       num_blocks_(0),
101       current_code_item_(NULL),
102       current_method_(kInvalidEntry),
103       current_offset_(kInvalidEntry),
104       def_count_(0),
105       opcode_count_(NULL),
106       num_ssa_regs_(0),
107       method_sreg_(0),
108       attributes_(METHOD_IS_LEAF),  // Start with leaf assumption, change on encountering invoke.
109       checkstats_(NULL),
110       special_case_(kNoHandler),
111       arena_(arena) {
112   try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
113 }
114 
115 MIRGraph::~MIRGraph() {
116   STLDeleteElements(&m_units_);
117 }
118 
119 /*
120  * Parse a single instruction and return its length in code units.
121  */
122 int MIRGraph::ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction) {
123   const Instruction* instruction = Instruction::At(code_ptr);
124   *decoded_instruction = DecodedInstruction(instruction);
125 
126   return instruction->SizeInCodeUnits();
127 }
128 
129 
130 /* Split an existing block from the specified code offset into two */
131 BasicBlock* MIRGraph::SplitBlock(unsigned int code_offset,
132                                  BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
133   MIR* insn = orig_block->first_mir_insn;
134   while (insn) {
135     if (insn->offset == code_offset) break;
136     insn = insn->next;
137   }
138   if (insn == NULL) {
139     LOG(FATAL) << "Break split failed";
140   }
141   BasicBlock *bottom_block = NewMemBB(kDalvikByteCode, num_blocks_++);
142   block_list_.Insert(bottom_block);
143 
144   bottom_block->start_offset = code_offset;
145   bottom_block->first_mir_insn = insn;
146   bottom_block->last_mir_insn = orig_block->last_mir_insn;
147 
148   /* If this block was terminated by a return, the flag needs to go with the bottom block */
149   bottom_block->terminated_by_return = orig_block->terminated_by_return;
150   orig_block->terminated_by_return = false;
151 
152   /* Add it to the quick lookup cache */
153   block_map_.Put(bottom_block->start_offset, bottom_block);
154 
155   /* Handle the taken path */
156   bottom_block->taken = orig_block->taken;
157   if (bottom_block->taken) {
158     orig_block->taken = NULL;
159     bottom_block->taken->predecessors->Delete(orig_block);
160     bottom_block->taken->predecessors->Insert(bottom_block);
161   }
162 
163   /* Handle the fallthrough path */
164   bottom_block->fall_through = orig_block->fall_through;
165   orig_block->fall_through = bottom_block;
166   bottom_block->predecessors->Insert(orig_block);
167   if (bottom_block->fall_through) {
168     bottom_block->fall_through->predecessors->Delete(orig_block);
169     bottom_block->fall_through->predecessors->Insert(bottom_block);
170   }
171 
172   /* Handle the successor list */
173   if (orig_block->successor_block_list.block_list_type != kNotUsed) {
174     bottom_block->successor_block_list = orig_block->successor_block_list;
175     orig_block->successor_block_list.block_list_type = kNotUsed;
176     GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bottom_block->successor_block_list.blocks);
177     while (true) {
178       SuccessorBlockInfo *successor_block_info = iterator.Next();
179       if (successor_block_info == NULL) break;
180       BasicBlock *bb = successor_block_info->block;
181       bb->predecessors->Delete(orig_block);
182       bb->predecessors->Insert(bottom_block);
183     }
184   }
185 
186   orig_block->last_mir_insn = insn->prev;
187 
188   insn->prev->next = NULL;
189   insn->prev = NULL;
190   /*
191    * Update the immediate predecessor block pointer so that outgoing edges
192    * can be applied to the proper block.
193    */
194   if (immed_pred_block_p) {
195     DCHECK_EQ(*immed_pred_block_p, orig_block);
196     *immed_pred_block_p = bottom_block;
197   }
198   return bottom_block;
199 }
200 
201 /*
202  * Given a code offset, find out the block that starts with it. If the offset
203  * is in the middle of an existing block, split it into two.  If immed_pred_block_p
204  * is non-null and is the block being split, update *immed_pred_block_p to
205  * point to the bottom block so that outgoing edges can be set up properly
206  * (by the caller)
207  * Utilizes a map for fast lookup of the typical cases.
208  */
209 BasicBlock* MIRGraph::FindBlock(unsigned int code_offset, bool split, bool create,
210                                 BasicBlock** immed_pred_block_p) {
211   BasicBlock* bb;
212   unsigned int i;
213   SafeMap<unsigned int, BasicBlock*>::iterator it;
214 
215   it = block_map_.find(code_offset);
216   if (it != block_map_.end()) {
217     return it->second;
218   } else if (!create) {
219     return NULL;
220   }
221 
222   if (split) {
223     for (i = 0; i < block_list_.Size(); i++) {
224       bb = block_list_.Get(i);
225       if (bb->block_type != kDalvikByteCode) continue;
226       /* Check if a branch jumps into the middle of an existing block */
227       if ((code_offset > bb->start_offset) && (bb->last_mir_insn != NULL) &&
228           (code_offset <= bb->last_mir_insn->offset)) {
229         BasicBlock *new_bb = SplitBlock(code_offset, bb, bb == *immed_pred_block_p ?
230                                        immed_pred_block_p : NULL);
231         return new_bb;
232       }
233     }
234   }
235 
236   /* Create a new one */
237   bb = NewMemBB(kDalvikByteCode, num_blocks_++);
238   block_list_.Insert(bb);
239   bb->start_offset = code_offset;
240   block_map_.Put(bb->start_offset, bb);
241   return bb;
242 }
243 
244 /* Identify code range in try blocks and set up the empty catch blocks */
245 void MIRGraph::ProcessTryCatchBlocks() {
246   int tries_size = current_code_item_->tries_size_;
247   int offset;
248 
249   if (tries_size == 0) {
250     return;
251   }
252 
253   for (int i = 0; i < tries_size; i++) {
254     const DexFile::TryItem* pTry =
255         DexFile::GetTryItems(*current_code_item_, i);
256     int start_offset = pTry->start_addr_;
257     int end_offset = start_offset + pTry->insn_count_;
258     for (offset = start_offset; offset < end_offset; offset++) {
259       try_block_addr_->SetBit(offset);
260     }
261   }
262 
263   // Iterate over each of the handlers to enqueue the empty Catch blocks
264   const byte* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0);
265   uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
266   for (uint32_t idx = 0; idx < handlers_size; idx++) {
267     CatchHandlerIterator iterator(handlers_ptr);
268     for (; iterator.HasNext(); iterator.Next()) {
269       uint32_t address = iterator.GetHandlerAddress();
270       FindBlock(address, false /* split */, true /*create*/,
271                 /* immed_pred_block_p */ NULL);
272     }
273     handlers_ptr = iterator.EndDataPointer();
274   }
275 }
276 
277 /* Process instructions with the kBranch flag */
278 BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
279                                        int flags, const uint16_t* code_ptr,
280                                        const uint16_t* code_end) {
281   int target = cur_offset;
282   switch (insn->dalvikInsn.opcode) {
283     case Instruction::GOTO:
284     case Instruction::GOTO_16:
285     case Instruction::GOTO_32:
286       target += insn->dalvikInsn.vA;
287       break;
288     case Instruction::IF_EQ:
289     case Instruction::IF_NE:
290     case Instruction::IF_LT:
291     case Instruction::IF_GE:
292     case Instruction::IF_GT:
293     case Instruction::IF_LE:
294       cur_block->conditional_branch = true;
295       target += insn->dalvikInsn.vC;
296       break;
297     case Instruction::IF_EQZ:
298     case Instruction::IF_NEZ:
299     case Instruction::IF_LTZ:
300     case Instruction::IF_GEZ:
301     case Instruction::IF_GTZ:
302     case Instruction::IF_LEZ:
303       cur_block->conditional_branch = true;
304       target += insn->dalvikInsn.vB;
305       break;
306     default:
307       LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
308   }
309   BasicBlock *taken_block = FindBlock(target, /* split */ true, /* create */ true,
310                                       /* immed_pred_block_p */ &cur_block);
311   cur_block->taken = taken_block;
312   taken_block->predecessors->Insert(cur_block);
313 
314   /* Always terminate the current block for conditional branches */
315   if (flags & Instruction::kContinue) {
316     BasicBlock *fallthrough_block = FindBlock(cur_offset + width,
317                                              /*
318                                               * If the method is processed
319                                               * in sequential order from the
320                                               * beginning, we don't need to
321                                               * specify split for continue
322                                               * blocks. However, this
323                                               * routine can be called by
324                                               * compileLoop, which starts
325                                               * parsing the method from an
326                                               * arbitrary address in the
327                                               * method body.
328                                               */
329                                              true,
330                                              /* create */
331                                              true,
332                                              /* immed_pred_block_p */
333                                              &cur_block);
334     cur_block->fall_through = fallthrough_block;
335     fallthrough_block->predecessors->Insert(cur_block);
336   } else if (code_ptr < code_end) {
337     FindBlock(cur_offset + width, /* split */ false, /* create */ true,
338                 /* immed_pred_block_p */ NULL);
339   }
340   return cur_block;
341 }
342 
343 /* Process instructions with the kSwitch flag */
344 void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
345                                 int flags) {
346   const uint16_t* switch_data =
347       reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
348   int size;
349   const int* keyTable;
350   const int* target_table;
351   int i;
352   int first_key;
353 
354   /*
355    * Packed switch data format:
356    *  ushort ident = 0x0100   magic value
357    *  ushort size             number of entries in the table
358    *  int first_key           first (and lowest) switch case value
359    *  int targets[size]       branch targets, relative to switch opcode
360    *
361    * Total size is (4+size*2) 16-bit code units.
362    */
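  // For example (illustrative), a packed-switch covering keys 10 and 11 occupies 4 + 2*2 = 8
  // code units: 0x0100, 0x0002 (size), 0x000a, 0x0000 (first_key = 10, low half first), then
  // the two 32-bit branch targets, each stored as a pair of code units.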
363   if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) {
364     DCHECK_EQ(static_cast<int>(switch_data[0]),
365               static_cast<int>(Instruction::kPackedSwitchSignature));
366     size = switch_data[1];
367     first_key = switch_data[2] | (switch_data[3] << 16);
368     target_table = reinterpret_cast<const int*>(&switch_data[4]);
369     keyTable = NULL;        // Make the compiler happy
370   /*
371    * Sparse switch data format:
372    *  ushort ident = 0x0200   magic value
373    *  ushort size             number of entries in the table; > 0
374    *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
375    *  int targets[size]       branch targets, relative to switch opcode
376    *
377    * Total size is (2+size*4) 16-bit code units.
378    */
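  // For example (illustrative), a sparse-switch with keys {10, 1000} occupies 2 + 2*4 = 10 code
  // units: 0x0200, 0x0002 (size), the two 32-bit keys in ascending order, then the two 32-bit
  // branch targets (relative to the switch opcode).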
379   } else {
380     DCHECK_EQ(static_cast<int>(switch_data[0]),
381               static_cast<int>(Instruction::kSparseSwitchSignature));
382     size = switch_data[1];
383     keyTable = reinterpret_cast<const int*>(&switch_data[2]);
384     target_table = reinterpret_cast<const int*>(&switch_data[2 + size*2]);
385     first_key = 0;   // To make the compiler happy
386   }
387 
388   if (cur_block->successor_block_list.block_list_type != kNotUsed) {
389     LOG(FATAL) << "Successor block list already in use: "
390                << static_cast<int>(cur_block->successor_block_list.block_list_type);
391   }
392   cur_block->successor_block_list.block_list_type =
393       (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
394       kPackedSwitch : kSparseSwitch;
395   cur_block->successor_block_list.blocks =
396       new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);
397 
398   for (i = 0; i < size; i++) {
399     BasicBlock *case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
400                                       /* create */ true, /* immed_pred_block_p */ &cur_block);
401     SuccessorBlockInfo *successor_block_info =
402         static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
403                                                        ArenaAllocator::kAllocSuccessor));
404     successor_block_info->block = case_block;
405     successor_block_info->key =
406         (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
407         first_key + i : keyTable[i];
408     cur_block->successor_block_list.blocks->Insert(successor_block_info);
409     case_block->predecessors->Insert(cur_block);
410   }
411 
412   /* Fall-through case */
413   BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
414                                             /* create */ true, /* immed_pred_block_p */ NULL);
415   cur_block->fall_through = fallthrough_block;
416   fallthrough_block->predecessors->Insert(cur_block);
417 }
418 
419 /* Process instructions with the kThrow flag */
420 BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
421                                       int flags, ArenaBitVector* try_block_addr,
422                                       const uint16_t* code_ptr, const uint16_t* code_end) {
423   bool in_try_block = try_block_addr->IsBitSet(cur_offset);
424 
425   /* In try block */
426   if (in_try_block) {
427     CatchHandlerIterator iterator(*current_code_item_, cur_offset);
428 
429     if (cur_block->successor_block_list.block_list_type != kNotUsed) {
430       LOG(INFO) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
431       LOG(FATAL) << "Successor block list already in use: "
432                  << static_cast<int>(cur_block->successor_block_list.block_list_type);
433     }
434 
435     cur_block->successor_block_list.block_list_type = kCatch;
436     cur_block->successor_block_list.blocks =
437         new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, 2, kGrowableArraySuccessorBlocks);
438 
439     for (; iterator.HasNext(); iterator.Next()) {
440       BasicBlock *catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split */,
441                                           false /* create */, NULL /* immed_pred_block_p */);
442       catch_block->catch_entry = true;
443       if (kIsDebugBuild) {
444         catches_.insert(catch_block->start_offset);
445       }
446       SuccessorBlockInfo *successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
447           (arena_->Alloc(sizeof(SuccessorBlockInfo), ArenaAllocator::kAllocSuccessor));
448       successor_block_info->block = catch_block;
449       successor_block_info->key = iterator.GetHandlerTypeIndex();
450       cur_block->successor_block_list.blocks->Insert(successor_block_info);
451       catch_block->predecessors->Insert(cur_block);
452     }
453   } else {
454     BasicBlock *eh_block = NewMemBB(kExceptionHandling, num_blocks_++);
455     cur_block->taken = eh_block;
456     block_list_.Insert(eh_block);
457     eh_block->start_offset = cur_offset;
458     eh_block->predecessors->Insert(cur_block);
459   }
460 
461   if (insn->dalvikInsn.opcode == Instruction::THROW) {
462     cur_block->explicit_throw = true;
463     if (code_ptr < code_end) {
464       // Force creation of a new block following the THROW via side effect.
465       FindBlock(cur_offset + width, /* split */ false, /* create */ true,
466                 /* immed_pred_block_p */ NULL);
467     }
468     if (!in_try_block) {
469       // Don't split a THROW that can't rethrow - we're done.
470       return cur_block;
471     }
472   }
473 
474   /*
475    * Split the potentially-throwing instruction into two parts.
476    * The first half will be a pseudo-op that captures the exception
477    * edges and terminates the basic block.  It always falls through.
478    * Then, create a new basic block that begins with the throwing instruction
479    * (minus exceptions).  Note: this new basic block must NOT be entered into
480    * the block_map.  If the potentially-throwing instruction is the target of a
481  * future branch, we need to find the check pseudo half.  The new
482    * basic block containing the work portion of the instruction should
483    * only be entered via fallthrough from the block containing the
484    * pseudo exception edge MIR.  Note also that this new block is
485    * not automatically terminated after the work portion, and may
486    * contain following instructions.
487    */
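  // Illustrative sketch of the split for a potentially-throwing IGET inside a try block:
  //   cur_block: ..., kMirOpCheck(iget)   <- carries the exception edges, then falls through
  //   new_block: iget ...                 <- work half; reached only via that fallthrough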
488   BasicBlock *new_block = NewMemBB(kDalvikByteCode, num_blocks_++);
489   block_list_.Insert(new_block);
490   new_block->start_offset = insn->offset;
491   cur_block->fall_through = new_block;
492   new_block->predecessors->Insert(cur_block);
493   MIR* new_insn = static_cast<MIR*>(arena_->Alloc(sizeof(MIR), ArenaAllocator::kAllocMIR));
494   *new_insn = *insn;
495   insn->dalvikInsn.opcode =
496       static_cast<Instruction::Code>(kMirOpCheck);
497   // Associate the two halves
498   insn->meta.throw_insn = new_insn;
499   new_insn->meta.throw_insn = insn;
500   AppendMIR(new_block, new_insn);
501   return new_block;
502 }
503 
504 /* Parse a Dex method and insert it into the MIRGraph at the current insert point. */
505 void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
506                            InvokeType invoke_type, uint16_t class_def_idx,
507                            uint32_t method_idx, jobject class_loader, const DexFile& dex_file) {
508   current_code_item_ = code_item;
509   method_stack_.push_back(std::make_pair(current_method_, current_offset_));
510   current_method_ = m_units_.size();
511   current_offset_ = 0;
512   // TODO: will need to snapshot stack image and use that as the mir context identification.
513   m_units_.push_back(new DexCompilationUnit(cu_, class_loader, Runtime::Current()->GetClassLinker(),
514                      dex_file, current_code_item_, class_def_idx, method_idx, access_flags));
515   const uint16_t* code_ptr = current_code_item_->insns_;
516   const uint16_t* code_end =
517       current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;
518 
519   // TODO: need to rework expansion of block list & try_block_addr when inlining activated.
520   block_list_.Resize(block_list_.Size() + current_code_item_->insns_size_in_code_units_);
521   // TODO: replace with explicit resize routine.  Using automatic extension side effect for now.
522   try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
523   try_block_addr_->ClearBit(current_code_item_->insns_size_in_code_units_);
524 
525   // If this is the first method, set up default entry and exit blocks.
526   if (current_method_ == 0) {
527     DCHECK(entry_block_ == NULL);
528     DCHECK(exit_block_ == NULL);
529     DCHECK_EQ(num_blocks_, 0);
530     entry_block_ = NewMemBB(kEntryBlock, num_blocks_++);
531     exit_block_ = NewMemBB(kExitBlock, num_blocks_++);
532     block_list_.Insert(entry_block_);
533     block_list_.Insert(exit_block_);
534     // TODO: deprecate all "cu->" fields; move what's left to wherever CompilationUnit is allocated.
535     cu_->dex_file = &dex_file;
536     cu_->class_def_idx = class_def_idx;
537     cu_->method_idx = method_idx;
538     cu_->access_flags = access_flags;
539     cu_->invoke_type = invoke_type;
540     cu_->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
541     cu_->num_ins = current_code_item_->ins_size_;
542     cu_->num_regs = current_code_item_->registers_size_ - cu_->num_ins;
543     cu_->num_outs = current_code_item_->outs_size_;
544     cu_->num_dalvik_registers = current_code_item_->registers_size_;
545     cu_->insns = current_code_item_->insns_;
546     cu_->code_item = current_code_item_;
547   } else {
548     UNIMPLEMENTED(FATAL) << "Nested inlining not implemented.";
549     /*
550      * Will need to manage storage for ins & outs, push previous state and update
551      * insert point.
552      */
553   }
554 
555   /* Current block to record parsed instructions */
556   BasicBlock *cur_block = NewMemBB(kDalvikByteCode, num_blocks_++);
557   DCHECK_EQ(current_offset_, 0);
558   cur_block->start_offset = current_offset_;
559   block_list_.Insert(cur_block);
560   /* Add first block to the fast lookup cache */
561   // FIXME: block map needs association with offset/method pair rather than just offset
562   block_map_.Put(cur_block->start_offset, cur_block);
563   // FIXME: this needs to insert at the insert point rather than entry block.
564   entry_block_->fall_through = cur_block;
565   cur_block->predecessors->Insert(entry_block_);
566 
567   /* Identify code range in try blocks and set up the empty catch blocks */
568   ProcessTryCatchBlocks();
569 
570   /* Set up for simple method detection */
571   int num_patterns = sizeof(special_patterns)/sizeof(special_patterns[0]);
572   bool live_pattern = (num_patterns > 0) && !(cu_->disable_opt & (1 << kMatch));
573   bool* dead_pattern =
574       static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_patterns, ArenaAllocator::kAllocMisc));
575   int pattern_pos = 0;
576 
577   /* Parse all instructions and put them into containing basic blocks */
578   while (code_ptr < code_end) {
579     MIR *insn = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), ArenaAllocator::kAllocMIR));
580     insn->offset = current_offset_;
581     insn->m_unit_index = current_method_;
582     int width = ParseInsn(code_ptr, &insn->dalvikInsn);
583     insn->width = width;
584     Instruction::Code opcode = insn->dalvikInsn.opcode;
585     if (opcode_count_ != NULL) {
586       opcode_count_[static_cast<int>(opcode)]++;
587     }
588 
589 
590     /* Possible simple method? */
591     if (live_pattern) {
592       live_pattern = false;
593       special_case_ = kNoHandler;
594       for (int i = 0; i < num_patterns; i++) {
595         if (!dead_pattern[i]) {
596           if (special_patterns[i].opcodes[pattern_pos] == opcode) {
597             live_pattern = true;
598             special_case_ = special_patterns[i].handler_code;
599           } else {
600             dead_pattern[i] = true;
601           }
602         }
603       }
604       pattern_pos++;
605     }
606 
607     int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
608 
609     int df_flags = oat_data_flow_attributes_[insn->dalvikInsn.opcode];
610 
611     if (df_flags & DF_HAS_DEFS) {
612       def_count_ += (df_flags & DF_A_WIDE) ? 2 : 1;
613     }
614 
615     // Check for inline data block signatures
616     if (opcode == Instruction::NOP) {
617       // A simple NOP will have a width of 1 at this point, embedded data NOP > 1.
618       if ((width == 1) && ((current_offset_ & 0x1) == 0x1) && ((code_end - code_ptr) > 1)) {
619         // Could be an aligning nop.  If an embedded data NOP follows, treat the pair as a single unit.
620         uint16_t following_raw_instruction = code_ptr[1];
621         if ((following_raw_instruction == Instruction::kSparseSwitchSignature) ||
622             (following_raw_instruction == Instruction::kPackedSwitchSignature) ||
623             (following_raw_instruction == Instruction::kArrayDataSignature)) {
624           width += Instruction::At(code_ptr + 1)->SizeInCodeUnits();
625         }
626       }
627       if (width == 1) {
628         // It is a simple nop - treat normally.
629         AppendMIR(cur_block, insn);
630       } else {
631         DCHECK(cur_block->fall_through == NULL);
632         DCHECK(cur_block->taken == NULL);
633         // Unreachable instruction, mark for no continuation.
634         flags &= ~Instruction::kContinue;
635       }
636     } else {
637       AppendMIR(cur_block, insn);
638     }
639 
640     code_ptr += width;
641 
642     if (flags & Instruction::kBranch) {
643       cur_block = ProcessCanBranch(cur_block, insn, current_offset_,
644                                    width, flags, code_ptr, code_end);
645     } else if (flags & Instruction::kReturn) {
646       cur_block->terminated_by_return = true;
647       cur_block->fall_through = exit_block_;
648       exit_block_->predecessors->Insert(cur_block);
649       /*
650        * Terminate the current block if there are instructions
651        * afterwards.
652        */
653       if (code_ptr < code_end) {
654         /*
655          * Create a fallthrough block for real instructions
656          * (incl. NOP).
657          */
658          FindBlock(current_offset_ + width, /* split */ false, /* create */ true,
659                    /* immed_pred_block_p */ NULL);
660       }
661     } else if (flags & Instruction::kThrow) {
662       cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
663                                   code_ptr, code_end);
664     } else if (flags & Instruction::kSwitch) {
665       ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
666     }
667     current_offset_ += width;
668     BasicBlock *next_block = FindBlock(current_offset_, /* split */ false,
669                                        /* create */ false, /* immed_pred_block_p */ NULL);
670     if (next_block) {
671       /*
672        * The next instruction could be the target of a previously parsed
673        * forward branch so a block is already created. If the current
674        * instruction is not an unconditional branch, connect them through
675        * the fall-through link.
676        */
677       DCHECK(cur_block->fall_through == NULL ||
678              cur_block->fall_through == next_block ||
679              cur_block->fall_through == exit_block_);
680 
681       if ((cur_block->fall_through == NULL) && (flags & Instruction::kContinue)) {
682         cur_block->fall_through = next_block;
683         next_block->predecessors->Insert(cur_block);
684       }
685       cur_block = next_block;
686     }
687   }
688   if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
689     DumpCFG("/sdcard/1_post_parse_cfg/", true);
690   }
691 
692   if (cu_->verbose) {
693     DumpMIRGraph();
694   }
695 }
696 
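/* Log a count of each Dalvik opcode seen in this compilation (valid only when opcode counting is enabled). */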
697 void MIRGraph::ShowOpcodeStats() {
698   DCHECK(opcode_count_ != NULL);
699   LOG(INFO) << "Opcode Count";
700   for (int i = 0; i < kNumPackedOpcodes; i++) {
701     if (opcode_count_[i] != 0) {
702       LOG(INFO) << "-C- " << Instruction::Name(static_cast<Instruction::Code>(i))
703                 << " " << opcode_count_[i];
704     }
705   }
706 }
707 
708 // TODO: use a configurable base prefix, and adjust callers to supply pass name.
709 /* Dump the CFG into a DOT graph */
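// The resulting .dot file can be rendered with Graphviz, e.g. "dot -Tpng <name>.dot -o cfg.png".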
710 void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks) {
711   FILE* file;
712   std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
713   ReplaceSpecialChars(fname);
714   fname = StringPrintf("%s%s%x.dot", dir_prefix, fname.c_str(),
715                       GetEntryBlock()->fall_through->start_offset);
716   file = fopen(fname.c_str(), "w");
717   if (file == NULL) {
718     return;
719   }
720   fprintf(file, "digraph G {\n");
721 
722   fprintf(file, "  rankdir=TB\n");
723 
724   int num_blocks = all_blocks ? GetNumBlocks() : num_reachable_blocks_;
725   int idx;
726 
727   for (idx = 0; idx < num_blocks; idx++) {
728     int block_idx = all_blocks ? idx : dfs_order_->Get(idx);
729     BasicBlock *bb = GetBasicBlock(block_idx);
730     if (bb == NULL) break;
731     if (bb->block_type == kDead) continue;
732     if (bb->block_type == kEntryBlock) {
733       fprintf(file, "  entry_%d [shape=Mdiamond];\n", bb->id);
734     } else if (bb->block_type == kExitBlock) {
735       fprintf(file, "  exit_%d [shape=Mdiamond];\n", bb->id);
736     } else if (bb->block_type == kDalvikByteCode) {
737       fprintf(file, "  block%04x_%d [shape=record,label = \"{ \\\n",
738               bb->start_offset, bb->id);
739       const MIR *mir;
740       fprintf(file, "    {block id %d\\l}%s\\\n", bb->id,
741               bb->first_mir_insn ? " | " : " ");
742       for (mir = bb->first_mir_insn; mir; mir = mir->next) {
743         int opcode = mir->dalvikInsn.opcode;
744         fprintf(file, "    {%04x %s %s %s\\l}%s\\\n", mir->offset,
745                 mir->ssa_rep ? GetDalvikDisassembly(mir) :
746                 (opcode < kMirOpFirst) ? Instruction::Name(mir->dalvikInsn.opcode) :
747                 extended_mir_op_names_[opcode - kMirOpFirst],
748                 (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
749                 (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
750                 mir->next ? " | " : " ");
751       }
752       fprintf(file, "  }\"];\n\n");
753     } else if (bb->block_type == kExceptionHandling) {
754       char block_name[BLOCK_NAME_LEN];
755 
756       GetBlockName(bb, block_name);
757       fprintf(file, "  %s [shape=invhouse];\n", block_name);
758     }
759 
760     char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
761 
762     if (bb->taken) {
763       GetBlockName(bb, block_name1);
764       GetBlockName(bb->taken, block_name2);
765       fprintf(file, "  %s:s -> %s:n [style=dotted]\n",
766               block_name1, block_name2);
767     }
768     if (bb->fall_through) {
769       GetBlockName(bb, block_name1);
770       GetBlockName(bb->fall_through, block_name2);
771       fprintf(file, "  %s:s -> %s:n\n", block_name1, block_name2);
772     }
773 
774     if (bb->successor_block_list.block_list_type != kNotUsed) {
775       fprintf(file, "  succ%04x_%d [shape=%s,label = \"{ \\\n",
776               bb->start_offset, bb->id,
777               (bb->successor_block_list.block_list_type == kCatch) ?
778                "Mrecord" : "record");
779       GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
780       SuccessorBlockInfo *successor_block_info = iterator.Next();
781 
782       int succ_id = 0;
783       while (true) {
784         if (successor_block_info == NULL) break;
785 
786         BasicBlock *dest_block = successor_block_info->block;
787         SuccessorBlockInfo *next_successor_block_info = iterator.Next();
788 
789         fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
790                 succ_id++,
791                 successor_block_info->key,
792                 dest_block->start_offset,
793                 (next_successor_block_info != NULL) ? " | " : " ");
794 
795         successor_block_info = next_successor_block_info;
796       }
797       fprintf(file, "  }\"];\n\n");
798 
799       GetBlockName(bb, block_name1);
800       fprintf(file, "  %s:s -> succ%04x_%d:n [style=dashed]\n",
801               block_name1, bb->start_offset, bb->id);
802 
803       if (bb->successor_block_list.block_list_type == kPackedSwitch ||
804           bb->successor_block_list.block_list_type == kSparseSwitch) {
805         GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_block_list.blocks);
806 
807         succ_id = 0;
808         while (true) {
809           SuccessorBlockInfo *successor_block_info = iter.Next();
810           if (successor_block_info == NULL) break;
811 
812           BasicBlock *dest_block = successor_block_info->block;
813 
814           GetBlockName(dest_block, block_name2);
815           fprintf(file, "  succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset,
816                   bb->id, succ_id++, block_name2);
817         }
818       }
819     }
820     fprintf(file, "\n");
821 
822     if (cu_->verbose) {
823       /* Display the dominator tree */
824       GetBlockName(bb, block_name1);
825       fprintf(file, "  cfg%s [label=\"%s\", shape=none];\n",
826               block_name1, block_name1);
827       if (bb->i_dom) {
828         GetBlockName(bb->i_dom, block_name2);
829         fprintf(file, "  cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1);
830       }
831     }
832   }
833   fprintf(file, "}\n");
834   fclose(file);
835 }
836 
837 /* Insert an MIR instruction at the end of a basic block */
838 void MIRGraph::AppendMIR(BasicBlock* bb, MIR* mir) {
839   if (bb->first_mir_insn == NULL) {
840     DCHECK(bb->last_mir_insn == NULL);
841     bb->last_mir_insn = bb->first_mir_insn = mir;
842     mir->prev = mir->next = NULL;
843   } else {
844     bb->last_mir_insn->next = mir;
845     mir->prev = bb->last_mir_insn;
846     mir->next = NULL;
847     bb->last_mir_insn = mir;
848   }
849 }
850 
851 /* Insert an MIR instruction at the head of a basic block */
852 void MIRGraph::PrependMIR(BasicBlock* bb, MIR* mir) {
853   if (bb->first_mir_insn == NULL) {
854     DCHECK(bb->last_mir_insn == NULL);
855     bb->last_mir_insn = bb->first_mir_insn = mir;
856     mir->prev = mir->next = NULL;
857   } else {
858     bb->first_mir_insn->prev = mir;
859     mir->next = bb->first_mir_insn;
860     mir->prev = NULL;
861     bb->first_mir_insn = mir;
862   }
863 }
864 
865 /* Insert an MIR instruction after the specified MIR */
866 void MIRGraph::InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir) {
867   new_mir->prev = current_mir;
868   new_mir->next = current_mir->next;
869   current_mir->next = new_mir;
870 
871   if (new_mir->next) {
872     /* Is not the last MIR in the block */
873     new_mir->next->prev = new_mir;
874   } else {
875     /* Is the last MIR in the block */
876     bb->last_mir_insn = new_mir;
877   }
878 }
879 
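/* Build a human-readable disassembly string for one MIR; the result is allocated in the compiler arena. */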
880 char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
881   DecodedInstruction insn = mir->dalvikInsn;
882   std::string str;
883   int flags = 0;
884   int opcode = insn.opcode;
885   char* ret;
886   bool nop = false;
887   SSARepresentation* ssa_rep = mir->ssa_rep;
888   Instruction::Format dalvik_format = Instruction::k10x;  // Default to no-operand format
889   int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
890   int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
891 
892   // Handle special cases.
893   if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) {
894     str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
895     str.append(": ");
896     // Recover the original Dex instruction
897     insn = mir->meta.throw_insn->dalvikInsn;
898     ssa_rep = mir->meta.throw_insn->ssa_rep;
899     defs = ssa_rep->num_defs;
900     uses = ssa_rep->num_uses;
901     opcode = insn.opcode;
902   } else if (opcode == kMirOpNop) {
903     str.append("[");
904     insn.opcode = mir->meta.original_opcode;
905     opcode = mir->meta.original_opcode;
906     nop = true;
907   }
908 
909   if (opcode >= kMirOpFirst) {
910     str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
911   } else {
912     dalvik_format = Instruction::FormatOf(insn.opcode);
913     flags = Instruction::FlagsOf(insn.opcode);
914     str.append(Instruction::Name(insn.opcode));
915   }
916 
917   if (opcode == kMirOpPhi) {
918     int* incoming = reinterpret_cast<int*>(insn.vB);
919     str.append(StringPrintf(" %s = (%s",
920                GetSSANameWithConst(ssa_rep->defs[0], true).c_str(),
921                GetSSANameWithConst(ssa_rep->uses[0], true).c_str()));
922     str.append(StringPrintf(":%d", incoming[0]));
923     int i;
924     for (i = 1; i < uses; i++) {
925       str.append(StringPrintf(", %s:%d",
926                               GetSSANameWithConst(ssa_rep->uses[i], true).c_str(),
927                               incoming[i]));
928     }
929     str.append(")");
930   } else if ((flags & Instruction::kBranch) != 0) {
931     // For branches, decode the instructions to print out the branch targets.
932     int offset = 0;
933     switch (dalvik_format) {
934       case Instruction::k21t:
935         str.append(StringPrintf(" %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str()));
936         offset = insn.vB;
937         break;
938       case Instruction::k22t:
939         str.append(StringPrintf(" %s, %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str(),
940                    GetSSANameWithConst(ssa_rep->uses[1], false).c_str()));
941         offset = insn.vC;
942         break;
943       case Instruction::k10t:
944       case Instruction::k20t:
945       case Instruction::k30t:
946         offset = insn.vA;
947         break;
948       default:
949         LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
950     }
951     str.append(StringPrintf(" 0x%x (%c%x)", mir->offset + offset,
952                             offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
953   } else {
954     // For invoke-style formats, treat wide regs as a pair of singles
955     bool show_singles = ((dalvik_format == Instruction::k35c) ||
956                          (dalvik_format == Instruction::k3rc));
957     if (defs != 0) {
958       str.append(StringPrintf(" %s", GetSSANameWithConst(ssa_rep->defs[0], false).c_str()));
959       if (uses != 0) {
960         str.append(", ");
961       }
962     }
963     for (int i = 0; i < uses; i++) {
964       str.append(
965           StringPrintf(" %s", GetSSANameWithConst(ssa_rep->uses[i], show_singles).c_str()));
966       if (!show_singles && (reg_location_ != NULL) && reg_location_[i].wide) {
967         // For the listing, skip the high sreg.
968         i++;
969       }
970       if (i != (uses - 1)) {
971         str.append(",");
972       }
973     }
974     switch (dalvik_format) {
975       case Instruction::k11n:  // Add one immediate from vB
976       case Instruction::k21s:
977       case Instruction::k31i:
978       case Instruction::k21h:
979         str.append(StringPrintf(", #%d", insn.vB));
980         break;
981       case Instruction::k51l:  // Add one wide immediate
982         str.append(StringPrintf(", #%lld", insn.vB_wide));
983         break;
984       case Instruction::k21c:  // One register, one string/type/method index
985       case Instruction::k31c:
986         str.append(StringPrintf(", index #%d", insn.vB));
987         break;
988       case Instruction::k22c:  // Two registers, one string/type/method index
989         str.append(StringPrintf(", index #%d", insn.vC));
990         break;
991       case Instruction::k22s:  // Add one immediate from vC
992       case Instruction::k22b:
993         str.append(StringPrintf(", #%d", insn.vC));
994         break;
995       default: {
996         // Nothing left to print
997       }
998     }
999   }
1000   if (nop) {
1001     str.append("]--optimized away");
1002   }
1003   int length = str.length() + 1;
1004   ret = static_cast<char*>(arena_->Alloc(length, ArenaAllocator::kAllocDFInfo));
1005   strncpy(ret, str.c_str(), length);
1006   return ret;
1007 }
1008 
1009 /* Turn method name into a legal Linux file name */
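// For example, "java.lang.String java.lang.Object.toString()" becomes
// "java.lang.String#java.lang.Object.toString@@".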
1010 void MIRGraph::ReplaceSpecialChars(std::string& str) {
1011   static const struct { const char before; const char after; } match[] = {
1012     {'/', '-'}, {';', '#'}, {' ', '#'}, {'$', '+'},
1013     {'(', '@'}, {')', '@'}, {'<', '='}, {'>', '='}
1014   };
1015   for (unsigned int i = 0; i < sizeof(match)/sizeof(match[0]); i++) {
1016     std::replace(str.begin(), str.end(), match[i].before, match[i].after);
1017   }
1018 }
1019 
1020 std::string MIRGraph::GetSSAName(int ssa_reg) {
1021   // TODO: This value is needed for LLVM and debugging. Currently, we compute this and then copy to
1022   //       the arena. We should be smarter and just place straight into the arena, or compute the
1023   //       value more lazily.
1024   return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
1025 }
1026 
1027 // Similar to GetSSAName, but if the SSA name represents an immediate, show that as well.
1028 std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
1029   if (reg_location_ == NULL) {
1030     // Pre-SSA - just use the standard name
1031     return GetSSAName(ssa_reg);
1032   }
1033   if (IsConst(reg_location_[ssa_reg])) {
1034     if (!singles_only && reg_location_[ssa_reg].wide) {
1035       return StringPrintf("v%d_%d#0x%llx", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
1036                           ConstantValueWide(reg_location_[ssa_reg]));
1037     } else {
1038       return StringPrintf("v%d_%d#0x%x", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
1039                           ConstantValue(reg_location_[ssa_reg]));
1040     }
1041   } else {
1042     return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
1043   }
1044 }
1045 
1046 void MIRGraph::GetBlockName(BasicBlock* bb, char* name) {
1047   switch (bb->block_type) {
1048     case kEntryBlock:
1049       snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id);
1050       break;
1051     case kExitBlock:
1052       snprintf(name, BLOCK_NAME_LEN, "exit_%d", bb->id);
1053       break;
1054     case kDalvikByteCode:
1055       snprintf(name, BLOCK_NAME_LEN, "block%04x_%d", bb->start_offset, bb->id);
1056       break;
1057     case kExceptionHandling:
1058       snprintf(name, BLOCK_NAME_LEN, "exception%04x_%d", bb->start_offset,
1059                bb->id);
1060       break;
1061     default:
1062       snprintf(name, BLOCK_NAME_LEN, "_%d", bb->id);
1063       break;
1064   }
1065 }
1066 
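/* Return the shorty descriptor of the method referenced by target_idx (looked up in cu_->dex_file). */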
1067 const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) {
1068   // FIXME: use current code unit for inline support.
1069   const DexFile::MethodId& method_id = cu_->dex_file->GetMethodId(target_idx);
1070   return cu_->dex_file->GetShorty(method_id.proto_idx_);
1071 }
1072 
1073 /* Debug Utility - dump a compilation unit */
1074 void MIRGraph::DumpMIRGraph() {
1075   BasicBlock* bb;
1076   const char* block_type_names[] = {
1077     "Entry Block",
1078     "Code Block",
1079     "Exit Block",
1080     "Exception Handling",
1081     "Catch Block"
1082   };
1083 
1084   LOG(INFO) << "Compiling " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
1085   LOG(INFO) << cu_->insns << " insns";
1086   LOG(INFO) << GetNumBlocks() << " blocks in total";
1087   GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);
1088 
1089   while (true) {
1090     bb = iterator.Next();
1091     if (bb == NULL) break;
1092     LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
1093         bb->id,
1094         block_type_names[bb->block_type],
1095         bb->start_offset,
1096         bb->last_mir_insn ? bb->last_mir_insn->offset : bb->start_offset,
1097         bb->last_mir_insn ? "" : " empty");
1098     if (bb->taken) {
1099       LOG(INFO) << "  Taken branch: block " << bb->taken->id
1100                 << "(0x" << std::hex << bb->taken->start_offset << ")";
1101     }
1102     if (bb->fall_through) {
1103       LOG(INFO) << "  Fallthrough : block " << bb->fall_through->id
1104                 << " (0x" << std::hex << bb->fall_through->start_offset << ")";
1105     }
1106   }
1107 }
1108 
1109 /*
1110  * Build an array of location records for the incoming arguments.
1111  * Note: one location record per word of arguments, with dummy
1112  * high-word loc for wide arguments.  Also pull up any following
1113  * MOVE_RESULT and incorporate it into the invoke.
1114  */
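// For example (illustrative): invoke-virtual {v1, v2, v3}, where v2/v3 hold one wide argument,
// yields num_arg_words == 3: args[0] is the receiver, args[1] the wide low word, and args[2]
// the dummy high-word record mentioned above.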
1115 CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
1116                                   bool is_range) {
1117   CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
1118                                                         ArenaAllocator::kAllocMisc));
1119   MIR* move_result_mir = FindMoveResult(bb, mir);
1120   if (move_result_mir == NULL) {
1121     info->result.location = kLocInvalid;
1122   } else {
1123     info->result = GetRawDest(move_result_mir);
1124     move_result_mir->meta.original_opcode = move_result_mir->dalvikInsn.opcode;
1125     move_result_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
1126   }
1127   info->num_arg_words = mir->ssa_rep->num_uses;
1128   info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
1129       (arena_->Alloc(sizeof(RegLocation) * info->num_arg_words, ArenaAllocator::kAllocMisc));
1130   for (int i = 0; i < info->num_arg_words; i++) {
1131     info->args[i] = GetRawSrc(mir, i);
1132   }
1133   info->opt_flags = mir->optimization_flags;
1134   info->type = type;
1135   info->is_range = is_range;
1136   info->index = mir->dalvikInsn.vB;
1137   info->offset = mir->offset;
1138   return info;
1139 }
1140 
1141 // Allocate a new basic block.
1142 BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
1143   BasicBlock* bb = static_cast<BasicBlock*>(arena_->Alloc(sizeof(BasicBlock),
1144                                                           ArenaAllocator::kAllocBB));
1145   bb->block_type = block_type;
1146   bb->id = block_id;
1147   // TUNING: better estimate of the exit block predecessors?
1148   bb->predecessors = new (arena_) GrowableArray<BasicBlock*>(arena_,
1149                                                              (block_type == kExitBlock) ? 2048 : 2,
1150                                                              kGrowableArrayPredecessors);
1151   bb->successor_block_list.block_list_type = kNotUsed;
1152   block_id_map_.Put(block_id, block_id);
1153   return bb;
1154 }
1155 
1156 }  // namespace art
1157