1 /*
2  * Copyright (C) 2013 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_COMPILER_DEX_MIR_GRAPH_H_
18 #define ART_COMPILER_DEX_MIR_GRAPH_H_
19 
20 #include <stdint.h>
21 
22 #include "base/arena_containers.h"
23 #include "base/bit_utils.h"
24 #include "base/scoped_arena_containers.h"
25 #include "dex_file.h"
26 #include "dex_instruction.h"
27 #include "dex_types.h"
28 #include "invoke_type.h"
29 #include "mir_field_info.h"
30 #include "mir_method_info.h"
31 #include "reg_location.h"
32 #include "reg_storage.h"
33 #include "utils/arena_bit_vector.h"
34 
35 namespace art {
36 
37 struct CompilationUnit;
38 class DexCompilationUnit;
39 class DexFileMethodInliner;
40 class GlobalValueNumbering;
41 class GvnDeadCodeElimination;
42 class PassManager;
43 class TypeInference;
44 
45 // Forward declaration.
46 class MIRGraph;
47 
48 enum DataFlowAttributePos {
49   kUA = 0,
50   kUB,
51   kUC,
52   kAWide,
53   kBWide,
54   kCWide,
55   kDA,
56   kIsMove,
57   kSetsConst,
58   kFormat35c,
59   kFormat3rc,
60   kFormatExtended,       // Extended format for extended MIRs.
61   kNullCheckA,           // Null check of A.
62   kNullCheckB,           // Null check of B.
63   kNullCheckOut0,        // Null check of outgoing arg0.
64   kDstNonNull,           // May assume dst is non-null.
65   kRetNonNull,           // May assume retval is non-null.
66   kNullTransferSrc0,     // Object copy src[0] -> dst.
67   kNullTransferSrcN,     // Phi null check state transfer.
68   kRangeCheckC,          // Range check of C.
69   kCheckCastA,           // Check cast of A.
70   kFPA,
71   kFPB,
72   kFPC,
73   kCoreA,
74   kCoreB,
75   kCoreC,
76   kRefA,
77   kRefB,
78   kRefC,
79   kSameTypeAB,           // A and B have the same type but it can be core/ref/fp (IF_cc).
80   kUsesMethodStar,       // Implicit use of Method*.
81   kUsesIField,           // Accesses an instance field (IGET/IPUT).
82   kUsesSField,           // Accesses a static field (SGET/SPUT).
83   kCanInitializeClass,   // Can trigger class initialization (SGET/SPUT/INVOKE_STATIC).
84   kDoLVN,                // Worth computing local value numbers.
85 };
86 
87 #define DF_NOP                  UINT64_C(0)
88 #define DF_UA                   (UINT64_C(1) << kUA)
89 #define DF_UB                   (UINT64_C(1) << kUB)
90 #define DF_UC                   (UINT64_C(1) << kUC)
91 #define DF_A_WIDE               (UINT64_C(1) << kAWide)
92 #define DF_B_WIDE               (UINT64_C(1) << kBWide)
93 #define DF_C_WIDE               (UINT64_C(1) << kCWide)
94 #define DF_DA                   (UINT64_C(1) << kDA)
95 #define DF_IS_MOVE              (UINT64_C(1) << kIsMove)
96 #define DF_SETS_CONST           (UINT64_C(1) << kSetsConst)
97 #define DF_FORMAT_35C           (UINT64_C(1) << kFormat35c)
98 #define DF_FORMAT_3RC           (UINT64_C(1) << kFormat3rc)
99 #define DF_FORMAT_EXTENDED      (UINT64_C(1) << kFormatExtended)
100 #define DF_NULL_CHK_A           (UINT64_C(1) << kNullCheckA)
101 #define DF_NULL_CHK_B           (UINT64_C(1) << kNullCheckB)
102 #define DF_NULL_CHK_OUT0        (UINT64_C(1) << kNullCheckOut0)
103 #define DF_NON_NULL_DST         (UINT64_C(1) << kDstNonNull)
104 #define DF_NON_NULL_RET         (UINT64_C(1) << kRetNonNull)
105 #define DF_NULL_TRANSFER_0      (UINT64_C(1) << kNullTransferSrc0)
106 #define DF_NULL_TRANSFER_N      (UINT64_C(1) << kNullTransferSrcN)
107 #define DF_RANGE_CHK_C          (UINT64_C(1) << kRangeCheckC)
108 #define DF_CHK_CAST             (UINT64_C(1) << kCheckCastA)
109 #define DF_FP_A                 (UINT64_C(1) << kFPA)
110 #define DF_FP_B                 (UINT64_C(1) << kFPB)
111 #define DF_FP_C                 (UINT64_C(1) << kFPC)
112 #define DF_CORE_A               (UINT64_C(1) << kCoreA)
113 #define DF_CORE_B               (UINT64_C(1) << kCoreB)
114 #define DF_CORE_C               (UINT64_C(1) << kCoreC)
115 #define DF_REF_A                (UINT64_C(1) << kRefA)
116 #define DF_REF_B                (UINT64_C(1) << kRefB)
117 #define DF_REF_C                (UINT64_C(1) << kRefC)
118 #define DF_SAME_TYPE_AB         (UINT64_C(1) << kSameTypeAB)
119 #define DF_UMS                  (UINT64_C(1) << kUsesMethodStar)
120 #define DF_IFIELD               (UINT64_C(1) << kUsesIField)
121 #define DF_SFIELD               (UINT64_C(1) << kUsesSField)
122 #define DF_CLINIT               (UINT64_C(1) << kCanInitializeClass)
123 #define DF_LVN                  (UINT64_C(1) << kDoLVN)
124 
125 #define DF_HAS_USES             (DF_UA | DF_UB | DF_UC)
126 
127 #define DF_HAS_DEFS             (DF_DA)
128 
129 #define DF_HAS_NULL_CHKS        (DF_NULL_CHK_A | \
130                                  DF_NULL_CHK_B | \
131                                  DF_NULL_CHK_OUT0)
132 
133 #define DF_HAS_RANGE_CHKS       (DF_RANGE_CHK_C)
134 
135 #define DF_HAS_NR_CHKS          (DF_HAS_NULL_CHKS | \
136                                  DF_HAS_RANGE_CHKS)
137 
138 #define DF_A_IS_REG             (DF_UA | DF_DA)
139 #define DF_B_IS_REG             (DF_UB)
140 #define DF_C_IS_REG             (DF_UC)
141 #define DF_USES_FP              (DF_FP_A | DF_FP_B | DF_FP_C)
142 #define DF_NULL_TRANSFER        (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)
143 #define DF_IS_INVOKE            (DF_FORMAT_35C | DF_FORMAT_3RC)
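// Editor's illustration (not part of the original mir_graph.h): a minimal sketch of how a pass
// can consume these attribute bits. The per-opcode mask comes from
// MIRGraph::GetDataFlowAttributes(), declared further below; the helper name here is hypothetical.
static inline bool NeedsNullOrRangeCheckAnalysis(uint64_t df_attributes) {
  // Any instruction with a null-check or range-check bit set is a candidate for the
  // check elimination passes.
  return (df_attributes & DF_HAS_NR_CHKS) != 0;
}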
144 
145 enum OatMethodAttributes {
146   kIsLeaf,            // Method is leaf.
147 };
148 
149 #define METHOD_IS_LEAF          (1 << kIsLeaf)
150 
151 // Minimum field size to contain Dalvik v_reg number.
152 #define VREG_NUM_WIDTH 16
153 
154 #define INVALID_VREG (0xFFFFU)
155 #define INVALID_OFFSET (0xDEADF00FU)
156 
157 #define MIR_IGNORE_NULL_CHECK           (1 << kMIRIgnoreNullCheck)
158 #define MIR_IGNORE_RANGE_CHECK          (1 << kMIRIgnoreRangeCheck)
159 #define MIR_IGNORE_CHECK_CAST           (1 << kMIRIgnoreCheckCast)
160 #define MIR_STORE_NON_NULL_VALUE        (1 << kMIRStoreNonNullValue)
161 #define MIR_CLASS_IS_INITIALIZED        (1 << kMIRClassIsInitialized)
162 #define MIR_CLASS_IS_IN_DEX_CACHE       (1 << kMIRClassIsInDexCache)
163 #define MIR_IGNORE_DIV_ZERO_CHECK       (1 << kMirIgnoreDivZeroCheck)
164 #define MIR_INLINED                     (1 << kMIRInlined)
165 #define MIR_INLINED_PRED                (1 << kMIRInlinedPred)
166 #define MIR_CALLEE                      (1 << kMIRCallee)
167 #define MIR_IGNORE_SUSPEND_CHECK        (1 << kMIRIgnoreSuspendCheck)
168 #define MIR_DUP                         (1 << kMIRDup)
169 #define MIR_MARK                        (1 << kMIRMark)
170 #define MIR_STORE_NON_TEMPORAL          (1 << kMIRStoreNonTemporal)
171 
172 #define BLOCK_NAME_LEN 80
173 
174 typedef uint16_t BasicBlockId;
175 static const BasicBlockId NullBasicBlockId = 0;
176 
177 // Leaf optimization is basically the removal of suspend checks from leaf methods.
178 // This is incompatible with SuspendCheckElimination (SCE) which eliminates suspend
179 // checks from loops that call any non-intrinsic method, since a loop that calls
180 // only a leaf method would end up without any suspend checks at all. So turning
181 // this on automatically disables the SCE in MIRGraph::EliminateSuspendChecksGate().
182 //
183 // Since the Optimizing compiler is actually applying the same optimization, Quick
184 // must not run SCE anyway, so we enable this optimization as a way to disable SCE
185 // while keeping a consistent behavior across the backends, b/22657404.
186 static constexpr bool kLeafOptimization = true;
187 
188 /*
189  * In general, vreg/sreg describe Dalvik registers that originated with dx.  However,
190  * it is useful to have compiler-generated temporary registers and have them treated
191  * in the same manner as dx-generated virtual registers.  This struct records the SSA
192  * name of compiler-introduced temporaries.
193  */
194 struct CompilerTemp {
195   int32_t v_reg;      // Virtual register number for temporary.
196   int32_t s_reg_low;  // SSA name for low Dalvik word.
197 };
198 
199 enum CompilerTempType {
200   kCompilerTempVR,                // A virtual register temporary.
201   kCompilerTempSpecialMethodPtr,  // Temporary that keeps track of current method pointer.
202   kCompilerTempBackend,           // Temporary that is used by backend.
203 };
204 
205 // When debug option enabled, records effectiveness of null and range check elimination.
206 struct Checkstats {
207   int32_t null_checks;
208   int32_t null_checks_eliminated;
209   int32_t range_checks;
210   int32_t range_checks_eliminated;
211 };
212 
213 // Dataflow attributes of a basic block.
214 struct BasicBlockDataFlow {
215   ArenaBitVector* use_v;
216   ArenaBitVector* def_v;
217   ArenaBitVector* live_in_v;
218   int32_t* vreg_to_ssa_map_exit;
219 };
220 
221 /*
222  * Normalized use/def for a MIR operation using SSA names rather than vregs.  Note that
223  * uses/defs retain the Dalvik convention that long operations operate on a pair of 32-bit
224  * vregs.  For example, "ADD_LONG v0, v2, v4" would have 2 defs (v0/v1) and 4 uses (v2/v3, v4/v5).
225  * Following SSA renaming, this is the primary struct used by code generators to locate
226  * operand and result registers.  This is a somewhat confusing and unhelpful convention that
227  * we may want to revisit in the future.
228  *
229  * TODO:
230  *  1. Add accessors for uses/defs and make data private
231  *  2. Change fp_use/fp_def to a bit array (could help memory usage)
232  *  3. Combine array storage into internal array and handled via accessors from 1.
233  */
234 struct SSARepresentation {
235   int32_t* uses;
236   int32_t* defs;
237   uint16_t num_uses_allocated;
238   uint16_t num_defs_allocated;
239   uint16_t num_uses;
240   uint16_t num_defs;
241 
242   static uint32_t GetStartUseIndex(Instruction::Code opcode);
243 };
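// Editor's illustration (not part of the original header): under the wide-operand convention
// described above, a wide add such as "ADD_LONG v0, v2, v4" would be represented with
// num_defs == 2 (the SSA names for v0/v1) and num_uses == 4 (the SSA names for v2/v3 and v4/v5).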
244 
245 /*
246  * The Midlevel Intermediate Representation node, which may be largely considered a
247  * wrapper around a Dalvik byte code.
248  */
249 class MIR : public ArenaObject<kArenaAllocMIR> {
250  public:
251   /*
252    * TODO: remove embedded DecodedInstruction to save space, keeping only opcode.  Recover
253    * additional fields on as-needed basis.  Question: how to support MIR Pseudo-ops; probably
254    * need to carry aux data pointer.
255    */
256   struct DecodedInstruction {
257     uint32_t vA;
258     uint32_t vB;
259     uint64_t vB_wide;        /* for k51l */
260     uint32_t vC;
261     uint32_t arg[5];         /* vC/D/E/F/G in invoke or filled-new-array */
262     Instruction::Code opcode;
263 
264     explicit DecodedInstruction():vA(0), vB(0), vB_wide(0), vC(0), opcode(Instruction::NOP) {
265     }
266 
267     /*
268      * Given a decoded instruction representing a const bytecode, it updates
269      * the out arguments with proper values as dictated by the constant bytecode.
270      */
271     bool GetConstant(int64_t* ptr_value, bool* wide) const;
272 
273     static bool IsPseudoMirOp(Instruction::Code opcode) {
274       return static_cast<int>(opcode) >= static_cast<int>(kMirOpFirst);
275     }
276 
277     static bool IsPseudoMirOp(int opcode) {
278       return opcode >= static_cast<int>(kMirOpFirst);
279     }
280 
281     bool IsInvoke() const {
282       return ((FlagsOf() & Instruction::kInvoke) == Instruction::kInvoke);
283     }
284 
285     bool IsStore() const {
286       return ((FlagsOf() & Instruction::kStore) == Instruction::kStore);
287     }
288 
289     bool IsLoad() const {
290       return ((FlagsOf() & Instruction::kLoad) == Instruction::kLoad);
291     }
292 
293     bool IsConditionalBranch() const {
294       return (FlagsOf() == (Instruction::kContinue | Instruction::kBranch));
295     }
296 
297     /**
298      * @brief Is the register C component of the decoded instruction a constant?
299      */
300     bool IsCFieldOrConstant() const {
301       return ((FlagsOf() & Instruction::kRegCFieldOrConstant) == Instruction::kRegCFieldOrConstant);
302     }
303 
304     /**
305      * @brief Is the register B component of the decoded instruction a constant?
306      */
307     bool IsBFieldOrConstant() const {
308       return ((FlagsOf() & Instruction::kRegBFieldOrConstant) == Instruction::kRegBFieldOrConstant);
309     }
310 
311     bool IsCast() const {
312       return ((FlagsOf() & Instruction::kCast) == Instruction::kCast);
313     }
314 
315     /**
316      * @brief Does the instruction clobber memory?
317      * @details Clobber means that the instruction may modify memory in a non-local way.
318      *          Therefore any assumption about memory aliasing or memory contents should be
319      *          discarded when crossing such an instruction.
320      */
321     bool Clobbers() const {
322       return ((FlagsOf() & Instruction::kClobber) == Instruction::kClobber);
323     }
324 
325     bool IsLinear() const {
326       return (FlagsOf() & (Instruction::kAdd | Instruction::kSubtract)) != 0;
327     }
328 
329     int FlagsOf() const;
330   } dalvikInsn;
331 
332   NarrowDexOffset offset;         // Offset of the instruction in code units.
333   uint16_t optimization_flags;
334   int16_t m_unit_index;           // Index of the method from which this MIR was included.
335   BasicBlockId bb;
336   MIR* next;
337   SSARepresentation* ssa_rep;
338   union {
339     // Incoming edges for phi node.
340     BasicBlockId* phi_incoming;
341     // Establish link from check instruction (kMirOpCheck) to the actual throwing instruction.
342     MIR* throw_insn;
343     // Branch condition for fused cmp or select.
344     ConditionCode ccode;
345     // IGET/IPUT lowering info index, points to MIRGraph::ifield_lowering_infos_. Due to limit on
346     // the number of code points (64K) and size of IGET/IPUT insn (2), this will never exceed 32K.
347     uint32_t ifield_lowering_info;
348     // SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to limit on
349     // the number of code points (64K) and size of SGET/SPUT insn (2), this will never exceed 32K.
350     uint32_t sfield_lowering_info;
351     // INVOKE data index, points to MIRGraph::method_lowering_infos_. Also used for inlined
352     // CONST and MOVE insn (with MIR_CALLEE) to remember the invoke for type inference.
353     uint32_t method_lowering_info;
354   } meta;
355 
356   explicit MIR() : offset(0), optimization_flags(0), m_unit_index(0), bb(NullBasicBlockId),
357                  next(nullptr), ssa_rep(nullptr) {
358     memset(&meta, 0, sizeof(meta));
359   }
360 
361   uint32_t GetStartUseIndex() const {
362     return SSARepresentation::GetStartUseIndex(dalvikInsn.opcode);
363   }
364 
365   MIR* Copy(CompilationUnit *c_unit);
366   MIR* Copy(MIRGraph* mir_Graph);
367 };
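// Editor's illustration (not part of the original header): a minimal sketch of walking a block's
// intrusive MIR list via 'next' and querying the embedded DecodedInstruction helpers.
// CountInvokeMirs is a hypothetical helper name.
static inline size_t CountInvokeMirs(const MIR* first_mir) {
  size_t count = 0;
  for (const MIR* mir = first_mir; mir != nullptr; mir = mir->next) {
    if (mir->dalvikInsn.IsInvoke()) {
      ++count;
    }
  }
  return count;
}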
368 
369 struct SuccessorBlockInfo;
370 
371 class BasicBlock : public DeletableArenaObject<kArenaAllocBB> {
372  public:
373   BasicBlock(BasicBlockId block_id, BBType type, ArenaAllocator* allocator)
374       : id(block_id),
375         dfs_id(), start_offset(), fall_through(), taken(), i_dom(), nesting_depth(),
376         block_type(type),
377         successor_block_list_type(kNotUsed),
378         visited(), hidden(), catch_entry(), explicit_throw(), conditional_branch(),
379         terminated_by_return(), dominates_return(), use_lvn(), first_mir_insn(),
380         last_mir_insn(), data_flow_info(), dominators(), i_dominated(), dom_frontier(),
381         predecessors(allocator->Adapter(kArenaAllocBBPredecessors)),
382         successor_blocks(allocator->Adapter(kArenaAllocSuccessor)) {
383   }
384   BasicBlockId id;
385   BasicBlockId dfs_id;
386   NarrowDexOffset start_offset;     // Offset in code units.
387   BasicBlockId fall_through;
388   BasicBlockId taken;
389   BasicBlockId i_dom;               // Immediate dominator.
390   uint16_t nesting_depth;
391   BBType block_type:4;
392   BlockListType successor_block_list_type:4;
393   bool visited:1;
394   bool hidden:1;
395   bool catch_entry:1;
396   bool explicit_throw:1;
397   bool conditional_branch:1;
398   bool terminated_by_return:1;  // Block ends with a Dalvik return opcode.
399   bool dominates_return:1;      // Is a member of return extended basic block.
400   bool use_lvn:1;               // Run local value numbering on this block.
401   MIR* first_mir_insn;
402   MIR* last_mir_insn;
403   BasicBlockDataFlow* data_flow_info;
404   ArenaBitVector* dominators;
405   ArenaBitVector* i_dominated;      // Set nodes being immediately dominated.
406   ArenaBitVector* dom_frontier;     // Dominance frontier.
407   ArenaVector<BasicBlockId> predecessors;
408   ArenaVector<SuccessorBlockInfo*> successor_blocks;
409 
410   void AppendMIR(MIR* mir);
411   void AppendMIRList(MIR* first_list_mir, MIR* last_list_mir);
412   void AppendMIRList(const std::vector<MIR*>& insns);
413   void PrependMIR(MIR* mir);
414   void PrependMIRList(MIR* first_list_mir, MIR* last_list_mir);
415   void PrependMIRList(const std::vector<MIR*>& to_add);
416   void InsertMIRAfter(MIR* current_mir, MIR* new_mir);
417   void InsertMIRListAfter(MIR* insert_after, MIR* first_list_mir, MIR* last_list_mir);
418   MIR* FindPreviousMIR(MIR* mir);
419   void InsertMIRBefore(MIR* insert_before, MIR* list);
420   void InsertMIRListBefore(MIR* insert_before, MIR* first_list_mir, MIR* last_list_mir);
421   bool RemoveMIR(MIR* mir);
422   bool RemoveMIRList(MIR* first_list_mir, MIR* last_list_mir);
423 
424   BasicBlock* Copy(CompilationUnit* c_unit);
425   BasicBlock* Copy(MIRGraph* mir_graph);
426 
427   /**
428    * @brief Reset the optimization_flags field of each MIR.
429    */
430   void ResetOptimizationFlags(uint16_t reset_flags);
431 
432   /**
433    * @brief Kill the BasicBlock.
434    * @details Unlink predecessors and successors, remove all MIRs, set the block type to kDead
435    *          and set hidden to true.
436    */
437   void Kill(MIRGraph* mir_graph);
438 
439   /**
440    * @brief Is ssa_reg the last SSA definition of that VR in the block?
441    */
442   bool IsSSALiveOut(const CompilationUnit* c_unit, int ssa_reg);
443 
444   /**
445    * @brief Replace the edge going to old_bb to now go towards new_bb.
446    */
447   bool ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb);
448 
449   /**
450    * @brief Erase the predecessor old_pred.
451    */
452   void ErasePredecessor(BasicBlockId old_pred);
453 
454   /**
455    * @brief Update the predecessor array from old_pred to new_pred.
456    */
457   void UpdatePredecessor(BasicBlockId old_pred, BasicBlockId new_pred);
458 
459   /**
460    * @brief Return first non-Phi insn.
461    */
462   MIR* GetFirstNonPhiInsn();
463 
464   /**
465    * @brief Checks whether the block ends with if-nez or if-eqz that branches to
466    *        the given successor only if the register is not zero.
467    */
468   bool BranchesToSuccessorOnlyIfNotZero(BasicBlockId succ_id) const {
469     if (last_mir_insn == nullptr) {
470       return false;
471     }
472     Instruction::Code last_opcode = last_mir_insn->dalvikInsn.opcode;
473     return ((last_opcode == Instruction::IF_EQZ && fall_through == succ_id) ||
474         (last_opcode == Instruction::IF_NEZ && taken == succ_id)) &&
475         // Make sure the other successor isn't the same (empty if), b/21614284.
476         (fall_through != taken);
477   }
478 
479   /**
480    * @brief Used to obtain the next MIR that follows unconditionally.
481    * @details The implementation does not guarantee that a MIR does not
482    * follow even if this method returns nullptr.
483    * @param mir_graph the MIRGraph.
484    * @param current The MIR for which to find an unconditional follower.
485    * @return Returns the following MIR if one can be found.
486    */
487   MIR* GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current);
488   bool IsExceptionBlock() const;
489 
490  private:
491   DISALLOW_COPY_AND_ASSIGN(BasicBlock);
492 };
493 
494 /*
495  * The "blocks" field in "successor_block_list" points to an array of elements with the type
496  * "SuccessorBlockInfo".  For catch blocks, key is type index for the exception.  For switch
497  * blocks, key is the case value.
498  */
499 struct SuccessorBlockInfo {
500   BasicBlockId block;
501   int key;
502 };
503 
504 /**
505  * @class ChildBlockIterator
506  * @brief Enable an easy iteration of the children.
507  */
508 class ChildBlockIterator {
509  public:
510   /**
511    * @brief Constructs a child iterator.
512    * @param bb The basic block whose children we need to iterate through.
513    * @param mir_graph The MIRGraph used to get the basic block during iteration.
514    */
515   ChildBlockIterator(BasicBlock* bb, MIRGraph* mir_graph);
516   BasicBlock* Next();
517 
518  private:
519   BasicBlock* basic_block_;
520   MIRGraph* mir_graph_;
521   bool visited_fallthrough_;
522   bool visited_taken_;
523   bool have_successors_;
524   ArenaVector<SuccessorBlockInfo*>::const_iterator successor_iter_;
525 };
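// Editor's illustration (not part of the original header): a minimal usage sketch visiting every
// child (fall-through, taken, and successor-list blocks) of a basic block.
// CountChildBlocks is a hypothetical helper name.
static inline size_t CountChildBlocks(BasicBlock* bb, MIRGraph* mir_graph) {
  size_t count = 0;
  ChildBlockIterator iter(bb, mir_graph);
  for (BasicBlock* child = iter.Next(); child != nullptr; child = iter.Next()) {
    ++count;
  }
  return count;
}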
526 
527 /*
528  * Collection of information describing an invoke, and the destination of
529  * the subsequent MOVE_RESULT (if applicable).  Collected as a unit to enable
530  * more efficient invoke code generation.
531  */
532 struct CallInfo {
533   size_t num_arg_words;   // Note: word count, not arg count.
534   RegLocation* args;      // One for each word of arguments.
535   RegLocation result;     // Eventual target of MOVE_RESULT.
536   int opt_flags;
537   InvokeType type;
538   uint32_t dex_idx;
539   MethodReference method_ref;
540   uint32_t index;         // Method idx for invokes, type idx for FilledNewArray.
541   uintptr_t direct_code;
542   uintptr_t direct_method;
543   RegLocation target;     // Target of following move_result.
544   bool skip_this;
545   bool is_range;
546   DexOffset offset;       // Offset in code units.
547   MIR* mir;
548   int32_t string_init_offset;
549 };
550 
551 
552 const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, RegStorage(), INVALID_SREG,
553                              INVALID_SREG};
554 
555 class MIRGraph {
556  public:
557   MIRGraph(CompilationUnit* cu, ArenaAllocator* arena);
558   virtual ~MIRGraph();
559 
560   /*
561    * Examine the graph to determine whether it's worthwhile to spend the time compiling
562    * this method.
563    */
564   bool SkipCompilation(std::string* skip_message);
565 
566   /*
567    * Should we skip the compilation of this method based on its name?
568    */
569   bool SkipCompilationByName(const std::string& methodname);
570 
571   /*
572    * Parse dex method and add MIR at current insert point.  Returns id (which is
573    * actually the index of the method in the m_units_ array).
574    */
575   void InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
576                     InvokeType invoke_type, uint16_t class_def_idx,
577                     uint32_t method_idx, jobject class_loader, const DexFile& dex_file);
578 
579   /* Find existing block */
580   BasicBlock* FindBlock(DexOffset code_offset,
581                         ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
582     return FindBlock(code_offset, false, nullptr, dex_pc_to_block_map);
583   }
584 
585   const uint16_t* GetCurrentInsns() const {
586     return current_code_item_->insns_;
587   }
588 
589   /**
590    * @brief Used to obtain the raw dex bytecode instruction pointer.
591    * @param m_unit_index The method index in MIRGraph (caused by having multiple methods).
592    * This is guaranteed to contain index 0 which is the base method being compiled.
593    * @return Returns the raw instruction pointer.
594    */
595   const uint16_t* GetInsns(int m_unit_index) const;
596 
597   /**
598    * @brief Used to obtain the raw data table.
599    * @param mir sparse switch, packed switch, or fill-array-data
600    * @param table_offset The table offset from start of method.
601    * @return Returns the raw table pointer.
602    */
603   const uint16_t* GetTable(MIR* mir, uint32_t table_offset) const {
604     return GetInsns(mir->m_unit_index) + mir->offset + static_cast<int32_t>(table_offset);
605   }
606 
607   unsigned int GetNumBlocks() const {
608     return block_list_.size();
609   }
610 
611   /**
612    * @brief Provides the total size in code units of all instructions in MIRGraph.
613    * @details Includes the sizes of all methods in compilation unit.
614    * @return Returns the cumulative sum of all insn sizes (in code units).
615    */
616   size_t GetNumDalvikInsns() const;
617 
618   ArenaBitVector* GetTryBlockAddr() const {
619     return try_block_addr_;
620   }
621 
622   BasicBlock* GetEntryBlock() const {
623     return entry_block_;
624   }
625 
626   BasicBlock* GetExitBlock() const {
627     return exit_block_;
628   }
629 
630   BasicBlock* GetBasicBlock(unsigned int block_id) const {
631     DCHECK_LT(block_id, block_list_.size());  // NOTE: NullBasicBlockId is 0.
632     return (block_id == NullBasicBlockId) ? nullptr : block_list_[block_id];
633   }
634 
635   size_t GetBasicBlockListCount() const {
636     return block_list_.size();
637   }
638 
639   const ArenaVector<BasicBlock*>& GetBlockList() {
640     return block_list_;
641   }
642 
643   const ArenaVector<BasicBlockId>& GetDfsOrder() {
644     return dfs_order_;
645   }
646 
647   const ArenaVector<BasicBlockId>& GetDfsPostOrder() {
648     return dfs_post_order_;
649   }
650 
651   const ArenaVector<BasicBlockId>& GetDomPostOrder() {
652     return dom_post_order_traversal_;
653   }
654 
655   int GetDefCount() const {
656     return def_count_;
657   }
658 
659   ArenaAllocator* GetArena() const {
660     return arena_;
661   }
662 
663   void EnableOpcodeCounting() {
664     opcode_count_ = arena_->AllocArray<int>(kNumPackedOpcodes, kArenaAllocMisc);
665   }
666 
667   void ShowOpcodeStats();
668 
669   DexCompilationUnit* GetCurrentDexCompilationUnit() const {
670     return m_units_[current_method_];
671   }
672 
673   /**
674    * @brief Dump a CFG into a dot file format.
675    * @param dir_prefix the directory the file will be created in.
676    * @param all_blocks does the dumper use all the basic blocks or use the reachable blocks.
677    * @param suffix does the filename require a suffix or not (default = nullptr).
678    */
679   void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);
680 
681   bool HasCheckCast() const {
682     return (merged_df_flags_ & DF_CHK_CAST) != 0u;
683   }
684 
685   bool HasFieldAccess() const {
686     return (merged_df_flags_ & (DF_IFIELD | DF_SFIELD)) != 0u;
687   }
688 
689   bool HasStaticFieldAccess() const {
690     return (merged_df_flags_ & DF_SFIELD) != 0u;
691   }
692 
693   bool HasInvokes() const {
694     // NOTE: These formats include the rare filled-new-array/range.
695     return (merged_df_flags_ & (DF_FORMAT_35C | DF_FORMAT_3RC)) != 0u;
696   }
697 
698   void DoCacheFieldLoweringInfo();
699 
700   const MirIFieldLoweringInfo& GetIFieldLoweringInfo(MIR* mir) const {
701     return GetIFieldLoweringInfo(mir->meta.ifield_lowering_info);
702   }
703 
704   const MirIFieldLoweringInfo& GetIFieldLoweringInfo(uint32_t lowering_info) const {
705     DCHECK_LT(lowering_info, ifield_lowering_infos_.size());
706     return ifield_lowering_infos_[lowering_info];
707   }
708 
709   size_t GetIFieldLoweringInfoCount() const {
710     return ifield_lowering_infos_.size();
711   }
712 
713   const MirSFieldLoweringInfo& GetSFieldLoweringInfo(MIR* mir) const {
714     return GetSFieldLoweringInfo(mir->meta.sfield_lowering_info);
715   }
716 
717   const MirSFieldLoweringInfo& GetSFieldLoweringInfo(uint32_t lowering_info) const {
718     DCHECK_LT(lowering_info, sfield_lowering_infos_.size());
719     return sfield_lowering_infos_[lowering_info];
720   }
721 
722   size_t GetSFieldLoweringInfoCount() const {
723     return sfield_lowering_infos_.size();
724   }
725 
726   void DoCacheMethodLoweringInfo();
727 
728   const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) const {
729     return GetMethodLoweringInfo(mir->meta.method_lowering_info);
730   }
731 
732   const MirMethodLoweringInfo& GetMethodLoweringInfo(uint32_t lowering_info) const {
733     DCHECK_LT(lowering_info, method_lowering_infos_.size());
734     return method_lowering_infos_[lowering_info];
735   }
736 
737   size_t GetMethodLoweringInfoCount() const {
738     return method_lowering_infos_.size();
739   }
740 
741   void ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput);
742 
743   void InitRegLocations();
744 
745   void RemapRegLocations();
746 
747   void DumpRegLocTable(RegLocation* table, int count);
748 
749   void BasicBlockOptimizationStart();
750   void BasicBlockOptimization();
751   void BasicBlockOptimizationEnd();
752 
753   void StringChange();
754 
755   const ArenaVector<BasicBlockId>& GetTopologicalSortOrder() {
756     DCHECK(!topological_order_.empty());
757     return topological_order_;
758   }
759 
760   const ArenaVector<BasicBlockId>& GetTopologicalSortOrderLoopEnds() {
761     DCHECK(!topological_order_loop_ends_.empty());
762     return topological_order_loop_ends_;
763   }
764 
765   const ArenaVector<BasicBlockId>& GetTopologicalSortOrderIndexes() {
766     DCHECK(!topological_order_indexes_.empty());
767     return topological_order_indexes_;
768   }
769 
770   ArenaVector<std::pair<uint16_t, bool>>* GetTopologicalSortOrderLoopHeadStack() {
771     DCHECK(!topological_order_.empty());  // Checking the main array, not the stack.
772     return &topological_order_loop_head_stack_;
773   }
774 
775   size_t GetMaxNestedLoops() const {
776     return max_nested_loops_;
777   }
778 
779   bool IsLoopHead(BasicBlockId bb_id) {
780     return topological_order_loop_ends_[topological_order_indexes_[bb_id]] != 0u;
781   }
782 
783   bool IsConst(int32_t s_reg) const {
784     return is_constant_v_->IsBitSet(s_reg);
785   }
786 
787   bool IsConst(RegLocation loc) const {
788     return loc.orig_sreg < 0 ? false : IsConst(loc.orig_sreg);
789   }
790 
791   int32_t ConstantValue(RegLocation loc) const {
792     DCHECK(IsConst(loc));
793     return constant_values_[loc.orig_sreg];
794   }
795 
796   int32_t ConstantValue(int32_t s_reg) const {
797     DCHECK(IsConst(s_reg));
798     return constant_values_[s_reg];
799   }
800 
801   /**
802    * @brief Used to obtain 64-bit value of a pair of ssa registers.
803    * @param s_reg_low The ssa register representing the low bits.
804    * @param s_reg_high The ssa register representing the high bits.
805    * @return Returns the 64-bit constant value.
806    */
807   int64_t ConstantValueWide(int32_t s_reg_low, int32_t s_reg_high) const {
808     DCHECK(IsConst(s_reg_low));
809     DCHECK(IsConst(s_reg_high));
810     return (static_cast<int64_t>(constant_values_[s_reg_high]) << 32) |
811         Low32Bits(static_cast<int64_t>(constant_values_[s_reg_low]));
812   }
813 
814   int64_t ConstantValueWide(RegLocation loc) const {
815     DCHECK(IsConst(loc));
816     DCHECK(!loc.high_word);  // Do not allow asking for the high partner.
817     DCHECK_LT(loc.orig_sreg + 1, GetNumSSARegs());
818     return (static_cast<int64_t>(constant_values_[loc.orig_sreg + 1]) << 32) |
819         Low32Bits(static_cast<int64_t>(constant_values_[loc.orig_sreg]));
820   }
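  // Editor's illustration (not part of the original header): a pass that wants to fold a wide
  // operand would typically guard on IsConst() for the location and then read the pair, e.g.
  //   if (IsConst(rl_src) && !rl_src.high_word) {
  //     int64_t value = ConstantValueWide(rl_src);
  //   }
  // where rl_src is a hypothetical RegLocation naming the low word of the pair.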
821 
822   /**
823    * @brief Used to mark ssa register as being constant.
824    * @param ssa_reg The ssa register.
825    * @param value The constant value of ssa register.
826    */
827   void SetConstant(int32_t ssa_reg, int32_t value);
828 
829   /**
830    * @brief Used to mark ssa register and its wide counterpart as being constant.
831    * @param ssa_reg The ssa register.
832    * @param value The 64-bit constant value of ssa register and its pair.
833    */
834   void SetConstantWide(int32_t ssa_reg, int64_t value);
835 
836   bool IsConstantNullRef(RegLocation loc) const {
837     return loc.ref && loc.is_const && (ConstantValue(loc) == 0);
838   }
839 
840   int GetNumSSARegs() const {
841     return num_ssa_regs_;
842   }
843 
844   void SetNumSSARegs(int new_num) {
845      /*
846       * TODO: It's theoretically possible to exceed 32767, though any cases which did
847       * would be filtered out with current settings.  When orig_sreg field is removed
848       * from RegLocation, expand s_reg_low to handle all possible cases and remove DCHECK().
849       */
850     CHECK_EQ(new_num, static_cast<int16_t>(new_num));
851     num_ssa_regs_ = new_num;
852   }
853 
854   unsigned int GetNumReachableBlocks() const {
855     return num_reachable_blocks_;
856   }
857 
858   uint32_t GetUseCount(int sreg) const {
859     DCHECK_LT(static_cast<size_t>(sreg), use_counts_.size());
860     return use_counts_[sreg];
861   }
862 
863   uint32_t GetRawUseCount(int sreg) const {
864     DCHECK_LT(static_cast<size_t>(sreg), raw_use_counts_.size());
865     return raw_use_counts_[sreg];
866   }
867 
868   int GetSSASubscript(int ssa_reg) const {
869     DCHECK_LT(static_cast<size_t>(ssa_reg), ssa_subscripts_.size());
870     return ssa_subscripts_[ssa_reg];
871   }
872 
873   RegLocation GetRawSrc(MIR* mir, int num) {
874     DCHECK(num < mir->ssa_rep->num_uses);
875     RegLocation res = reg_location_[mir->ssa_rep->uses[num]];
876     return res;
877   }
878 
879   RegLocation GetRawDest(MIR* mir) {
880     DCHECK_GT(mir->ssa_rep->num_defs, 0);
881     RegLocation res = reg_location_[mir->ssa_rep->defs[0]];
882     return res;
883   }
884 
885   RegLocation GetDest(MIR* mir) {
886     RegLocation res = GetRawDest(mir);
887     DCHECK(!res.wide);
888     return res;
889   }
890 
891   RegLocation GetSrc(MIR* mir, int num) {
892     RegLocation res = GetRawSrc(mir, num);
893     DCHECK(!res.wide);
894     return res;
895   }
896 
897   RegLocation GetDestWide(MIR* mir) {
898     RegLocation res = GetRawDest(mir);
899     DCHECK(res.wide);
900     return res;
901   }
902 
903   RegLocation GetSrcWide(MIR* mir, int low) {
904     RegLocation res = GetRawSrc(mir, low);
905     DCHECK(res.wide);
906     return res;
907   }
908 
909   RegLocation GetBadLoc() {
910     return bad_loc;
911   }
912 
913   int GetMethodSReg() const {
914     return method_sreg_;
915   }
916 
917   /**
918    * @brief Used to obtain the number of compiler temporaries being used.
919    * @return Returns the number of compiler temporaries.
920    */
921   size_t GetNumUsedCompilerTemps() const {
922     // Assume that the special temps will always be used.
923     return GetNumNonSpecialCompilerTemps() + max_available_special_compiler_temps_;
924   }
925 
926   /**
927    * @brief Used to obtain number of bytes needed for special temps.
928    * @details This space is always needed because temps have a special location on the stack.
929    * @return Returns number of bytes for the special temps.
930    */
931   size_t GetNumBytesForSpecialTemps() const;
932 
933   /**
934    * @brief Used by backend as a hint for maximum number of bytes for non-special temps.
935    * @details Returns 4 bytes for each temp because that is the maximum amount needed
936    * for storing each temp. The BE could be smarter though and allocate a smaller
937    * spill region.
938    * @return Returns the maximum number of bytes needed for non-special temps.
939    */
940   size_t GetMaximumBytesForNonSpecialTemps() const {
941     return GetNumNonSpecialCompilerTemps() * sizeof(uint32_t);
942   }
943 
944   /**
945    * @brief Used to obtain the number of non-special compiler temporaries being used.
946    * @return Returns the number of non-special compiler temporaries.
947    */
948   size_t GetNumNonSpecialCompilerTemps() const {
949     return num_non_special_compiler_temps_;
950   }
951 
952   /**
953    * @brief Used to set the total number of available non-special compiler temporaries.
954    * @details Can fail setting the new max if there are more temps being used than the new_max.
955    * @param new_max The new maximum number of non-special compiler temporaries.
956    * @return Returns true if the max was set and false if failed to set.
957    */
958   bool SetMaxAvailableNonSpecialCompilerTemps(size_t new_max) {
959     // Make sure that enough temps still exist for backend and also that the
960     // new max can still keep around all of the already requested temps.
961     if (new_max < (GetNumNonSpecialCompilerTemps() + reserved_temps_for_backend_)) {
962       return false;
963     } else {
964       max_available_non_special_compiler_temps_ = new_max;
965       return true;
966     }
967   }
968 
969   /**
970    * @brief Provides the number of non-special compiler temps available for use by ME.
971    * @details Even if this returns zero, special compiler temps are guaranteed to be available.
972    * Additionally, this makes sure to not use any temps reserved for BE only.
973    * @return Returns the number of available temps.
974    */
975   size_t GetNumAvailableVRTemps();
976 
977   /**
978    * @brief Used to obtain the maximum number of compiler temporaries that can be requested.
979    * @return Returns the maximum number of compiler temporaries, whether used or not.
980    */
981   size_t GetMaxPossibleCompilerTemps() const {
982     return max_available_special_compiler_temps_ + max_available_non_special_compiler_temps_;
983   }
984 
985   /**
986    * @brief Used to signal that the compiler temps have been committed.
987    * @details This should be used once the number of temps can no longer change,
988    * such as after frame size is committed and cannot be changed.
989    */
990   void CommitCompilerTemps() {
991     compiler_temps_committed_ = true;
992   }
993 
994   /**
995    * @brief Used to obtain a new unique compiler temporary.
996    * @details Two things are done for convenience when allocating a new compiler
997    * temporary. The ssa register is automatically requested and the information
998    * about reg location is filled. This helps when the temp is requested post
999    * ssa initialization, such as when temps are requested by the backend.
1000    * @warning If the temp requested will be used for ME and have multiple versions,
1001    * the sreg provided by the temp will be invalidated on next ssa recalculation.
1002    * @param ct_type Type of compiler temporary requested.
1003    * @param wide Whether we should allocate a wide temporary.
1004    * @return Returns the newly created compiler temporary.
1005    */
1006   CompilerTemp* GetNewCompilerTemp(CompilerTempType ct_type, bool wide);
1007 
1008   /**
1009    * @brief Used to remove last created compiler temporary when it's not needed.
1010    * @param temp the temporary to remove.
1011    */
1012   void RemoveLastCompilerTemp(CompilerTempType ct_type, bool wide, CompilerTemp* temp);
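  // Editor's illustration (not part of the original header): a backend needing a scratch VR
  // could pair the two calls above, e.g.
  //   CompilerTemp* temp = GetNewCompilerTemp(kCompilerTempBackend, false /* not wide */);
  //   ... use temp->v_reg / temp->s_reg_low ...
  //   RemoveLastCompilerTemp(kCompilerTempBackend, false, temp);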
1013 
1014   bool MethodIsLeaf() {
1015     return attributes_ & METHOD_IS_LEAF;
1016   }
1017 
1018   RegLocation GetRegLocation(int index) {
1019     DCHECK((index >= 0) && (index < num_ssa_regs_));
1020     return reg_location_[index];
1021   }
1022 
1023   RegLocation GetMethodLoc() {
1024     return reg_location_[method_sreg_];
1025   }
1026 
1027   bool IsBackEdge(BasicBlock* branch_bb, BasicBlockId target_bb_id) {
1028     DCHECK_NE(target_bb_id, NullBasicBlockId);
1029     DCHECK_LT(target_bb_id, topological_order_indexes_.size());
1030     DCHECK_LT(branch_bb->id, topological_order_indexes_.size());
1031     return topological_order_indexes_[target_bb_id] <= topological_order_indexes_[branch_bb->id];
1032   }
1033 
1034   bool IsSuspendCheckEdge(BasicBlock* branch_bb, BasicBlockId target_bb_id) {
1035     if (!IsBackEdge(branch_bb, target_bb_id)) {
1036       return false;
1037     }
1038     if (suspend_checks_in_loops_ == nullptr) {
1039       // We didn't run suspend check elimination.
1040       return true;
1041     }
1042     uint16_t target_depth = GetBasicBlock(target_bb_id)->nesting_depth;
1043     return (suspend_checks_in_loops_[branch_bb->id] & (1u << (target_depth - 1u))) == 0;
1044   }
1045 
1046   void CountBranch(DexOffset target_offset) {
1047     if (target_offset <= current_offset_) {
1048       backward_branches_++;
1049     } else {
1050       forward_branches_++;
1051     }
1052   }
1053 
1054   int GetBranchCount() {
1055     return backward_branches_ + forward_branches_;
1056   }
1057 
1058   // Is this vreg in the in set?
1059   bool IsInVReg(uint32_t vreg) {
1060     return (vreg >= GetFirstInVR()) && (vreg < GetFirstTempVR());
1061   }
1062 
1063   uint32_t GetNumOfCodeVRs() const {
1064     return current_code_item_->registers_size_;
1065   }
1066 
1067   uint32_t GetNumOfCodeAndTempVRs() const {
1068     // Include all of the possible temps so that no structures overflow when initialized.
1069     return GetNumOfCodeVRs() + GetMaxPossibleCompilerTemps();
1070   }
1071 
1072   uint32_t GetNumOfLocalCodeVRs() const {
1073     // This also refers to the first "in" VR.
1074     return GetNumOfCodeVRs() - current_code_item_->ins_size_;
1075   }
1076 
1077   uint32_t GetNumOfInVRs() const {
1078     return current_code_item_->ins_size_;
1079   }
1080 
1081   uint32_t GetNumOfOutVRs() const {
1082     return current_code_item_->outs_size_;
1083   }
1084 
1085   uint32_t GetFirstInVR() const {
1086     return GetNumOfLocalCodeVRs();
1087   }
1088 
1089   uint32_t GetFirstTempVR() const {
1090     // Temp VRs immediately follow code VRs.
1091     return GetNumOfCodeVRs();
1092   }
1093 
1094   uint32_t GetFirstSpecialTempVR() const {
1095     // Special temps appear first in the ordering before non special temps.
1096     return GetFirstTempVR();
1097   }
1098 
1099   uint32_t GetFirstNonSpecialTempVR() const {
1100     // We always leave space for all the special temps before the non-special ones.
1101     return GetFirstSpecialTempVR() + max_available_special_compiler_temps_;
1102   }
1103 
1104   bool HasTryCatchBlocks() const {
1105     return current_code_item_->tries_size_ != 0;
1106   }
1107 
1108   void DumpCheckStats();
1109   MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
1110 
1111   /* Return the base virtual register for a SSA name */
1112   int SRegToVReg(int ssa_reg) const {
1113     return ssa_base_vregs_[ssa_reg];
1114   }
1115 
1116   void VerifyDataflow();
1117   void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
1118   bool EliminateNullChecksGate();
1119   bool EliminateNullChecks(BasicBlock* bb);
1120   void EliminateNullChecksEnd();
1121   void InferTypesStart();
1122   bool InferTypes(BasicBlock* bb);
1123   void InferTypesEnd();
1124   bool EliminateClassInitChecksGate();
1125   bool EliminateClassInitChecks(BasicBlock* bb);
1126   void EliminateClassInitChecksEnd();
1127   bool ApplyGlobalValueNumberingGate();
1128   bool ApplyGlobalValueNumbering(BasicBlock* bb);
1129   void ApplyGlobalValueNumberingEnd();
1130   bool EliminateDeadCodeGate();
1131   bool EliminateDeadCode(BasicBlock* bb);
1132   void EliminateDeadCodeEnd();
1133   void GlobalValueNumberingCleanup();
1134   bool EliminateSuspendChecksGate();
1135   bool EliminateSuspendChecks(BasicBlock* bb);
1136 
1137   uint16_t GetGvnIFieldId(MIR* mir) const {
1138     DCHECK(IsInstructionIGetOrIPut(mir->dalvikInsn.opcode));
1139     DCHECK_LT(mir->meta.ifield_lowering_info, ifield_lowering_infos_.size());
1140     DCHECK(temp_.gvn.ifield_ids != nullptr);
1141     return temp_.gvn.ifield_ids[mir->meta.ifield_lowering_info];
1142   }
1143 
1144   uint16_t GetGvnSFieldId(MIR* mir) const {
1145     DCHECK(IsInstructionSGetOrSPut(mir->dalvikInsn.opcode));
1146     DCHECK_LT(mir->meta.sfield_lowering_info, sfield_lowering_infos_.size());
1147     DCHECK(temp_.gvn.sfield_ids != nullptr);
1148     return temp_.gvn.sfield_ids[mir->meta.sfield_lowering_info];
1149   }
1150 
1151   bool PuntToInterpreter() {
1152     return punt_to_interpreter_;
1153   }
1154 
1155   void SetPuntToInterpreter(bool val);
1156 
1157   void DisassembleExtendedInstr(const MIR* mir, std::string* decoded_mir);
1158   char* GetDalvikDisassembly(const MIR* mir);
1159   void ReplaceSpecialChars(std::string& str);
1160   std::string GetSSAName(int ssa_reg);
1161   std::string GetSSANameWithConst(int ssa_reg, bool singles_only);
1162   void GetBlockName(BasicBlock* bb, char* name);
1163   const char* GetShortyFromMethodReference(const MethodReference& target_method);
1164   void DumpMIRGraph();
1165   CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
1166   BasicBlock* NewMemBB(BBType block_type, int block_id);
1167   MIR* NewMIR();
1168   MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir);
1169   BasicBlock* NextDominatedBlock(BasicBlock* bb);
1170   bool LayoutBlocks(BasicBlock* bb);
1171   void ComputeTopologicalSortOrder();
1172   BasicBlock* CreateNewBB(BBType block_type);
1173 
1174   bool InlineSpecialMethodsGate();
1175   void InlineSpecialMethodsStart();
1176   void InlineSpecialMethods(BasicBlock* bb);
1177   void InlineSpecialMethodsEnd();
1178 
1179   /**
1180    * @brief Perform the initial preparation for the Method Uses.
1181    */
1182   void InitializeMethodUses();
1183 
1184   /**
1185    * @brief Perform the initial preparation for the Constant Propagation.
1186    */
1187   void InitializeConstantPropagation();
1188 
1189   /**
1190    * @brief Perform the initial preparation for the SSA Transformation.
1191    */
1192   void SSATransformationStart();
1193 
1194   /**
1195    * @brief Insert the operands for the Phi nodes.
1196    * @param bb the considered BasicBlock.
1197    * @return true
1198    */
1199   bool InsertPhiNodeOperands(BasicBlock* bb);
1200 
1201   /**
1202    * @brief Perform the cleanup after the SSA Transformation.
1203    */
1204   void SSATransformationEnd();
1205 
1206   /**
1207    * @brief Perform constant propagation on a BasicBlock.
1208    * @param bb the considered BasicBlock.
1209    */
1210   void DoConstantPropagation(BasicBlock* bb);
1211 
1212   /**
1213    * @brief Get use count weight for a given block.
1214    * @param bb the BasicBlock.
1215    */
1216   uint32_t GetUseCountWeight(BasicBlock* bb) const;
1217 
1218   /**
1219    * @brief Count the uses in the BasicBlock
1220    * @param bb the BasicBlock
1221    */
1222   void CountUses(BasicBlock* bb);
1223 
1224   static uint64_t GetDataFlowAttributes(Instruction::Code opcode);
1225   static uint64_t GetDataFlowAttributes(MIR* mir);
1226 
1227   /**
1228    * @brief Combine BasicBlocks
1229    * @param bb the BasicBlock we are considering
1230    */
1231   void CombineBlocks(BasicBlock* bb);
1232 
1233   void ClearAllVisitedFlags();
1234 
1235   void AllocateSSAUseData(MIR *mir, int num_uses);
1236   void AllocateSSADefData(MIR *mir, int num_defs);
1237   void CalculateBasicBlockInformation(const PassManager* const post_opt);
1238   void ComputeDFSOrders();
1239   void ComputeDefBlockMatrix();
1240   void ComputeDominators();
1241   void CompilerInitializeSSAConversion();
1242   virtual void InitializeBasicBlockDataFlow();
1243   void FindPhiNodeBlocks();
1244   void DoDFSPreOrderSSARename(BasicBlock* block);
1245 
1246   bool DfsOrdersUpToDate() const {
1247     return dfs_orders_up_to_date_;
1248   }
1249 
1250   bool DominationUpToDate() const {
1251     return domination_up_to_date_;
1252   }
1253 
1254   bool MirSsaRepUpToDate() const {
1255     return mir_ssa_rep_up_to_date_;
1256   }
1257 
1258   bool TopologicalOrderUpToDate() const {
1259     return topological_order_up_to_date_;
1260   }
1261 
1262   /*
1263    * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
1264    * we can verify that all catch entries have native PC entries.
1265    */
1266   std::set<uint32_t> catches_;
1267 
1268   // TODO: make these private.
1269   RegLocation* reg_location_;                               // Map SSA names to location.
1270   ArenaSafeMap<unsigned int, unsigned int> block_id_map_;   // Block collapse lookup cache.
1271 
1272   static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
1273 
1274   void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
1275 
1276  protected:
1277   int FindCommonParent(int block1, int block2);
1278   void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
1279                          const ArenaBitVector* src2);
1280   void HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
1281                        ArenaBitVector* live_in_v, int dalvik_reg_id);
1282   void HandleDef(ArenaBitVector* def_v, int dalvik_reg_id);
1283   void HandleExtended(ArenaBitVector* use_v, ArenaBitVector* def_v,
1284                       ArenaBitVector* live_in_v,
1285                       const MIR::DecodedInstruction& d_insn);
1286   bool DoSSAConversion(BasicBlock* bb);
1287   int ParseInsn(const uint16_t* code_ptr, MIR::DecodedInstruction* decoded_instruction);
1288   bool ContentIsInsn(const uint16_t* code_ptr);
1289   BasicBlock* SplitBlock(DexOffset code_offset, BasicBlock* orig_block,
1290                          BasicBlock** immed_pred_block_p);
1291   BasicBlock* FindBlock(DexOffset code_offset, bool create, BasicBlock** immed_pred_block_p,
1292                         ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
1293   void ProcessTryCatchBlocks(ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
1294   bool IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset, NarrowDexOffset catch_offset);
1295   BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
1296                                int flags, const uint16_t* code_ptr, const uint16_t* code_end,
1297                                ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
1298   BasicBlock* ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
1299                                int flags,
1300                                ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
1301   BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
1302                               int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
1303                               const uint16_t* code_end,
1304                               ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
1305   int AddNewSReg(int v_reg);
1306   void HandleSSAUse(int* uses, int dalvik_reg, int reg_index);
1307   void DataFlowSSAFormat35C(MIR* mir);
1308   void DataFlowSSAFormat3RC(MIR* mir);
1309   void DataFlowSSAFormatExtended(MIR* mir);
1310   bool FindLocalLiveIn(BasicBlock* bb);
1311   bool VerifyPredInfo(BasicBlock* bb);
1312   BasicBlock* NeedsVisit(BasicBlock* bb);
1313   BasicBlock* NextUnvisitedSuccessor(BasicBlock* bb);
1314   void MarkPreOrder(BasicBlock* bb);
1315   void RecordDFSOrders(BasicBlock* bb);
1316   void ComputeDomPostOrderTraversal(BasicBlock* bb);
1317   int GetSSAUseCount(int s_reg);
1318   bool BasicBlockOpt(BasicBlock* bb);
1319   void MultiplyAddOpt(BasicBlock* bb);
1320 
1321   /**
1322    * @brief Check whether the given MIR can throw an exception.
1323    * @param mir The mir to check.
1324    * @return Returns 'true' if the given MIR might throw an exception.
1325    */
1326   bool CanThrow(MIR* mir) const;
1327 
1328   /**
1329    * @brief Combine multiply and add/sub MIRs into corresponding extended MAC MIR.
1330    * @param mul_mir The multiply MIR to be combined.
1331    * @param add_mir The add/sub MIR to be combined.
1332    * @param mul_is_first_addend 'true' if multiply product is the first addend of add operation.
1333    * @param is_wide 'true' if the operations are long type.
1334    * @param is_sub 'true' if it is a multiply-subtract operation.
1335    */
1336   void CombineMultiplyAdd(MIR* mul_mir, MIR* add_mir, bool mul_is_first_addend,
1337                           bool is_wide, bool is_sub);
1338   /*
1339    * @brief Check whether the first MIR anti-depends on the second MIR.
1340    * @details Checks whether one of the first MIR's used vregs is redefined by the second MIR,
1341    * i.e. there is a write-after-read dependency.
1342    * @param first The first MIR.
1343    * @param second The second MIR.
1344    * @return Returns true if there is a write-after-read dependency.
1345    */
1346   bool HasAntiDependency(MIR* first, MIR* second);
1347 
1348   bool BuildExtendedBBList(class BasicBlock* bb);
1349   bool FillDefBlockMatrix(BasicBlock* bb);
1350   void InitializeDominationInfo(BasicBlock* bb);
1351   bool ComputeblockIDom(BasicBlock* bb);
1352   bool ComputeBlockDominators(BasicBlock* bb);
1353   bool SetDominators(BasicBlock* bb);
1354   bool ComputeBlockLiveIns(BasicBlock* bb);
1355   bool ComputeDominanceFrontier(BasicBlock* bb);
1356 
1357   void CountChecks(BasicBlock* bb);
1358   void AnalyzeBlock(BasicBlock* bb, struct MethodStats* stats);
1359   bool ComputeSkipCompilation(struct MethodStats* stats, bool skip_default,
1360                               std::string* skip_message);
1361 
1362   CompilationUnit* const cu_;
1363   ArenaVector<int> ssa_base_vregs_;
1364   ArenaVector<int> ssa_subscripts_;
1365   // Map original Dalvik virtual reg i to the current SSA name.
1366   int32_t* vreg_to_ssa_map_;        // length == method->registers_size
1367   int* ssa_last_defs_;              // length == method->registers_size
1368   ArenaBitVector* is_constant_v_;   // length == num_ssa_reg
1369   int* constant_values_;            // length == num_ssa_reg
1370   // Use counts of ssa names.
1371   ArenaVector<uint32_t> use_counts_;      // Weighted by nesting depth
1372   ArenaVector<uint32_t> raw_use_counts_;  // Not weighted
1373   unsigned int num_reachable_blocks_;
1374   unsigned int max_num_reachable_blocks_;
1375   bool dfs_orders_up_to_date_;
1376   bool domination_up_to_date_;
1377   bool mir_ssa_rep_up_to_date_;
1378   bool topological_order_up_to_date_;
1379   ArenaVector<BasicBlockId> dfs_order_;
1380   ArenaVector<BasicBlockId> dfs_post_order_;
1381   ArenaVector<BasicBlockId> dom_post_order_traversal_;
1382   ArenaVector<BasicBlockId> topological_order_;
1383   // Indexes in topological_order_ need to be only as big as the BasicBlockId.
1384   static_assert(sizeof(BasicBlockId) == sizeof(uint16_t), "Assuming 16 bit BasicBlockId");
1385   // For each loop head, remember the past-the-end index of the end of the loop. 0 if not loop head.
1386   ArenaVector<uint16_t> topological_order_loop_ends_;
1387   // Map BB ids to topological_order_ indexes. 0xffff if not included (hidden or null block).
1388   ArenaVector<uint16_t> topological_order_indexes_;
1389   // Stack of the loop head indexes and recalculation flags for RepeatingTopologicalSortIterator.
1390   ArenaVector<std::pair<uint16_t, bool>> topological_order_loop_head_stack_;
1391   size_t max_nested_loops_;
1392   int* i_dom_list_;
1393   std::unique_ptr<ScopedArenaAllocator> temp_scoped_alloc_;
1394   // Union of temporaries used by different passes.
1395   union {
1396     // Class init check elimination.
1397     struct {
1398       size_t num_class_bits;  // 2 bits per class: class initialized and class in dex cache.
1399       ArenaBitVector* work_classes_to_check;
1400       ArenaBitVector** ending_classes_to_check_matrix;  // num_blocks_ x num_class_bits.
1401       uint16_t* indexes;
1402     } cice;
1403     // Null check elimination.
1404     struct {
1405       size_t num_vregs;
1406       ArenaBitVector* work_vregs_to_check;
1407       ArenaBitVector** ending_vregs_to_check_matrix;  // num_blocks_ x num_vregs.
1408     } nce;
1409     // Special method inlining.
1410     struct {
1411       size_t num_indexes;
1412       ArenaBitVector* processed_indexes;
1413       uint16_t* lowering_infos;
1414     } smi;
1415     // SSA transformation.
1416     struct {
1417       size_t num_vregs;
1418       ArenaBitVector* work_live_vregs;
1419       ArenaBitVector** def_block_matrix;  // num_vregs x num_blocks_.
1420       ArenaBitVector** phi_node_blocks;  // num_vregs x num_blocks_.
1421       TypeInference* ti;
1422     } ssa;
1423     // Global value numbering.
1424     struct {
1425       GlobalValueNumbering* gvn;
1426       uint16_t* ifield_ids;  // Part of GVN/LVN but cached here for LVN to avoid recalculation.
1427       uint16_t* sfield_ids;  // Ditto.
1428       GvnDeadCodeElimination* dce;
1429     } gvn;
1430   } temp_;
1431   static const int kInvalidEntry = -1;
1432   ArenaVector<BasicBlock*> block_list_;
1433   ArenaBitVector* try_block_addr_;
1434   BasicBlock* entry_block_;
1435   BasicBlock* exit_block_;
1436   const DexFile::CodeItem* current_code_item_;
1437   ArenaVector<DexCompilationUnit*> m_units_;     // List of methods included in this graph
1438   typedef std::pair<int, int> MIRLocation;       // Insert point, (m_unit_ index, offset)
1439   ArenaVector<MIRLocation> method_stack_;        // Include stack
1440   int current_method_;
1441   DexOffset current_offset_;                     // Offset in code units
1442   int def_count_;                                // Used to estimate size of ssa name storage.
1443   int* opcode_count_;                            // Dex opcode coverage stats.
1444   int num_ssa_regs_;                             // Number of names following SSA transformation.
1445   ArenaVector<BasicBlockId> extended_basic_blocks_;  // Heads of block "traces".
1446   int method_sreg_;
1447   unsigned int attributes_;
1448   Checkstats* checkstats_;
1449   ArenaAllocator* const arena_;
1450   int backward_branches_;
1451   int forward_branches_;
1452   size_t num_non_special_compiler_temps_;  // Keeps track of allocated non-special compiler temps. These are VRs that are in compiler temp region on stack.
1453   size_t max_available_non_special_compiler_temps_;  // Keeps track of maximum available non-special temps.
1454   size_t max_available_special_compiler_temps_;      // Keeps track of maximum available special temps.
1455   bool requested_backend_temp_;            // Keeps track whether BE temps have been requested.
1456   size_t reserved_temps_for_backend_;      // Keeps track of the remaining temps that are reserved for BE.
1457   bool compiler_temps_committed_;          // Keeps track whether number of temps has been frozen (for example post frame size calculation).
1458   bool punt_to_interpreter_;               // Difficult or not worthwhile - just interpret.
1459   uint64_t merged_df_flags_;
1460   ArenaVector<MirIFieldLoweringInfo> ifield_lowering_infos_;
1461   ArenaVector<MirSFieldLoweringInfo> sfield_lowering_infos_;
1462   ArenaVector<MirMethodLoweringInfo> method_lowering_infos_;
1463 
1464   // In the suspend check elimination pass we determine for each basic block and enclosing
1465   // loop whether there's guaranteed to be a suspend check on the path from the loop head
1466   // to this block. If so, we can eliminate the back-edge suspend check.
1467  // The bb->id is the index into suspend_checks_in_loops_ and the loop head's depth is the bit
1468  // index in suspend_checks_in_loops_[bb->id].
1469   uint32_t* suspend_checks_in_loops_;
1470 
1471   static const uint64_t oat_data_flow_attributes_[kMirOpLast];
1472 
1473   friend class MirOptimizationTest;
1474   friend class ClassInitCheckEliminationTest;
1475   friend class SuspendCheckEliminationTest;
1476   friend class NullCheckEliminationTest;
1477   friend class GlobalValueNumberingTest;
1478   friend class GvnDeadCodeEliminationTest;
1479   friend class LocalValueNumberingTest;
1480   friend class TopologicalSortOrderTest;
1481   friend class TypeInferenceTest;
1482   friend class QuickCFITest;
1483   friend class QuickAssembleX86TestBase;
1484 };
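// Editor's illustration (not part of the original header): a minimal read-only walk over the
// graph in DFS order, visiting each block's MIRs and consulting the static data flow attribute
// table. CountMirsWithChecks is a hypothetical helper; real analyses are driven through the
// Pass/PassManager framework instead.
static inline size_t CountMirsWithChecks(MIRGraph* mir_graph) {
  size_t count = 0;
  for (BasicBlockId bb_id : mir_graph->GetDfsOrder()) {
    BasicBlock* bb = mir_graph->GetBasicBlock(bb_id);
    if (bb == nullptr) {
      continue;
    }
    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
      uint64_t df_attributes = MIRGraph::GetDataFlowAttributes(mir);
      if ((df_attributes & DF_HAS_NR_CHKS) != 0u) {
        ++count;
      }
    }
  }
  return count;
}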
1485 
1486 }  // namespace art
1487 
1488 #endif  // ART_COMPILER_DEX_MIR_GRAPH_H_
1489