/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mir_to_lir-inl.h"

#include "dex/dataflow_iterator-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "driver/compiler_driver.h"
#include "primitive.h"
#include "thread-inl.h"

namespace art {

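// Slow path for suspend checks in "special" (simple leaf) methods: builds a full frame,
// spills all argument registers, calls the kQuickTestSuspend runtime helper, then
// restores only the arguments recorded via PreserveArg() before popping the frame.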
class Mir2Lir::SpecialSuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  SpecialSuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
      : LIRSlowPath(m2l, branch, cont),
        num_used_args_(0u) {
  }

  void PreserveArg(int in_position) {
    // Avoid duplicates.
    for (size_t i = 0; i != num_used_args_; ++i) {
      if (used_args_[i] == in_position) {
        return;
      }
    }
    DCHECK_LT(num_used_args_, kMaxArgsToPreserve);
    used_args_[num_used_args_] = in_position;
    ++num_used_args_;
  }

  void Compile() OVERRIDE {
    m2l_->ResetRegPool();
    m2l_->ResetDefTracking();
    GenerateTargetLabel(kPseudoSuspendTarget);

    m2l_->LockCallTemps();

    // Generate frame.
    m2l_->GenSpecialEntryForSuspend();

    // Spill all args.
    for (size_t i = 0, end = m2l_->in_to_reg_storage_mapping_.GetEndMappedIn(); i < end;
        i += m2l_->in_to_reg_storage_mapping_.GetShorty(i).IsWide() ? 2u : 1u) {
      m2l_->SpillArg(i);
    }

    m2l_->FreeCallTemps();

    // Do the actual suspend call to runtime.
    m2l_->CallRuntimeHelper(kQuickTestSuspend, true);

    m2l_->LockCallTemps();

    // Unspill used regs. (Don't unspill unused args.)
    for (size_t i = 0; i != num_used_args_; ++i) {
      m2l_->UnspillArg(used_args_[i]);
    }

    // Pop the frame.
    m2l_->GenSpecialExitForSuspend();

    // Branch to the continue label.
    DCHECK(cont_ != nullptr);
    m2l_->OpUnconditionalBranch(cont_);

    m2l_->FreeCallTemps();
  }

 private:
  static constexpr size_t kMaxArgsToPreserve = 2u;
  size_t num_used_args_;
  int used_args_[kMaxArgsToPreserve];
};

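// Map a shorty type character to the register class that holds values of that type:
// 'L' -> kRefReg, 'F'/'D' -> kFPReg, everything else -> kCoreReg.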
RegisterClass Mir2Lir::ShortyToRegClass(char shorty_type) {
  RegisterClass res;
  switch (shorty_type) {
    case 'L':
      res = kRefReg;
      break;
    case 'F':
      // Expected fallthrough.
    case 'D':
      res = kFPReg;
      break;
    default:
      res = kCoreReg;
  }
  return res;
}

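// Lock the physical register (if any) that holds the incoming argument at |in_position|
// so it is not reused as a temp.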
void Mir2Lir::LockArg(size_t in_position) {
  RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);

  if (reg_arg.Valid()) {
    LockTemp(reg_arg);
  }
}

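// Load the incoming argument at |in_position| into a register of |reg_class|, either
// from its mapped argument register or from the stack, copying to a temp of the right
// class when needed. Returns the register holding the argument.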
RegStorage Mir2Lir::LoadArg(size_t in_position, RegisterClass reg_class, bool wide) {
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);

  if (cu_->instruction_set == kX86) {
    /*
     * On x86 the call instruction pushes the return address, moving the stack pointer.
     * Thus, we add another 4 bytes to locate the caller's outs (the callee's ins).
     */
    offset += sizeof(uint32_t);
  }

  if (cu_->instruction_set == kX86_64) {
    /*
     * On x86-64 the call instruction pushes the return address, moving the stack pointer.
     * Thus, we add another 8 bytes to locate the caller's outs (the callee's ins).
     */
    offset += sizeof(uint64_t);
  }

  RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);

  // TODO: REVISIT: This adds a spill of low part while we could just copy it.
  if (reg_arg.Valid() && wide && (reg_arg.GetWideKind() == kNotWide)) {
    // We only have the low half of the wide value in a register; flush it to memory.
    StoreBaseDisp(TargetPtrReg(kSp), offset, reg_arg, k32, kNotVolatile);
    reg_arg = RegStorage::InvalidReg();
  }

  if (!reg_arg.Valid()) {
    reg_arg = wide ? AllocTypedTempWide(false, reg_class) : AllocTypedTemp(false, reg_class);
    LoadBaseDisp(TargetPtrReg(kSp), offset, reg_arg, wide ? k64 : k32, kNotVolatile);
  } else {
    // Check if we need to copy the arg to a different reg_class.
    if (!RegClassMatches(reg_class, reg_arg)) {
      if (wide) {
        RegStorage new_reg = AllocTypedTempWide(false, reg_class);
        OpRegCopyWide(new_reg, reg_arg);
        reg_arg = new_reg;
      } else {
        RegStorage new_reg = AllocTypedTemp(false, reg_class);
        OpRegCopy(new_reg, reg_arg);
        reg_arg = new_reg;
      }
    }
  }
  return reg_arg;
}

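// Load the incoming argument at |in_position| directly into the physical register of
// |rl_dest|, from the mapped argument register or from the stack.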
void Mir2Lir::LoadArgDirect(size_t in_position, RegLocation rl_dest) {
  DCHECK_EQ(rl_dest.location, kLocPhysReg);
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
  if (cu_->instruction_set == kX86) {
    /*
     * On x86 the call instruction pushes the return address, moving the stack pointer.
     * Thus, we add another 4 bytes to locate the caller's outs (the callee's ins).
     */
    offset += sizeof(uint32_t);
  }

  if (cu_->instruction_set == kX86_64) {
    /*
     * On x86-64 the call instruction pushes the return address, moving the stack pointer.
     * Thus, we add another 8 bytes to locate the caller's outs (the callee's ins).
     */
    offset += sizeof(uint64_t);
  }

  RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);

  // TODO: REVISIT: This adds a spill of low part while we could just copy it.
  if (reg_arg.Valid() && rl_dest.wide && (reg_arg.GetWideKind() == kNotWide)) {
    // We only have the low half of the wide value in a register; flush it to memory.
    StoreBaseDisp(TargetPtrReg(kSp), offset, reg_arg, k32, kNotVolatile);
    reg_arg = RegStorage::InvalidReg();
  }

  if (!reg_arg.Valid()) {
    OpSize op_size = rl_dest.wide ? k64 : (rl_dest.ref ? kReference : k32);
    LoadBaseDisp(TargetPtrReg(kSp), offset, rl_dest.reg, op_size, kNotVolatile);
  } else {
    if (rl_dest.wide) {
      OpRegCopyWide(rl_dest.reg, reg_arg);
    } else {
      OpRegCopy(rl_dest.reg, reg_arg);
    }
  }
}

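// Store the mapped argument register (if any) for |in_position| back to the argument's
// stack slot.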
void Mir2Lir::SpillArg(size_t in_position) {
  RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);

  if (reg_arg.Valid()) {
    int offset = frame_size_ + StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
    ShortyArg arg = in_to_reg_storage_mapping_.GetShorty(in_position);
    OpSize size = arg.IsRef() ? kReference :
        (arg.IsWide() && reg_arg.GetWideKind() == kWide) ? k64 : k32;
    StoreBaseDisp(TargetPtrReg(kSp), offset, reg_arg, size, kNotVolatile);
  }
}

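// Reload the mapped argument register (if any) for |in_position| from the argument's
// stack slot.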
void Mir2Lir::UnspillArg(size_t in_position) {
  RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);

  if (reg_arg.Valid()) {
    int offset = frame_size_ + StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
    ShortyArg arg = in_to_reg_storage_mapping_.GetShorty(in_position);
    OpSize size = arg.IsRef() ? kReference :
        (arg.IsWide() && reg_arg.GetWideKind() == kWide) ? k64 : k32;
    LoadBaseDisp(TargetPtrReg(kSp), offset, reg_arg, size, kNotVolatile);
  }
}

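// Emit a suspend test for a "special" method and register its slow path. Returns the
// slow path so the caller can record which arguments it must preserve across the call.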
Mir2Lir::SpecialSuspendCheckSlowPath* Mir2Lir::GenSpecialSuspendTest() {
  LockCallTemps();
  LIR* branch = OpTestSuspend(nullptr);
  FreeCallTemps();
  LIR* cont = NewLIR0(kPseudoTargetLabel);
  SpecialSuspendCheckSlowPath* slow_path =
      new (arena_) SpecialSuspendCheckSlowPath(this, branch, cont);
  AddSlowPath(slow_path);
  return slow_path;
}

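// Attempt special-case codegen for a simple getter: load a field of "this" directly
// into the return register(s). Returns false if this pattern cannot be handled here.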
bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
  // FastInstance() already checked by DexFileMethodInliner.
  const InlineIGetIPutData& data = special.d.ifield_data;
  if (data.method_is_static != 0u || data.object_arg != 0u) {
    // The object is not "this" and has to be null-checked.
    return false;
  }

  OpSize size;
  switch (data.op_variant) {
    case InlineMethodAnalyser::IGetVariant(Instruction::IGET):
      size = in_to_reg_storage_mapping_.GetShorty(data.src_arg).IsFP() ? kSingle : k32;
      break;
    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE):
      size = in_to_reg_storage_mapping_.GetShorty(data.src_arg).IsFP() ? kDouble : k64;
      break;
    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT):
      size = kReference;
      break;
    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT):
      size = kSignedHalf;
      break;
    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR):
      size = kUnsignedHalf;
      break;
    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_BYTE):
      size = kSignedByte;
      break;
    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN):
      size = kUnsignedByte;
      break;
    default:
      LOG(FATAL) << "Unknown variant: " << data.op_variant;
      UNREACHABLE();
  }

  // Point of no return - no aborts after this
  if (!kLeafOptimization) {
    auto* slow_path = GenSpecialSuspendTest();
    slow_path->PreserveArg(data.object_arg);
  }
  LockArg(data.object_arg);
  GenPrintLabel(mir);
  RegStorage reg_obj = LoadArg(data.object_arg, kRefReg);
  RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
  RegisterClass ret_reg_class = ShortyToRegClass(cu_->shorty[0]);
  RegLocation rl_dest = IsWide(size) ? GetReturnWide(ret_reg_class) : GetReturn(ret_reg_class);
  RegStorage r_result = rl_dest.reg;
  if (!RegClassMatches(reg_class, r_result)) {
    r_result = IsWide(size) ? AllocTypedTempWide(rl_dest.fp, reg_class)
                            : AllocTypedTemp(rl_dest.fp, reg_class);
  }
  if (IsRef(size)) {
    LoadRefDisp(reg_obj, data.field_offset, r_result, data.is_volatile ? kVolatile : kNotVolatile);
  } else {
    LoadBaseDisp(reg_obj, data.field_offset, r_result, size, data.is_volatile ? kVolatile :
        kNotVolatile);
  }
  if (r_result.NotExactlyEquals(rl_dest.reg)) {
    if (IsWide(size)) {
      OpRegCopyWide(rl_dest.reg, r_result);
    } else {
      OpRegCopy(rl_dest.reg, r_result);
    }
  }
  return true;
}

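// Attempt special-case codegen for a simple setter: store a method argument into a
// field of "this", with a GC card mark for reference stores. Returns false if this
// pattern cannot be handled here.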
bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
  // FastInstance() already checked by DexFileMethodInliner.
  const InlineIGetIPutData& data = special.d.ifield_data;
  if (data.method_is_static != 0u || data.object_arg != 0u) {
    // The object is not "this" and has to be null-checked.
    return false;
  }
  if (data.return_arg_plus1 != 0u) {
    // The setter returns a method argument which we don't support here.
    return false;
  }

  OpSize size;
  switch (data.op_variant) {
    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT):
      size = in_to_reg_storage_mapping_.GetShorty(data.src_arg).IsFP() ? kSingle : k32;
      break;
    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE):
      size = in_to_reg_storage_mapping_.GetShorty(data.src_arg).IsFP() ? kDouble : k64;
      break;
    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT):
      size = kReference;
      break;
    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT):
      size = kSignedHalf;
      break;
    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_CHAR):
      size = kUnsignedHalf;
      break;
    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BYTE):
      size = kSignedByte;
      break;
    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN):
      size = kUnsignedByte;
      break;
    default:
      LOG(FATAL) << "Unknown variant: " << data.op_variant;
      UNREACHABLE();
  }

  // Point of no return - no aborts after this
  if (!kLeafOptimization) {
    auto* slow_path = GenSpecialSuspendTest();
    slow_path->PreserveArg(data.object_arg);
    slow_path->PreserveArg(data.src_arg);
  }
  LockArg(data.object_arg);
  LockArg(data.src_arg);
  GenPrintLabel(mir);
  RegStorage reg_obj = LoadArg(data.object_arg, kRefReg);
  RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
  RegStorage reg_src = LoadArg(data.src_arg, reg_class, IsWide(size));
  if (IsRef(size)) {
    StoreRefDisp(reg_obj, data.field_offset, reg_src, data.is_volatile ? kVolatile : kNotVolatile);
  } else {
    StoreBaseDisp(reg_obj, data.field_offset, reg_src, size, data.is_volatile ? kVolatile :
        kNotVolatile);
  }
  if (IsRef(size)) {
    MarkGCCard(0, reg_src, reg_obj);
  }
  return true;
}

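// Attempt special-case codegen for an identity method that just returns one of its
// arguments.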
bool Mir2Lir::GenSpecialIdentity(MIR* mir, const InlineMethod& special) {
  const InlineReturnArgData& data = special.d.return_data;
  bool wide = (data.is_wide != 0u);

  // Point of no return - no aborts after this
  if (!kLeafOptimization) {
    auto* slow_path = GenSpecialSuspendTest();
    slow_path->PreserveArg(data.arg);
  }
  LockArg(data.arg);
  GenPrintLabel(mir);
  RegisterClass reg_class = ShortyToRegClass(cu_->shorty[0]);
  RegLocation rl_dest = wide ? GetReturnWide(reg_class) : GetReturn(reg_class);
  LoadArgDirect(data.arg, rl_dest);
  return true;
}

/*
 * Special-case code generation for simple non-throwing leaf methods.
 */
bool Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
  DCHECK(special.flags & kInlineSpecial);
  current_dalvik_offset_ = mir->offset;
  DCHECK(current_mir_ == nullptr);  // Safepoints attributed to prologue.
  MIR* return_mir = nullptr;
  bool successful = false;
  EnsureInitializedArgMappingToPhysicalReg();

  switch (special.opcode) {
    case kInlineOpNop:
      successful = true;
      DCHECK_EQ(mir->dalvikInsn.opcode, Instruction::RETURN_VOID);
      if (!kLeafOptimization) {
        GenSpecialSuspendTest();
      }
      return_mir = mir;
      break;
    case kInlineOpNonWideConst: {
      successful = true;
      if (!kLeafOptimization) {
        GenSpecialSuspendTest();
      }
      RegLocation rl_dest = GetReturn(ShortyToRegClass(cu_->shorty[0]));
      GenPrintLabel(mir);
      LoadConstant(rl_dest.reg, static_cast<int>(special.d.data));
      return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
      break;
    }
    case kInlineOpReturnArg:
      successful = GenSpecialIdentity(mir, special);
      return_mir = mir;
      break;
    case kInlineOpIGet:
      successful = GenSpecialIGet(mir, special);
      return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
      break;
    case kInlineOpIPut:
      successful = GenSpecialIPut(mir, special);
      return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
      break;
    default:
      break;
  }

  if (successful) {
    if (kIsDebugBuild) {
      // Clear unreachable catch entries.
      mir_graph_->catches_.clear();
    }

    // Handle verbosity for return MIR.
    if (return_mir != nullptr) {
      current_dalvik_offset_ = return_mir->offset;
      // Not handling special identity case because it already generated code as part
      // of the return. The label should have been added before any code was generated.
      if (special.opcode != kInlineOpReturnArg) {
        GenPrintLabel(return_mir);
      }
    }
    GenSpecialExitSequence();

    if (!kLeafOptimization) {
      HandleSlowPaths();
    } else {
      core_spill_mask_ = 0;
      num_core_spills_ = 0;
      fp_spill_mask_ = 0;
      num_fp_spills_ = 0;
      frame_size_ = 0;
      core_vmap_table_.clear();
      fp_vmap_table_.clear();
    }
  }

  return successful;
}

/*
 * Target-independent code generation.  Use only high-level
 * load/store utilities here, or target-dependent genXX() handlers
 * when necessary.
 */
void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list) {
  RegLocation rl_src[3];
  RegLocation rl_dest = mir_graph_->GetBadLoc();
  RegLocation rl_result = mir_graph_->GetBadLoc();
  const Instruction::Code opcode = mir->dalvikInsn.opcode;
  const int opt_flags = mir->optimization_flags;
  const uint32_t vB = mir->dalvikInsn.vB;
  const uint32_t vC = mir->dalvikInsn.vC;
  DCHECK(CheckCorePoolSanity()) << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " @ 0x:"
                                << std::hex << current_dalvik_offset_;

  // Prep Src and Dest locations.
  int next_sreg = 0;
  int next_loc = 0;
  uint64_t attrs = MIRGraph::GetDataFlowAttributes(opcode);
  rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
  if (attrs & DF_UA) {
    if (attrs & DF_A_WIDE) {
      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
      next_sreg += 2;
    } else {
      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
      next_sreg++;
    }
  }
  if (attrs & DF_UB) {
    if (attrs & DF_B_WIDE) {
      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
      next_sreg += 2;
    } else {
      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
      next_sreg++;
    }
  }
  if (attrs & DF_UC) {
    if (attrs & DF_C_WIDE) {
      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
    } else {
      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
    }
  }
  if (attrs & DF_DA) {
    if (attrs & DF_A_WIDE) {
      rl_dest = mir_graph_->GetDestWide(mir);
    } else {
      rl_dest = mir_graph_->GetDest(mir);
    }
  }
  switch (opcode) {
    case Instruction::NOP:
      break;

    case Instruction::MOVE_EXCEPTION:
      GenMoveException(rl_dest);
      break;

    case Instruction::RETURN_VOID_NO_BARRIER:
    case Instruction::RETURN_VOID:
      if (((cu_->access_flags & kAccConstructor) != 0) &&
          cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(), cu_->dex_file,
                                                           cu_->class_def_idx)) {
        GenMemBarrier(kStoreStore);
      }
      if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
        GenSuspendTest(opt_flags);
      }
      break;

    case Instruction::RETURN_OBJECT:
      DCHECK(rl_src[0].ref);
      FALLTHROUGH_INTENDED;
    case Instruction::RETURN:
      if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
        GenSuspendTest(opt_flags);
      }
      StoreValue(GetReturn(ShortyToRegClass(cu_->shorty[0])), rl_src[0]);
      break;

    case Instruction::RETURN_WIDE:
      if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
        GenSuspendTest(opt_flags);
      }
      StoreValueWide(GetReturnWide(ShortyToRegClass(cu_->shorty[0])), rl_src[0]);
      break;

    case Instruction::MOVE_RESULT:
    case Instruction::MOVE_RESULT_WIDE:
    case Instruction::MOVE_RESULT_OBJECT:
      // Already processed with invoke or filled-new-array.
      break;

    case Instruction::MOVE:
    case Instruction::MOVE_OBJECT:
    case Instruction::MOVE_16:
    case Instruction::MOVE_OBJECT_16:
    case Instruction::MOVE_FROM16:
    case Instruction::MOVE_OBJECT_FROM16:
      StoreValue(rl_dest, rl_src[0]);
      break;

    case Instruction::MOVE_WIDE:
    case Instruction::MOVE_WIDE_16:
    case Instruction::MOVE_WIDE_FROM16:
      StoreValueWide(rl_dest, rl_src[0]);
      break;

    case Instruction::CONST:
    case Instruction::CONST_4:
    case Instruction::CONST_16:
      GenConst(rl_dest, vB);
      break;

    case Instruction::CONST_HIGH16:
      GenConst(rl_dest, vB << 16);
      break;

    case Instruction::CONST_WIDE_16:
    case Instruction::CONST_WIDE_32:
      GenConstWide(rl_dest, static_cast<int64_t>(static_cast<int32_t>(vB)));
      break;

    case Instruction::CONST_WIDE:
      GenConstWide(rl_dest, mir->dalvikInsn.vB_wide);
      break;

    case Instruction::CONST_WIDE_HIGH16:
      rl_result = EvalLoc(rl_dest, kAnyReg, true);
      LoadConstantWide(rl_result.reg, static_cast<int64_t>(vB) << 48);
      StoreValueWide(rl_dest, rl_result);
      break;

    case Instruction::MONITOR_ENTER:
      GenMonitorEnter(opt_flags, rl_src[0]);
      break;

    case Instruction::MONITOR_EXIT:
      GenMonitorExit(opt_flags, rl_src[0]);
      break;

    case Instruction::CHECK_CAST: {
      GenCheckCast(opt_flags, mir->offset, vB, rl_src[0]);
      break;
    }
    case Instruction::INSTANCE_OF:
      GenInstanceof(vC, rl_dest, rl_src[0]);
      break;

    case Instruction::NEW_INSTANCE:
      GenNewInstance(vB, rl_dest);
      break;

    case Instruction::THROW:
      GenThrow(rl_src[0]);
      break;

    case Instruction::ARRAY_LENGTH: {
      int len_offset;
      len_offset = mirror::Array::LengthOffset().Int32Value();
      rl_src[0] = LoadValue(rl_src[0], kRefReg);
      GenNullCheck(rl_src[0].reg, opt_flags);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      Load32Disp(rl_src[0].reg, len_offset, rl_result.reg);
      MarkPossibleNullPointerException(opt_flags);
      StoreValue(rl_dest, rl_result);
      break;
    }
    case Instruction::CONST_STRING:
    case Instruction::CONST_STRING_JUMBO:
      GenConstString(vB, rl_dest);
      break;

    case Instruction::CONST_CLASS:
      GenConstClass(vB, rl_dest);
      break;

    case Instruction::FILL_ARRAY_DATA:
      GenFillArrayData(mir, vB, rl_src[0]);
      break;

    case Instruction::FILLED_NEW_ARRAY:
      GenFilledNewArray(mir_graph_->NewMemCallInfo(bb, mir, kStatic,
                        false /* not range */));
      break;

    case Instruction::FILLED_NEW_ARRAY_RANGE:
      GenFilledNewArray(mir_graph_->NewMemCallInfo(bb, mir, kStatic,
                        true /* range */));
      break;

    case Instruction::NEW_ARRAY:
      GenNewArray(vC, rl_dest, rl_src[0]);
      break;

    case Instruction::GOTO:
    case Instruction::GOTO_16:
    case Instruction::GOTO_32:
      if (mir_graph_->IsBackEdge(bb, bb->taken)) {
        GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken]);
      } else {
        OpUnconditionalBranch(&label_list[bb->taken]);
      }
      break;

    case Instruction::PACKED_SWITCH:
      GenPackedSwitch(mir, vB, rl_src[0]);
      break;

    case Instruction::SPARSE_SWITCH:
      GenSparseSwitch(mir, vB, rl_src[0]);
      break;

    case Instruction::CMPL_FLOAT:
    case Instruction::CMPG_FLOAT:
    case Instruction::CMPL_DOUBLE:
    case Instruction::CMPG_DOUBLE:
      GenCmpFP(opcode, rl_dest, rl_src[0], rl_src[1]);
      break;

    case Instruction::CMP_LONG:
      GenCmpLong(rl_dest, rl_src[0], rl_src[1]);
      break;

    case Instruction::IF_EQ:
    case Instruction::IF_NE:
    case Instruction::IF_LT:
    case Instruction::IF_GE:
    case Instruction::IF_GT:
    case Instruction::IF_LE: {
      if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
        GenSuspendTest(opt_flags);
      }
      LIR* taken = &label_list[bb->taken];
      GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken);
      break;
    }
    case Instruction::IF_EQZ:
    case Instruction::IF_NEZ:
    case Instruction::IF_LTZ:
    case Instruction::IF_GEZ:
    case Instruction::IF_GTZ:
    case Instruction::IF_LEZ: {
      if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
        GenSuspendTest(opt_flags);
      }
      LIR* taken = &label_list[bb->taken];
      GenCompareZeroAndBranch(opcode, rl_src[0], taken);
      break;
    }

    case Instruction::AGET_WIDE:
      GenArrayGet(opt_flags, rl_dest.fp ? kDouble : k64, rl_src[0], rl_src[1], rl_dest, 3);
      break;
    case Instruction::AGET_OBJECT:
      GenArrayGet(opt_flags, kReference, rl_src[0], rl_src[1], rl_dest, 2);
      break;
    case Instruction::AGET:
      GenArrayGet(opt_flags, rl_dest.fp ? kSingle : k32, rl_src[0], rl_src[1], rl_dest, 2);
      break;
    case Instruction::AGET_BOOLEAN:
      GenArrayGet(opt_flags, kUnsignedByte, rl_src[0], rl_src[1], rl_dest, 0);
      break;
    case Instruction::AGET_BYTE:
      GenArrayGet(opt_flags, kSignedByte, rl_src[0], rl_src[1], rl_dest, 0);
      break;
    case Instruction::AGET_CHAR:
      GenArrayGet(opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
      break;
    case Instruction::AGET_SHORT:
      GenArrayGet(opt_flags, kSignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
      break;
    case Instruction::APUT_WIDE:
      GenArrayPut(opt_flags, rl_src[0].fp ? kDouble : k64, rl_src[1], rl_src[2], rl_src[0], 3, false);
      break;
    case Instruction::APUT:
      GenArrayPut(opt_flags, rl_src[0].fp ? kSingle : k32, rl_src[1], rl_src[2], rl_src[0], 2, false);
      break;
    case Instruction::APUT_OBJECT: {
      bool is_null = mir_graph_->IsConstantNullRef(rl_src[0]);
      bool is_safe = is_null;  // Always safe to store null.
      if (!is_safe) {
        // Check safety from verifier type information.
        const DexCompilationUnit* unit = mir_graph_->GetCurrentDexCompilationUnit();
        is_safe = cu_->compiler_driver->IsSafeCast(unit, mir->offset);
      }
      if (is_null || is_safe) {
        // Store of constant null doesn't require an assignability test and can be generated inline
        // without fixed register usage or a card mark.
        GenArrayPut(opt_flags, kReference, rl_src[1], rl_src[2], rl_src[0], 2, !is_null);
      } else {
        GenArrayObjPut(opt_flags, rl_src[1], rl_src[2], rl_src[0]);
      }
      break;
    }
    case Instruction::APUT_SHORT:
    case Instruction::APUT_CHAR:
      GenArrayPut(opt_flags, kUnsignedHalf, rl_src[1], rl_src[2], rl_src[0], 1, false);
      break;
    case Instruction::APUT_BYTE:
    case Instruction::APUT_BOOLEAN:
      GenArrayPut(opt_flags, kUnsignedByte, rl_src[1], rl_src[2], rl_src[0], 0, false);
      break;

    case Instruction::IGET_OBJECT_QUICK:
    case Instruction::IGET_OBJECT:
      GenIGet(mir, opt_flags, kReference, Primitive::kPrimNot, rl_dest, rl_src[0]);
      break;

    case Instruction::IGET_WIDE_QUICK:
    case Instruction::IGET_WIDE:
      // kPrimLong and kPrimDouble share the same entrypoints.
      if (rl_dest.fp) {
        GenIGet(mir, opt_flags, kDouble, Primitive::kPrimDouble, rl_dest, rl_src[0]);
      } else {
        GenIGet(mir, opt_flags, k64, Primitive::kPrimLong, rl_dest, rl_src[0]);
      }
      break;

    case Instruction::IGET_QUICK:
    case Instruction::IGET:
      if (rl_dest.fp) {
        GenIGet(mir, opt_flags, kSingle, Primitive::kPrimFloat, rl_dest, rl_src[0]);
      } else {
        GenIGet(mir, opt_flags, k32, Primitive::kPrimInt, rl_dest, rl_src[0]);
      }
      break;

    case Instruction::IGET_CHAR_QUICK:
    case Instruction::IGET_CHAR:
      GenIGet(mir, opt_flags, kUnsignedHalf, Primitive::kPrimChar, rl_dest, rl_src[0]);
      break;

    case Instruction::IGET_SHORT_QUICK:
    case Instruction::IGET_SHORT:
      GenIGet(mir, opt_flags, kSignedHalf, Primitive::kPrimShort, rl_dest, rl_src[0]);
      break;

    case Instruction::IGET_BOOLEAN_QUICK:
    case Instruction::IGET_BOOLEAN:
      GenIGet(mir, opt_flags, kUnsignedByte, Primitive::kPrimBoolean, rl_dest, rl_src[0]);
      break;

    case Instruction::IGET_BYTE_QUICK:
    case Instruction::IGET_BYTE:
      GenIGet(mir, opt_flags, kSignedByte, Primitive::kPrimByte, rl_dest, rl_src[0]);
      break;

    case Instruction::IPUT_WIDE_QUICK:
    case Instruction::IPUT_WIDE:
      GenIPut(mir, opt_flags, rl_src[0].fp ? kDouble : k64, rl_src[0], rl_src[1]);
      break;

    case Instruction::IPUT_OBJECT_QUICK:
    case Instruction::IPUT_OBJECT:
      GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1]);
      break;

    case Instruction::IPUT_QUICK:
    case Instruction::IPUT:
      GenIPut(mir, opt_flags, rl_src[0].fp ? kSingle : k32, rl_src[0], rl_src[1]);
      break;

    case Instruction::IPUT_BYTE_QUICK:
    case Instruction::IPUT_BOOLEAN_QUICK:
    case Instruction::IPUT_BYTE:
    case Instruction::IPUT_BOOLEAN:
      GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1]);
      break;

    case Instruction::IPUT_CHAR_QUICK:
    case Instruction::IPUT_CHAR:
      GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1]);
      break;

    case Instruction::IPUT_SHORT_QUICK:
    case Instruction::IPUT_SHORT:
      GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1]);
      break;

    case Instruction::SGET_OBJECT:
      GenSget(mir, rl_dest, kReference, Primitive::kPrimNot);
      break;

    case Instruction::SGET:
      GenSget(mir, rl_dest, rl_dest.fp ? kSingle : k32, Primitive::kPrimInt);
      break;

    case Instruction::SGET_CHAR:
      GenSget(mir, rl_dest, kUnsignedHalf, Primitive::kPrimChar);
      break;

    case Instruction::SGET_SHORT:
      GenSget(mir, rl_dest, kSignedHalf, Primitive::kPrimShort);
      break;

    case Instruction::SGET_BOOLEAN:
      GenSget(mir, rl_dest, kUnsignedByte, Primitive::kPrimBoolean);
      break;

    case Instruction::SGET_BYTE:
      GenSget(mir, rl_dest, kSignedByte, Primitive::kPrimByte);
      break;

    case Instruction::SGET_WIDE:
      // kPrimLong and kPrimDouble share the same entrypoints.
      GenSget(mir, rl_dest, rl_dest.fp ? kDouble : k64, Primitive::kPrimDouble);
      break;

    case Instruction::SPUT_OBJECT:
      GenSput(mir, rl_src[0], kReference);
      break;

    case Instruction::SPUT:
      GenSput(mir, rl_src[0], rl_src[0].fp ? kSingle : k32);
      break;

    case Instruction::SPUT_BYTE:
    case Instruction::SPUT_BOOLEAN:
      GenSput(mir, rl_src[0], kUnsignedByte);
      break;

    case Instruction::SPUT_CHAR:
      GenSput(mir, rl_src[0], kUnsignedHalf);
      break;

    case Instruction::SPUT_SHORT:
      GenSput(mir, rl_src[0], kSignedHalf);
      break;

    case Instruction::SPUT_WIDE:
      GenSput(mir, rl_src[0], rl_src[0].fp ? kDouble : k64);
      break;

    case Instruction::INVOKE_STATIC_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, true));
      break;
    case Instruction::INVOKE_STATIC:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, false));
      break;

    case Instruction::INVOKE_DIRECT:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, false));
      break;
    case Instruction::INVOKE_DIRECT_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, true));
      break;

    case Instruction::INVOKE_VIRTUAL_QUICK:
    case Instruction::INVOKE_VIRTUAL:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, false));
      break;

    case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
    case Instruction::INVOKE_VIRTUAL_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, true));
      break;

    case Instruction::INVOKE_SUPER:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, false));
      break;
    case Instruction::INVOKE_SUPER_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, true));
      break;

    case Instruction::INVOKE_INTERFACE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, false));
      break;
    case Instruction::INVOKE_INTERFACE_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, true));
      break;

    case Instruction::NEG_INT:
    case Instruction::NOT_INT:
      GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[0], opt_flags);
      break;

    case Instruction::NEG_LONG:
    case Instruction::NOT_LONG:
      GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[0], opt_flags);
      break;

    case Instruction::NEG_FLOAT:
      GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[0]);
      break;

    case Instruction::NEG_DOUBLE:
      GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[0]);
      break;

    case Instruction::INT_TO_LONG:
      GenIntToLong(rl_dest, rl_src[0]);
      break;

    case Instruction::LONG_TO_INT:
      GenLongToInt(rl_dest, rl_src[0]);
      break;

    case Instruction::INT_TO_BYTE:
    case Instruction::INT_TO_SHORT:
    case Instruction::INT_TO_CHAR:
      GenIntNarrowing(opcode, rl_dest, rl_src[0]);
      break;

    case Instruction::INT_TO_FLOAT:
    case Instruction::INT_TO_DOUBLE:
    case Instruction::LONG_TO_FLOAT:
    case Instruction::LONG_TO_DOUBLE:
    case Instruction::FLOAT_TO_INT:
    case Instruction::FLOAT_TO_LONG:
    case Instruction::FLOAT_TO_DOUBLE:
    case Instruction::DOUBLE_TO_INT:
    case Instruction::DOUBLE_TO_LONG:
    case Instruction::DOUBLE_TO_FLOAT:
      GenConversion(opcode, rl_dest, rl_src[0]);
      break;

    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      if (rl_src[0].is_const &&
          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[0]), opcode)) {
        GenArithOpIntLit(opcode, rl_dest, rl_src[1],
                         mir_graph_->ConstantValue(rl_src[0].orig_sreg));
      } else if (rl_src[1].is_const &&
                 InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
        GenArithOpIntLit(opcode, rl_dest, rl_src[0],
                         mir_graph_->ConstantValue(rl_src[1].orig_sreg));
      } else {
        GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
      }
      break;

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      if (rl_src[1].is_const &&
          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
        GenArithOpIntLit(opcode, rl_dest, rl_src[0], mir_graph_->ConstantValue(rl_src[1]));
      } else {
        GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
      }
      break;

    case Instruction::ADD_LONG:
    case Instruction::SUB_LONG:
    case Instruction::AND_LONG:
    case Instruction::OR_LONG:
    case Instruction::XOR_LONG:
    case Instruction::ADD_LONG_2ADDR:
    case Instruction::SUB_LONG_2ADDR:
    case Instruction::AND_LONG_2ADDR:
    case Instruction::OR_LONG_2ADDR:
    case Instruction::XOR_LONG_2ADDR:
      if (rl_src[0].is_const || rl_src[1].is_const) {
        GenArithImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
        break;
      }
      FALLTHROUGH_INTENDED;
    case Instruction::MUL_LONG:
    case Instruction::DIV_LONG:
    case Instruction::REM_LONG:
    case Instruction::MUL_LONG_2ADDR:
    case Instruction::DIV_LONG_2ADDR:
    case Instruction::REM_LONG_2ADDR:
      GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
      break;

    case Instruction::SHL_LONG:
    case Instruction::SHR_LONG:
    case Instruction::USHR_LONG:
    case Instruction::SHL_LONG_2ADDR:
    case Instruction::SHR_LONG_2ADDR:
    case Instruction::USHR_LONG_2ADDR:
      if (rl_src[1].is_const) {
        GenShiftImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
      } else {
        GenShiftOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
      }
      break;

    case Instruction::DIV_FLOAT:
    case Instruction::DIV_FLOAT_2ADDR:
      if (HandleEasyFloatingPointDiv(rl_dest, rl_src[0], rl_src[1])) {
        break;
      }
      FALLTHROUGH_INTENDED;
    case Instruction::ADD_FLOAT:
    case Instruction::SUB_FLOAT:
    case Instruction::MUL_FLOAT:
    case Instruction::REM_FLOAT:
    case Instruction::ADD_FLOAT_2ADDR:
    case Instruction::SUB_FLOAT_2ADDR:
    case Instruction::MUL_FLOAT_2ADDR:
    case Instruction::REM_FLOAT_2ADDR:
      GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[1]);
      break;

    case Instruction::DIV_DOUBLE:
    case Instruction::DIV_DOUBLE_2ADDR:
      if (HandleEasyFloatingPointDiv(rl_dest, rl_src[0], rl_src[1])) {
        break;
      }
      FALLTHROUGH_INTENDED;
    case Instruction::ADD_DOUBLE:
    case Instruction::SUB_DOUBLE:
    case Instruction::MUL_DOUBLE:
    case Instruction::REM_DOUBLE:
    case Instruction::ADD_DOUBLE_2ADDR:
    case Instruction::SUB_DOUBLE_2ADDR:
    case Instruction::MUL_DOUBLE_2ADDR:
    case Instruction::REM_DOUBLE_2ADDR:
      GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[1]);
      break;

    case Instruction::RSUB_INT:
    case Instruction::ADD_INT_LIT16:
    case Instruction::MUL_INT_LIT16:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT_LIT16:
    case Instruction::AND_INT_LIT16:
    case Instruction::OR_INT_LIT16:
    case Instruction::XOR_INT_LIT16:
    case Instruction::ADD_INT_LIT8:
    case Instruction::RSUB_INT_LIT8:
    case Instruction::MUL_INT_LIT8:
    case Instruction::DIV_INT_LIT8:
    case Instruction::REM_INT_LIT8:
    case Instruction::AND_INT_LIT8:
    case Instruction::OR_INT_LIT8:
    case Instruction::XOR_INT_LIT8:
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHR_INT_LIT8:
    case Instruction::USHR_INT_LIT8:
      GenArithOpIntLit(opcode, rl_dest, rl_src[0], vC);
      break;

    default:
      LOG(FATAL) << "Unexpected opcode: " << opcode;
  }
  DCHECK(CheckCorePoolSanity());
}  // NOLINT(readability/fn_size)

// Process extended MIR instructions.
void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
    case kMirOpCopy: {
      RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
      RegLocation rl_dest = mir_graph_->GetDest(mir);
      StoreValue(rl_dest, rl_src);
      break;
    }
    case kMirOpFusedCmplFloat:
      if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
        GenSuspendTest(mir->optimization_flags);
      }
      GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, false /*double*/);
      break;
    case kMirOpFusedCmpgFloat:
      if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
        GenSuspendTest(mir->optimization_flags);
      }
      GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, false /*double*/);
      break;
    case kMirOpFusedCmplDouble:
      if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
        GenSuspendTest(mir->optimization_flags);
      }
      GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, true /*double*/);
      break;
    case kMirOpFusedCmpgDouble:
      if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
        GenSuspendTest(mir->optimization_flags);
      }
      GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, true /*double*/);
      break;
    case kMirOpFusedCmpLong:
      if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
        GenSuspendTest(mir->optimization_flags);
      }
      GenFusedLongCmpBranch(bb, mir);
      break;
    case kMirOpSelect:
      GenSelect(bb, mir);
      break;
    case kMirOpNullCheck: {
      RegLocation rl_obj = mir_graph_->GetSrc(mir, 0);
      rl_obj = LoadValue(rl_obj, kRefReg);
      // An explicit check is done because it is not expected that, when this is used,
      // it will actually trip up the implicit checks (since an invalid access
      // is needed on the null object).
      GenExplicitNullCheck(rl_obj.reg, mir->optimization_flags);
      break;
    }
    case kMirOpPhi:
    case kMirOpNop:
    case kMirOpRangeCheck:
    case kMirOpDivZeroCheck:
    case kMirOpCheck:
      // Ignore these known opcodes.
      break;
    default:
      // Give the backends a chance to handle unknown extended MIR opcodes.
      GenMachineSpecificExtendedMethodMIR(bb, mir);
      break;
  }
}

void Mir2Lir::GenPrintLabel(MIR* mir) {
  // Mark the beginning of a Dalvik instruction for line tracking.
  if (cu_->verbose) {
    char* inst_str = mir_graph_->GetDalvikDisassembly(mir);
    MarkBoundary(mir->offset, inst_str);
  }
}

// Handle the content in each basic block.
bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
  if (bb->block_type == kDead) return false;
  current_dalvik_offset_ = bb->start_offset;
  MIR* mir;
  int block_id = bb->id;

  block_label_list_[block_id].operands[0] = bb->start_offset;

  // Insert the block label.
  block_label_list_[block_id].opcode = kPseudoNormalBlockLabel;
  block_label_list_[block_id].flags.fixup = kFixupLabel;
  AppendLIR(&block_label_list_[block_id]);

  LIR* head_lir = nullptr;

  // If this is a catch block, export the start address.
  if (bb->catch_entry) {
    head_lir = NewLIR0(kPseudoExportedPC);
  }

  // Free temp registers and reset redundant store tracking.
  ClobberAllTemps();

  if (bb->block_type == kEntryBlock) {
    ResetRegPool();
    int start_vreg = mir_graph_->GetFirstInVR();
    AppendLIR(NewLIR0(kPseudoPrologueBegin));
    DCHECK_EQ(cu_->target64, Is64BitInstructionSet(cu_->instruction_set));
    if (cu_->target64) {
      DCHECK(mir_graph_->GetMethodLoc().wide);
    }
    GenEntrySequence(&mir_graph_->reg_location_[start_vreg], mir_graph_->GetMethodLoc());
    AppendLIR(NewLIR0(kPseudoPrologueEnd));
    DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
  } else if (bb->block_type == kExitBlock) {
    ResetRegPool();
    DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
    AppendLIR(NewLIR0(kPseudoEpilogueBegin));
    GenExitSequence();
    AppendLIR(NewLIR0(kPseudoEpilogueEnd));
    DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
  }

  for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
    ResetRegPool();
    if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
      ClobberAllTemps();
      // Reset temp allocation to minimize differences when A/B testing.
      reg_pool_->ResetNextTemp();
    }

    if (cu_->disable_opt & (1 << kSuppressLoads)) {
      ResetDefTracking();
    }

    // Reset temp tracking sanity check.
    if (kIsDebugBuild) {
      live_sreg_ = INVALID_SREG;
    }

    current_dalvik_offset_ = mir->offset;
    current_mir_ = mir;
    int opcode = mir->dalvikInsn.opcode;

    GenPrintLabel(mir);

    // Remember the first LIR for this block.
    if (head_lir == nullptr) {
      head_lir = &block_label_list_[bb->id];
      // Set the first label as a scheduling barrier.
      DCHECK(!head_lir->flags.use_def_invalid);
      head_lir->u.m.def_mask = &kEncodeAll;
    }

    if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
      HandleExtendedMethodMIR(bb, mir);
      continue;
    }

    CompileDalvikInstruction(mir, bb, block_label_list_);
  }

  if (head_lir) {
    // Eliminate redundant loads/stores and delay stores into later slots.
    ApplyLocalOptimizations(head_lir, last_lir_insn_);
  }
  return false;
}

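// Try to compile the whole method as a "special" (inlined pattern) method, generating
// code from the first DalvikByteCode block. Returns true if special codegen succeeded.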
bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
  cu_->NewTimingSplit("SpecialMIR2LIR");
  // Find the first DalvikByteCode block.
  DCHECK_EQ(mir_graph_->GetNumReachableBlocks(), mir_graph_->GetDfsOrder().size());
  BasicBlock* bb = nullptr;
  for (BasicBlockId dfs_id : mir_graph_->GetDfsOrder()) {
    BasicBlock* candidate = mir_graph_->GetBasicBlock(dfs_id);
    if (candidate->block_type == kDalvikByteCode) {
      bb = candidate;
      break;
    }
  }
  if (bb == nullptr) {
    return false;
  }
  DCHECK_EQ(bb->start_offset, 0);
  DCHECK(bb->first_mir_insn != nullptr);

  // Get the first instruction.
  MIR* mir = bb->first_mir_insn;

  // Free temp registers and reset redundant store tracking.
  ResetRegPool();
  ResetDefTracking();
  ClobberAllTemps();

  return GenSpecialCase(bb, mir, special);
}

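// Main MIR-to-LIR loop: walk the blocks in pre-order DFS order, generate code for each,
// and insert an explicit branch wherever a fall-through block is not laid out next.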
void Mir2Lir::MethodMIR2LIR() {
  cu_->NewTimingSplit("MIR2LIR");

  // Hold the labels of each block.
  block_label_list_ = arena_->AllocArray<LIR>(mir_graph_->GetNumBlocks(), kArenaAllocLIR);

  PreOrderDfsIterator iter(mir_graph_);
  BasicBlock* curr_bb = iter.Next();
  BasicBlock* next_bb = iter.Next();
  while (curr_bb != nullptr) {
    MethodBlockCodeGen(curr_bb);
    // If the fall_through block is no longer laid out consecutively, drop in a branch.
    BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
    if ((curr_bb_fall_through != nullptr) && (curr_bb_fall_through != next_bb)) {
      OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
    }
    curr_bb = next_bb;
    do {
      next_bb = iter.Next();
    } while ((next_bb != nullptr) && (next_bb->block_type == kDead));
  }
  HandleSlowPaths();
}

1350 
1351 //
1352 // LIR Slow Path
1353 //
1354 
GenerateTargetLabel(int opcode)1355 LIR* Mir2Lir::LIRSlowPath::GenerateTargetLabel(int opcode) {
1356   m2l_->SetCurrentDexPc(current_dex_pc_);
1357   m2l_->current_mir_ = current_mir_;
1358   LIR* target = m2l_->NewLIR0(opcode);
1359   fromfast_->target = target;
1360   return target;
1361 }
1362 
1363 
void Mir2Lir::CheckRegStorageImpl(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp,
                                  bool fail, bool report) const {
  if (rs.Valid()) {
    if (ref == RefCheck::kCheckRef) {
      if (cu_->target64 && !rs.Is64Bit()) {
        if (fail) {
          CHECK(false) << "Reg storage not 64b for ref.";
        } else if (report) {
          LOG(WARNING) << "Reg storage not 64b for ref.";
        }
      }
    }
    if (wide == WidenessCheck::kCheckWide) {
      if (!rs.Is64Bit()) {
        if (fail) {
          CHECK(false) << "Reg storage not 64b for wide.";
        } else if (report) {
          LOG(WARNING) << "Reg storage not 64b for wide.";
        }
      }
    }
    // A tighter check would be nice, but for now soft-float will not check float at all.
    if (fp == FPCheck::kCheckFP && cu_->instruction_set != kArm) {
      if (!rs.IsFloat()) {
        if (fail) {
          CHECK(false) << "Reg storage not float for fp.";
        } else if (report) {
          LOG(WARNING) << "Reg storage not float for fp.";
        }
      }
    } else if (fp == FPCheck::kCheckNotFP) {
      if (rs.IsFloat()) {
        if (fail) {
          CHECK(false) << "Reg storage float for not-fp.";
        } else if (report) {
          LOG(WARNING) << "Reg storage float for not-fp.";
        }
      }
    }
  }
}

void Mir2Lir::CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const {
  // Regrettably can't use the fp part of rl, as that is not really indicative of where a value
  // will be stored.
  CheckRegStorageImpl(rl.reg, rl.wide ? WidenessCheck::kCheckWide : WidenessCheck::kCheckNotWide,
      rl.ref ? RefCheck::kCheckRef : RefCheck::kCheckNotRef, FPCheck::kIgnoreFP, fail, report);
}

size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
  UNUSED(lir);
  UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
  UNREACHABLE();
}

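// Build the argument-to-register mapping from the method's shorty. A wide argument takes
// two mapping slots; any argument without a valid register is flagged as being passed on
// the stack.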
void Mir2Lir::InToRegStorageMapping::Initialize(ShortyIterator* shorty,
                                                InToRegStorageMapper* mapper) {
  DCHECK(mapper != nullptr);
  DCHECK(shorty != nullptr);
  DCHECK(!IsInitialized());
  DCHECK_EQ(end_mapped_in_, 0u);
  DCHECK(!has_arguments_on_stack_);
  while (shorty->Next()) {
    ShortyArg arg = shorty->GetArg();
    RegStorage reg = mapper->GetNextReg(arg);
    mapping_.emplace_back(arg, reg);
    if (arg.IsWide()) {
      mapping_.emplace_back(ShortyArg(kInvalidShorty), RegStorage::InvalidReg());
    }
    if (reg.Valid()) {
      end_mapped_in_ = mapping_.size();
      // If the VR is wide but wasn't mapped as wide then account for it.
      if (arg.IsWide() && !reg.Is64Bit()) {
        --end_mapped_in_;
      }
    } else {
      has_arguments_on_stack_ = true;
    }
  }
  initialized_ = true;
}

RegStorage Mir2Lir::InToRegStorageMapping::GetReg(size_t in_position) {
  DCHECK(IsInitialized());
  DCHECK_LT(in_position, mapping_.size());
  DCHECK_NE(mapping_[in_position].first.GetType(), kInvalidShorty);
  return mapping_[in_position].second;
}

Mir2Lir::ShortyArg Mir2Lir::InToRegStorageMapping::GetShorty(size_t in_position) {
  DCHECK(IsInitialized());
  DCHECK_LT(static_cast<size_t>(in_position), mapping_.size());
  DCHECK_NE(mapping_[in_position].first.GetType(), kInvalidShorty);
  return mapping_[in_position].first;
}

}  // namespace art