1 /*
2  * Copyright (C) 2012 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "mir_to_lir-inl.h"
18 
19 #include <functional>
20 
21 #include "arch/arm/instruction_set_features_arm.h"
22 #include "base/bit_utils.h"
23 #include "base/macros.h"
24 #include "dex/compiler_ir.h"
25 #include "dex/mir_graph.h"
26 #include "dex/quick/arm/arm_lir.h"
27 #include "driver/compiler_driver.h"
28 #include "driver/compiler_options.h"
29 #include "entrypoints/quick/quick_entrypoints.h"
30 #include "mirror/array.h"
31 #include "mirror/object_array-inl.h"
32 #include "mirror/object-inl.h"
33 #include "mirror/object_reference.h"
34 #include "utils/dex_cache_arrays_layout-inl.h"
35 #include "verifier/method_verifier.h"
36 
37 namespace art {
38 
39 // Shortcuts for repeatedly used long type names.
40 typedef mirror::ObjectArray<mirror::Object> ObjArray;
41 typedef mirror::ObjectArray<mirror::Class> ClassArray;
42 
43 /*
44  * This source file contains "gen" codegen routines that should
45  * be applicable to most targets.  Only mid-level support utilities
46  * and "op" calls may be used here.
47  */
48 
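// Debug-only helpers: each checks a bit in cu->enable_debug that forces the
// corresponding slow path (field, string or type access) to be taken even when
// the fast path would otherwise be used.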
49 ALWAYS_INLINE static inline bool ForceSlowFieldPath(CompilationUnit* cu) {
50   return (cu->enable_debug & (1 << kDebugSlowFieldPath)) != 0;
51 }
52 
53 ALWAYS_INLINE static inline bool ForceSlowStringPath(CompilationUnit* cu) {
54   return (cu->enable_debug & (1 << kDebugSlowStringPath)) != 0;
55 }
56 
57 ALWAYS_INLINE static inline bool ForceSlowTypePath(CompilationUnit* cu) {
58   return (cu->enable_debug & (1 << kDebugSlowTypePath)) != 0;
59 }
60 
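// If r_result is null, branches to a slow path that calls 'trampoline' with the
// immediate 'imm' and copies the helper's result (kRet0) back into r_result
// before rejoining the fast path.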
61 void Mir2Lir::GenIfNullUseHelperImm(RegStorage r_result, QuickEntrypointEnum trampoline, int imm) {
62   class CallHelperImmMethodSlowPath : public LIRSlowPath {
63    public:
64     CallHelperImmMethodSlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont,
65                                 QuickEntrypointEnum trampoline_in, int imm_in,
66                                 RegStorage r_result_in)
67         : LIRSlowPath(m2l, fromfast, cont), trampoline_(trampoline_in),
68           imm_(imm_in), r_result_(r_result_in) {
69     }
70 
71     void Compile() {
72       GenerateTargetLabel();
73       m2l_->CallRuntimeHelperImm(trampoline_, imm_, true);
74       m2l_->OpRegCopy(r_result_,  m2l_->TargetReg(kRet0, kRef));
75       m2l_->OpUnconditionalBranch(cont_);
76     }
77 
78    private:
79     QuickEntrypointEnum trampoline_;
80     const int imm_;
81     const RegStorage r_result_;
82   };
83 
84   LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, nullptr);
85   LIR* cont = NewLIR0(kPseudoTargetLabel);
86 
87   AddSlowPath(new (arena_) CallHelperImmMethodSlowPath(this, branch, cont, trampoline, imm,
88                                                        r_result));
89 }
90 
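// Loads the static storage base (Class*) for a field declared in another class
// into kArg0. Adds a slow path that resolves and/or initializes that class when
// the dex cache entry is null or the class status is not yet initialized.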
91 RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& field_info,
92                                                int opt_flags) {
93   DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
94   // May do a runtime call, so flush everything to home locations.
95   FlushAllRegs();
96   RegStorage r_base = TargetReg(kArg0, kRef);
97   LockTemp(r_base);
98   if (CanUseOpPcRelDexCacheArrayLoad()) {
99     uint32_t offset = dex_cache_arrays_layout_.TypeOffset(field_info.StorageIndex());
100     OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, r_base, false);
101   } else {
102     // Using fixed register to sync with possible call to runtime support.
103     RegStorage r_method = LoadCurrMethodWithHint(r_base);
104     LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
105                 kNotVolatile);
106     int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
107     LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
108   }
109   // r_base now points at static storage (Class*) or null if the type is not yet resolved.
110   LIR* unresolved_branch = nullptr;
111   if (!field_info.IsClassInDexCache() && (opt_flags & MIR_CLASS_IS_IN_DEX_CACHE) == 0) {
112     // Check if r_base is null.
113     unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, nullptr);
114   }
115   LIR* uninit_branch = nullptr;
116   if (!field_info.IsClassInitialized() && (opt_flags & MIR_CLASS_IS_INITIALIZED) == 0) {
117     // Check if the class in r_base is not yet initialized.
118     RegStorage r_tmp = TargetReg(kArg2, kNotWide);
119     LockTemp(r_tmp);
120     uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
121                                       mirror::Class::StatusOffset().Int32Value(),
122                                       mirror::Class::kStatusInitialized, nullptr, nullptr);
123     FreeTemp(r_tmp);
124   }
125   if (unresolved_branch != nullptr || uninit_branch != nullptr) {
126     //
127     // Slow path to ensure a class is initialized for sget/sput.
128     //
129     class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
130      public:
131       // There are up to two branches to the static field slow path, the "unresolved" when the type
132       // entry in the dex cache is null, and the "uninit" when the class is not yet initialized.
133       // At least one will be non-null here, otherwise we wouldn't generate the slow path.
134       StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
135                           RegStorage r_base_in)
136           : LIRSlowPath(m2l, unresolved != nullptr ? unresolved : uninit, cont),
137             second_branch_(unresolved != nullptr ? uninit : nullptr),
138             storage_index_(storage_index), r_base_(r_base_in) {
139       }
140 
141       void Compile() {
142         LIR* target = GenerateTargetLabel();
143         if (second_branch_ != nullptr) {
144           second_branch_->target = target;
145         }
146         m2l_->CallRuntimeHelperImm(kQuickInitializeStaticStorage, storage_index_, true);
147         // Copy helper's result into r_base, a no-op on all but MIPS.
148         m2l_->OpRegCopy(r_base_,  m2l_->TargetReg(kRet0, kRef));
149 
150         m2l_->OpUnconditionalBranch(cont_);
151       }
152 
153      private:
154       // Second branch to the slow path, or null if there's only one branch.
155       LIR* const second_branch_;
156 
157       const int storage_index_;
158       const RegStorage r_base_;
159     };
160 
161     // The slow path is invoked if the r_base is null or the class pointed
162     // to by it is not initialized.
163     LIR* cont = NewLIR0(kPseudoTargetLabel);
164     AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
165                                                  field_info.StorageIndex(), r_base));
166   }
167   return r_base;
168 }
169 
170 /*
171  * Generate a kPseudoBarrier marker to indicate the boundary of special
172  * blocks.
173  */
174 void Mir2Lir::GenBarrier() {
175   LIR* barrier = NewLIR0(kPseudoBarrier);
176   /* Mark all resources as being clobbered */
177   DCHECK(!barrier->flags.use_def_invalid);
178   barrier->u.m.def_mask = &kEncodeAll;
179 }
180 
181 void Mir2Lir::GenDivZeroException() {
182   LIR* branch = OpUnconditionalBranch(nullptr);
183   AddDivZeroCheckSlowPath(branch);
184 }
185 
186 void Mir2Lir::GenDivZeroCheck(ConditionCode c_code) {
187   LIR* branch = OpCondBranch(c_code, nullptr);
188   AddDivZeroCheckSlowPath(branch);
189 }
190 
191 void Mir2Lir::GenDivZeroCheck(RegStorage reg) {
192   LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
193   AddDivZeroCheckSlowPath(branch);
194 }
195 
196 void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
197   class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
198    public:
199     DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch_in)
200         : LIRSlowPath(m2l, branch_in) {
201     }
202 
203     void Compile() OVERRIDE {
204       m2l_->ResetRegPool();
205       m2l_->ResetDefTracking();
206       GenerateTargetLabel(kPseudoThrowTarget);
207       m2l_->CallRuntimeHelper(kQuickThrowDivZero, true);
208     }
209   };
210 
211   AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch));
212 }
213 
214 void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
215   class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
216    public:
217     ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, RegStorage index_in,
218                              RegStorage length_in)
219         : LIRSlowPath(m2l, branch_in),
220           index_(index_in), length_(length_in) {
221     }
222 
223     void Compile() OVERRIDE {
224       m2l_->ResetRegPool();
225       m2l_->ResetDefTracking();
226       GenerateTargetLabel(kPseudoThrowTarget);
227       m2l_->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, index_, length_, true);
228     }
229 
230    private:
231     const RegStorage index_;
232     const RegStorage length_;
233   };
234 
235   LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr);
236   AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
237 }
238 
239 void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
240   class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
241    public:
242     ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, int index_in, RegStorage length_in)
243         : LIRSlowPath(m2l, branch_in),
244           index_(index_in), length_(length_in) {
245     }
246 
247     void Compile() OVERRIDE {
248       m2l_->ResetRegPool();
249       m2l_->ResetDefTracking();
250       GenerateTargetLabel(kPseudoThrowTarget);
251 
252       RegStorage arg1_32 = m2l_->TargetReg(kArg1, kNotWide);
253       RegStorage arg0_32 = m2l_->TargetReg(kArg0, kNotWide);
254 
255       m2l_->OpRegCopy(arg1_32, length_);
256       m2l_->LoadConstant(arg0_32, index_);
257       m2l_->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, arg0_32, arg1_32, true);
258     }
259 
260    private:
261     const int32_t index_;
262     const RegStorage length_;
263   };
264 
265   LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr);
266   AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
267 }
268 
269 LIR* Mir2Lir::GenNullCheck(RegStorage reg) {
270   class NullCheckSlowPath : public Mir2Lir::LIRSlowPath {
271    public:
272     NullCheckSlowPath(Mir2Lir* m2l, LIR* branch)
273         : LIRSlowPath(m2l, branch) {
274     }
275 
276     void Compile() OVERRIDE {
277       m2l_->ResetRegPool();
278       m2l_->ResetDefTracking();
279       GenerateTargetLabel(kPseudoThrowTarget);
280       m2l_->CallRuntimeHelper(kQuickThrowNullPointer, true);
281     }
282   };
283 
284   LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
285   AddSlowPath(new (arena_) NullCheckSlowPath(this, branch));
286   return branch;
287 }
288 
289 /* Perform null-check on a register.  */
290 LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
291   if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
292     return GenExplicitNullCheck(m_reg, opt_flags);
293   }
294   // If null check has not been eliminated, reset redundant store tracking.
295   if ((opt_flags & MIR_IGNORE_NULL_CHECK) == 0) {
296     ResetDefTracking();
297   }
298   return nullptr;
299 }
300 
301 /* Perform an explicit null-check on a register.  */
302 LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
303   if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
304     return nullptr;
305   }
306   return GenNullCheck(m_reg);
307 }
308 
309 void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
310   if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
311     if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
312       return;
313     }
314     // Insert after last instruction.
315     MarkSafepointPC(last_lir_insn_);
316   }
317 }
318 
319 void Mir2Lir::MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after) {
320   if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
321     if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
322       return;
323     }
324     MarkSafepointPCAfter(after);
325   }
326 }
327 
328 void Mir2Lir::MarkPossibleStackOverflowException() {
329   if (cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
330     MarkSafepointPC(last_lir_insn_);
331   }
332 }
333 
334 void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
335   if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
336     if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
337       return;
338     }
339     // Force an implicit null check by performing a memory operation (load) from the given
340     // register with offset 0.  This will cause a signal if the register contains 0 (null).
341     RegStorage tmp = AllocTemp();
342     // TODO: for Mips, would be best to use rZERO as the bogus register target.
343     LIR* load = Load32Disp(reg, 0, tmp);
344     FreeTemp(tmp);
345     MarkSafepointPC(load);
346   }
347 }
348 
349 void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
350                                   RegLocation rl_src2, LIR* taken) {
351   ConditionCode cond;
352   RegisterClass reg_class = (rl_src1.ref || rl_src2.ref) ? kRefReg : kCoreReg;
353   switch (opcode) {
354     case Instruction::IF_EQ:
355       cond = kCondEq;
356       break;
357     case Instruction::IF_NE:
358       cond = kCondNe;
359       break;
360     case Instruction::IF_LT:
361       cond = kCondLt;
362       break;
363     case Instruction::IF_GE:
364       cond = kCondGe;
365       break;
366     case Instruction::IF_GT:
367       cond = kCondGt;
368       break;
369     case Instruction::IF_LE:
370       cond = kCondLe;
371       break;
372     default:
373       cond = static_cast<ConditionCode>(0);
374       LOG(FATAL) << "Unexpected opcode " << opcode;
375   }
376 
377   // Normalize such that if either operand is constant, src2 will be constant
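  // E.g. a constant-first compare such as (7 > x) is rewritten as (x < 7) so that
  // the compare-immediate form below can be used.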
378   if (rl_src1.is_const) {
379     RegLocation rl_temp = rl_src1;
380     rl_src1 = rl_src2;
381     rl_src2 = rl_temp;
382     cond = FlipComparisonOrder(cond);
383   }
384 
385   rl_src1 = LoadValue(rl_src1, reg_class);
386   // Is this really an immediate comparison?
387   if (rl_src2.is_const) {
388     // If it's already live in a register or not easily materialized, just keep going
389     RegLocation rl_temp = UpdateLoc(rl_src2);
390     int32_t constant_value = mir_graph_->ConstantValue(rl_src2);
391     if ((rl_temp.location == kLocDalvikFrame) &&
392         InexpensiveConstantInt(constant_value, opcode)) {
393       // OK - convert this to a compare immediate and branch
394       OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
395       return;
396     }
397 
398     // It's also commonly more efficient to have a test against zero with Eq/Ne. This is not worse
399     // for x86, and allows a cbz/cbnz for Arm and Mips. At the same time, it works around a register
400     // mismatch for 64b systems, where a reference is compared against null, as dex bytecode uses
401     // the 32b literal 0 for null.
402     if (constant_value == 0 && (cond == kCondEq || cond == kCondNe)) {
403       // Use the OpCmpImmBranch and ignore the value in the register.
404       OpCmpImmBranch(cond, rl_src1.reg, 0, taken);
405       return;
406     }
407   }
408 
409   rl_src2 = LoadValue(rl_src2, reg_class);
410   OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
411 }
412 
413 void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken) {
414   ConditionCode cond;
415   RegisterClass reg_class = rl_src.ref ? kRefReg : kCoreReg;
416   rl_src = LoadValue(rl_src, reg_class);
417   switch (opcode) {
418     case Instruction::IF_EQZ:
419       cond = kCondEq;
420       break;
421     case Instruction::IF_NEZ:
422       cond = kCondNe;
423       break;
424     case Instruction::IF_LTZ:
425       cond = kCondLt;
426       break;
427     case Instruction::IF_GEZ:
428       cond = kCondGe;
429       break;
430     case Instruction::IF_GTZ:
431       cond = kCondGt;
432       break;
433     case Instruction::IF_LEZ:
434       cond = kCondLe;
435       break;
436     default:
437       cond = static_cast<ConditionCode>(0);
438       LOG(FATAL) << "Unexpected opcode " << opcode;
439   }
440   OpCmpImmBranch(cond, rl_src.reg, 0, taken);
441 }
442 
443 void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
444   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
445   if (rl_src.location == kLocPhysReg) {
446     OpRegCopy(rl_result.reg, rl_src.reg);
447   } else {
448     LoadValueDirect(rl_src, rl_result.reg.GetLow());
449   }
450   OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
451   StoreValueWide(rl_dest, rl_result);
452 }
453 
454 void Mir2Lir::GenLongToInt(RegLocation rl_dest, RegLocation rl_src) {
455   rl_src = UpdateLocWide(rl_src);
456   rl_src = NarrowRegLoc(rl_src);
457   StoreValue(rl_dest, rl_src);
458 }
459 
460 void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
461                               RegLocation rl_src) {
462   rl_src = LoadValue(rl_src, kCoreReg);
463   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
464   OpKind op = kOpInvalid;
465   switch (opcode) {
466     case Instruction::INT_TO_BYTE:
467       op = kOp2Byte;
468       break;
469     case Instruction::INT_TO_SHORT:
470        op = kOp2Short;
471        break;
472     case Instruction::INT_TO_CHAR:
473        op = kOp2Char;
474        break;
475     default:
476       LOG(ERROR) << "Bad int conversion type";
477   }
478   OpRegReg(op, rl_result.reg, rl_src.reg);
479   StoreValue(rl_dest, rl_result);
480 }
481 
482 /*
483  * Let helper function take care of everything.  Will call
484  * Array::AllocFromCode(type_idx, method, count);
485  * Note: AllocFromCode will handle checks for errNegativeArraySize.
486  */
487 void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
488                           RegLocation rl_src) {
489   FlushAllRegs();  /* Everything to home location */
490   const DexFile* dex_file = cu_->dex_file;
491   CompilerDriver* driver = cu_->compiler_driver;
492   if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file, type_idx)) {
493     bool is_type_initialized;  // Ignored as an array does not have an initializer.
494     bool use_direct_type_ptr;
495     uintptr_t direct_type_ptr;
496     bool is_finalizable;
497     if (kEmbedClassInCode &&
498         driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
499                                    &direct_type_ptr, &is_finalizable)) {
500       // The fast path.
501       if (!use_direct_type_ptr) {
502         LoadClassType(*dex_file, type_idx, kArg0);
503         CallRuntimeHelperRegRegLocationMethod(kQuickAllocArrayResolved, TargetReg(kArg0, kNotWide),
504                                               rl_src, true);
505       } else {
506         // Use the direct pointer.
507         CallRuntimeHelperImmRegLocationMethod(kQuickAllocArrayResolved, direct_type_ptr, rl_src,
508                                               true);
509       }
510     } else {
511       // The slow path.
512       CallRuntimeHelperImmRegLocationMethod(kQuickAllocArray, type_idx, rl_src, true);
513     }
514   } else {
515     CallRuntimeHelperImmRegLocationMethod(kQuickAllocArrayWithAccessCheck, type_idx, rl_src, true);
516   }
517   StoreValue(rl_dest, GetReturn(kRefReg));
518 }
519 
520 /*
521  * Similar to GenNewArray, but with post-allocation initialization.
522  * Verifier guarantees we're dealing with an array class.  Current
523  * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
524  * Current code also throws internal unimp if not 'L', '[' or 'I'.
525  */
526 void Mir2Lir::GenFilledNewArray(CallInfo* info) {
527   size_t elems = info->num_arg_words;
528   int type_idx = info->index;
529   FlushAllRegs();  /* Everything to home location */
530   QuickEntrypointEnum target;
531   if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
532                                                        type_idx)) {
533     target = kQuickCheckAndAllocArray;
534   } else {
535     target = kQuickCheckAndAllocArrayWithAccessCheck;
536   }
537   CallRuntimeHelperImmImmMethod(target, type_idx, elems, true);
538   FreeTemp(TargetReg(kArg2, kNotWide));
539   FreeTemp(TargetReg(kArg1, kNotWide));
540   /*
541    * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
542    * return region.  Because AllocFromCode placed the new array
543    * in kRet0, we'll just lock it into place.  When debugger support is
544    * added, it may be necessary to additionally copy all return
545    * values to a home location in thread-local storage
546    */
547   RegStorage ref_reg = TargetReg(kRet0, kRef);
548   LockTemp(ref_reg);
549 
550   // TODO: use the correct component size, currently all supported types
551   // share array alignment with ints (see comment at head of function)
552   size_t component_size = sizeof(int32_t);
553 
554   if (elems > 5) {
555     DCHECK(info->is_range);  // Non-range insn can't encode more than 5 elems.
556     /*
557      * Bit of ugliness here.  We're going to generate a mem copy loop
558      * on the register range, but it is possible that some regs
559      * in the range have been promoted.  This is unlikely, but
560      * before generating the copy, we'll just force a flush
561      * of any regs in the source range that have been promoted to
562      * home location.
563      */
564     for (size_t i = 0; i < elems; i++) {
565       RegLocation loc = UpdateLoc(info->args[i]);
566       if (loc.location == kLocPhysReg) {
567         ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
568         if (loc.ref) {
569           StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
570         } else {
571           Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
572         }
573       }
574     }
575     /*
576      * TUNING note: generated code here could be much improved, but
577      * this is an uncommon operation and isn't especially performance
578      * critical.
579      */
580     // This is addressing the stack, which may be out of the 4G area.
581     RegStorage r_src = AllocTempRef();
582     RegStorage r_dst = AllocTempRef();
583     RegStorage r_idx = AllocTempRef();  // Not really a reference, but match src/dst.
584     RegStorage r_val;
585     switch (cu_->instruction_set) {
586       case kThumb2:
587       case kArm64:
588         r_val = TargetReg(kLr, kNotWide);
589         break;
590       case kX86:
591       case kX86_64:
592         FreeTemp(ref_reg);
593         r_val = AllocTemp();
594         break;
595       case kMips:
596       case kMips64:
597         r_val = AllocTemp();
598         break;
599       default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
600     }
601     // Set up source pointer
602     RegLocation rl_first = info->args[0];
603     OpRegRegImm(kOpAdd, r_src, TargetPtrReg(kSp), SRegOffset(rl_first.s_reg_low));
604     // Set up the target pointer
605     OpRegRegImm(kOpAdd, r_dst, ref_reg,
606                 mirror::Array::DataOffset(component_size).Int32Value());
607     // Set up the loop counter (known to be > 0)
608     LoadConstant(r_idx, static_cast<int>(elems - 1));
609     // Generate the copy loop.  Going backwards for convenience
610     LIR* loop_head_target = NewLIR0(kPseudoTargetLabel);
611     // Copy next element
612     {
613       ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
614       LoadBaseIndexed(r_src, r_idx, r_val, 2, k32);
615       // NOTE: No dalvik register annotation, local optimizations will be stopped
616       // by the loop boundaries.
617     }
618     StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32);
619     FreeTemp(r_val);
620     OpDecAndBranch(kCondGe, r_idx, loop_head_target);
621     if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
622       // Restore the target pointer
623       OpRegRegImm(kOpAdd, ref_reg, r_dst,
624                   -mirror::Array::DataOffset(component_size).Int32Value());
625     }
626     FreeTemp(r_idx);
627     FreeTemp(r_dst);
628     FreeTemp(r_src);
629   } else {
630     DCHECK_LE(elems, 5u);  // Usually but not necessarily non-range.
631     // TUNING: interleave
632     for (size_t i = 0; i < elems; i++) {
633       RegLocation rl_arg;
634       if (info->args[i].ref) {
635         rl_arg = LoadValue(info->args[i], kRefReg);
636         StoreRefDisp(ref_reg,
637                     mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg,
638                     kNotVolatile);
639       } else {
640         rl_arg = LoadValue(info->args[i], kCoreReg);
641         Store32Disp(ref_reg,
642                     mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
643       }
644       // If the LoadValue caused a temp to be allocated, free it
645       if (IsTemp(rl_arg.reg)) {
646         FreeTemp(rl_arg.reg);
647       }
648     }
649   }
650   if (elems != 0 && info->args[0].ref) {
651     // If there is at least one potentially non-null value, unconditionally mark the GC card.
652     for (size_t i = 0; i < elems; i++) {
653       if (!mir_graph_->IsConstantNullRef(info->args[i])) {
654         UnconditionallyMarkGCCard(ref_reg);
655         break;
656       }
657     }
658   }
659   if (info->result.location != kLocInvalid) {
660     StoreValue(info->result, GetReturn(kRefReg));
661   }
662 }
663 
664 /*
665  * Array data table format:
666  *  ushort ident = 0x0300   magic value
667  *  ushort width            width of each element in the table
668  *  uint   size             number of elements in the table
669  *  ubyte  data[size*width] table of data values (may contain a single-byte
670  *                          padding at the end)
671  *
672  * Total size is 4+(width * size + 1)/2 16-bit code units.
673  */
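// For illustration only: the payload described above corresponds roughly to the
// following layout (field names here are illustrative; see
// Instruction::ArrayDataPayload, used below, for the real definition):
//
//   struct ArrayDataPayloadSketch {
//     uint16_t ident;          // 0x0300 magic value
//     uint16_t element_width;  // bytes per element
//     uint32_t element_count;  // number of elements
//     uint8_t  data[];         // element_count * element_width bytes, padded to 16 bits
//   };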
674 void Mir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
675   if (kIsDebugBuild) {
676     const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
677     const Instruction::ArrayDataPayload* payload =
678         reinterpret_cast<const Instruction::ArrayDataPayload*>(table);
679     CHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature));
680   }
681   uint32_t table_offset_from_start = mir->offset + static_cast<int32_t>(table_offset);
682   CallRuntimeHelperImmRegLocation(kQuickHandleFillArrayData, table_offset_from_start, rl_src, true);
683 }
684 
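// SPUT: stores rl_src into a static field. The fast path resolves the storage
// base (the current method's declaring class, or via GenGetOtherTypeForSgetSput)
// and stores at the known field offset; otherwise it falls back to the quick
// Set*Static entrypoint selected below.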
685 void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, OpSize size) {
686   const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
687   DCHECK_EQ(SPutMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
688   cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
689   if (!ForceSlowFieldPath(cu_) && field_info.FastPut()) {
690     DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
691     RegStorage r_base;
692     if (field_info.IsReferrersClass()) {
693       // Fast path, static storage base is this method's class
694       r_base = AllocTempRef();
695       RegStorage r_method = LoadCurrMethodWithHint(r_base);
696       LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
697                   kNotVolatile);
698     } else {
699       // Medium path, static storage base in a different class which requires checks that the other
700       // class is initialized.
701       r_base = GenGetOtherTypeForSgetSput(field_info, mir->optimization_flags);
702       if (!field_info.IsClassInitialized() &&
703           (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0) {
704         // Ensure load of status and store of value don't re-order.
705         // TODO: Presumably the actual value store is control-dependent on the status load,
706         // and will thus not be reordered in any case, since stores are never speculated.
707         // Does later code "know" that the class is now initialized?  If so, we still
708         // need the barrier to guard later static loads.
709         GenMemBarrier(kLoadAny);
710       }
711     }
712     // r_base now holds the static storage base
713     RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
714     if (IsWide(size)) {
715       rl_src = LoadValueWide(rl_src, reg_class);
716     } else {
717       rl_src = LoadValue(rl_src, reg_class);
718     }
719     if (IsRef(size)) {
720       StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg,
721                    field_info.IsVolatile() ? kVolatile : kNotVolatile);
722     } else {
723       StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, size,
724                     field_info.IsVolatile() ? kVolatile : kNotVolatile);
725     }
726     if (IsRef(size) && !mir_graph_->IsConstantNullRef(rl_src)) {
727       MarkGCCard(mir->optimization_flags, rl_src.reg, r_base);
728     }
729     FreeTemp(r_base);
730   } else {
731     FlushAllRegs();  // Everything to home locations
732     QuickEntrypointEnum target;
733     switch (size) {
734       case kReference:
735         target = kQuickSetObjStatic;
736         break;
737       case k64:
738       case kDouble:
739         target = kQuickSet64Static;
740         break;
741       case k32:
742       case kSingle:
743         target = kQuickSet32Static;
744         break;
745       case kSignedHalf:
746       case kUnsignedHalf:
747         target = kQuickSet16Static;
748         break;
749       case kSignedByte:
750       case kUnsignedByte:
751         target = kQuickSet8Static;
752         break;
753       case kWord:  // Intentional fallthrough.
754       default:
755         LOG(FATAL) << "Can't determine entrypoint for: " << size;
756         target = kQuickSet32Static;
757     }
758     CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_src, true);
759   }
760 }
761 
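// SGET: loads a static field into rl_dest. Mirrors GenSput: the fast path loads
// from the resolved storage base at the known field offset, the slow path calls
// the quick Get*Static entrypoint for the field's type.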
762 void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, OpSize size, Primitive::Type type) {
763   const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
764   DCHECK_EQ(SGetMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
765   cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
766 
767   if (!ForceSlowFieldPath(cu_) && field_info.FastGet()) {
768     DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
769     RegStorage r_base;
770     if (field_info.IsReferrersClass()) {
771       // Fast path, static storage base is this method's class
772       r_base = AllocTempRef();
773       RegStorage r_method = LoadCurrMethodWithHint(r_base);
774       LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
775                   kNotVolatile);
776     } else {
777       // Medium path, static storage base in a different class which requires checks that the other
778       // class is initialized
779       r_base = GenGetOtherTypeForSgetSput(field_info, mir->optimization_flags);
780       if (!field_info.IsClassInitialized() &&
781           (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0) {
782         // Ensure load of status and load of value don't re-order.
783         GenMemBarrier(kLoadAny);
784       }
785     }
786     // r_base now holds static storage base
787     RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
788     RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
789 
790     int field_offset = field_info.FieldOffset().Int32Value();
791     if (IsRef(size)) {
792       // TODO: DCHECK?
793       LoadRefDisp(r_base, field_offset, rl_result.reg, field_info.IsVolatile() ? kVolatile :
794           kNotVolatile);
795     } else {
796       LoadBaseDisp(r_base, field_offset, rl_result.reg, size, field_info.IsVolatile() ?
797           kVolatile : kNotVolatile);
798     }
799     FreeTemp(r_base);
800 
801     if (IsWide(size)) {
802       StoreValueWide(rl_dest, rl_result);
803     } else {
804       StoreValue(rl_dest, rl_result);
805     }
806   } else {
807     DCHECK(SizeMatchesTypeForEntrypoint(size, type));
808     FlushAllRegs();  // Everything to home locations
809     QuickEntrypointEnum target;
810     switch (type) {
811       case Primitive::kPrimNot:
812         target = kQuickGetObjStatic;
813         break;
814       case Primitive::kPrimLong:
815       case Primitive::kPrimDouble:
816         target = kQuickGet64Static;
817         break;
818       case Primitive::kPrimInt:
819       case Primitive::kPrimFloat:
820         target = kQuickGet32Static;
821         break;
822       case Primitive::kPrimShort:
823         target = kQuickGetShortStatic;
824         break;
825       case Primitive::kPrimChar:
826         target = kQuickGetCharStatic;
827         break;
828       case Primitive::kPrimByte:
829         target = kQuickGetByteStatic;
830         break;
831       case Primitive::kPrimBoolean:
832         target = kQuickGetBooleanStatic;
833         break;
834       case Primitive::kPrimVoid:  // Intentional fallthrough.
835       default:
836         LOG(FATAL) << "Can't determine entrypoint for: " << type;
837         target = kQuickGet32Static;
838     }
839     CallRuntimeHelperImm(target, field_info.FieldIndex(), true);
840 
841     // FIXME: pGetXXStatic always returns an int or int64 regardless of rl_dest.fp.
842     if (IsWide(size)) {
843       RegLocation rl_result = GetReturnWide(kCoreReg);
844       StoreValueWide(rl_dest, rl_result);
845     } else {
846       RegLocation rl_result = GetReturn(rl_dest.ref ? kRefReg : kCoreReg);
847       StoreValue(rl_dest, rl_result);
848     }
849   }
850 }
851 
852 // Generate code for all slow paths.
853 void Mir2Lir::HandleSlowPaths() {
854   // We should check slow_paths_.Size() every time, because a new slow path
855   // may be created during slowpath->Compile().
856   for (LIRSlowPath* slowpath : slow_paths_) {
857     slowpath->Compile();
858   }
859   slow_paths_.clear();
860 }
861 
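// IGET: loads an instance field of rl_obj into rl_dest, with a null check on the
// object. The slow path calls the quick Get*Instance entrypoint selected by the
// field's primitive type.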
862 void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
863                       RegLocation rl_dest, RegLocation rl_obj) {
864   const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
865   if (kIsDebugBuild) {
866     auto mem_access_type = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode) ?
867         IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode) :
868         IGetMemAccessType(mir->dalvikInsn.opcode);
869     DCHECK_EQ(mem_access_type, field_info.MemAccessType()) << mir->dalvikInsn.opcode;
870   }
871   cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
872   if (!ForceSlowFieldPath(cu_) && field_info.FastGet()) {
873     RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
874     // A load of the class will lead to an iget with offset 0.
875     DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
876     rl_obj = LoadValue(rl_obj, kRefReg);
877     GenNullCheck(rl_obj.reg, opt_flags);
878     RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
879     int field_offset = field_info.FieldOffset().Int32Value();
880     LIR* load_lir;
881     if (IsRef(size)) {
882       load_lir = LoadRefDisp(rl_obj.reg, field_offset, rl_result.reg, field_info.IsVolatile() ?
883           kVolatile : kNotVolatile);
884     } else {
885       load_lir = LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, size,
886                               field_info.IsVolatile() ? kVolatile : kNotVolatile);
887     }
888     MarkPossibleNullPointerExceptionAfter(opt_flags, load_lir);
889     if (IsWide(size)) {
890       StoreValueWide(rl_dest, rl_result);
891     } else {
892       StoreValue(rl_dest, rl_result);
893     }
894   } else {
895     DCHECK(SizeMatchesTypeForEntrypoint(size, type));
896     QuickEntrypointEnum target;
897     switch (type) {
898       case Primitive::kPrimNot:
899         target = kQuickGetObjInstance;
900         break;
901       case Primitive::kPrimLong:
902       case Primitive::kPrimDouble:
903         target = kQuickGet64Instance;
904         break;
905       case Primitive::kPrimFloat:
906       case Primitive::kPrimInt:
907         target = kQuickGet32Instance;
908         break;
909       case Primitive::kPrimShort:
910         target = kQuickGetShortInstance;
911         break;
912       case Primitive::kPrimChar:
913         target = kQuickGetCharInstance;
914         break;
915       case Primitive::kPrimByte:
916         target = kQuickGetByteInstance;
917         break;
918       case Primitive::kPrimBoolean:
919         target = kQuickGetBooleanInstance;
920         break;
921       case Primitive::kPrimVoid:  // Intentional fallthrough.
922       default:
923         LOG(FATAL) << "Can't determine entrypoint for: " << type;
924         target = kQuickGet32Instance;
925     }
926     // Second argument of pGetXXInstance is always a reference.
927     DCHECK_EQ(static_cast<unsigned int>(rl_obj.wide), 0U);
928     CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_obj, true);
929 
930     // FIXME: pGetXXInstance always returns an int or int64 regardless of rl_dest.fp.
931     if (IsWide(size)) {
932       RegLocation rl_result = GetReturnWide(kCoreReg);
933       StoreValueWide(rl_dest, rl_result);
934     } else {
935       RegLocation rl_result = GetReturn(rl_dest.ref ? kRefReg : kCoreReg);
936       StoreValue(rl_dest, rl_result);
937     }
938   }
939 }
940 
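// IPUT: stores rl_src into an instance field of rl_obj, with a null check and a
// GC card mark for non-null reference stores. The slow path calls the quick
// Set*Instance entrypoint selected by the field's size.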
941 void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
942                       RegLocation rl_src, RegLocation rl_obj) {
943   const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
944   if (kIsDebugBuild) {
945     auto mem_access_type = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode) ?
946         IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode) :
947         IPutMemAccessType(mir->dalvikInsn.opcode);
948     DCHECK_EQ(mem_access_type, field_info.MemAccessType());
949   }
950   cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
951   if (!ForceSlowFieldPath(cu_) && field_info.FastPut()) {
952     RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
953     // Dex code never writes to the class field.
954     DCHECK_GE(static_cast<uint32_t>(field_info.FieldOffset().Int32Value()),
955               sizeof(mirror::HeapReference<mirror::Class>));
956     rl_obj = LoadValue(rl_obj, kRefReg);
957     if (IsWide(size)) {
958       rl_src = LoadValueWide(rl_src, reg_class);
959     } else {
960       rl_src = LoadValue(rl_src, reg_class);
961     }
962     GenNullCheck(rl_obj.reg, opt_flags);
963     int field_offset = field_info.FieldOffset().Int32Value();
964     LIR* null_ck_insn;
965     if (IsRef(size)) {
966       null_ck_insn = StoreRefDisp(rl_obj.reg, field_offset, rl_src.reg, field_info.IsVolatile() ?
967           kVolatile : kNotVolatile);
968     } else {
969       null_ck_insn = StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, size,
970                                    field_info.IsVolatile() ? kVolatile : kNotVolatile);
971     }
972     MarkPossibleNullPointerExceptionAfter(opt_flags, null_ck_insn);
973     if (IsRef(size) && !mir_graph_->IsConstantNullRef(rl_src)) {
974       MarkGCCard(opt_flags, rl_src.reg, rl_obj.reg);
975     }
976   } else {
977     QuickEntrypointEnum target;
978     switch (size) {
979       case kReference:
980         target = kQuickSetObjInstance;
981         break;
982       case k64:
983       case kDouble:
984         target = kQuickSet64Instance;
985         break;
986       case k32:
987       case kSingle:
988         target = kQuickSet32Instance;
989         break;
990       case kSignedHalf:
991       case kUnsignedHalf:
992         target = kQuickSet16Instance;
993         break;
994       case kSignedByte:
995       case kUnsignedByte:
996         target = kQuickSet8Instance;
997         break;
998       case kWord:  // Intentional fallthrough.
999       default:
1000         LOG(FATAL) << "Can't determine entrypoint for: " << size;
1001         target = kQuickSet32Instance;
1002     }
1003     CallRuntimeHelperImmRegLocationRegLocation(target, field_info.FieldIndex(), rl_obj, rl_src,
1004                                                true);
1005   }
1006 }
1007 
1008 void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
1009                              RegLocation rl_src) {
1010   bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
1011   bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
1012       (opt_flags & MIR_IGNORE_NULL_CHECK));
1013   QuickEntrypointEnum target = needs_range_check
1014         ? (needs_null_check ? kQuickAputObjectWithNullAndBoundCheck
1015                             : kQuickAputObjectWithBoundCheck)
1016         : kQuickAputObject;
1017   CallRuntimeHelperRegLocationRegLocationRegLocation(target, rl_array, rl_index, rl_src, true);
1018 }
1019 
1020 void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
1021   RegLocation rl_result;
1022   if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
1023                                                         *cu_->dex_file,
1024                                                         type_idx)) {
1025     // Call out to helper which resolves type and verifies access.
1026     // Resolved type returned in kRet0.
1027     CallRuntimeHelperImm(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
1028     rl_result = GetReturn(kRefReg);
1029   } else {
1030     rl_result = EvalLoc(rl_dest, kRefReg, true);
1031     // We don't need access checks, load type from dex cache
1032     if (CanUseOpPcRelDexCacheArrayLoad()) {
1033       size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
1034       OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg, false);
1035     } else {
1036       int32_t dex_cache_offset =
1037           ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
1038       RegStorage res_reg = AllocTempRef();
1039       RegStorage r_method = LoadCurrMethodWithHint(res_reg);
1040       LoadRefDisp(r_method, dex_cache_offset, res_reg, kNotVolatile);
1041       int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
1042       LoadRefDisp(res_reg, offset_of_type, rl_result.reg, kNotVolatile);
1043       FreeTemp(res_reg);
1044     }
1045     if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
1046         type_idx) || ForceSlowTypePath(cu_)) {
1047       // Slow path, at runtime test if type is null and if so initialize
1048       FlushAllRegs();
1049       GenIfNullUseHelperImm(rl_result.reg, kQuickInitializeType, type_idx);
1050     }
1051   }
1052   StoreValue(rl_dest, rl_result);
1053 }
1054 
1055 void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
1056   /* NOTE: Most strings should be available at compile time */
1057   int32_t offset_of_string = mirror::ObjectArray<mirror::String>::OffsetOfElement(string_idx).
1058                                                                                       Int32Value();
1059   if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
1060       *cu_->dex_file, string_idx) || ForceSlowStringPath(cu_)) {
1061     // slow path, resolve string if not in dex cache
1062     FlushAllRegs();
1063     LockCallTemps();  // Using explicit registers
1064 
1065     // Might call out to helper, which will return resolved string in kRet0
1066     RegStorage ret0 = TargetReg(kRet0, kRef);
1067     if (CanUseOpPcRelDexCacheArrayLoad()) {
1068       size_t offset = dex_cache_arrays_layout_.StringOffset(string_idx);
1069       OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, ret0, false);
1070     } else {
1071       // Method to declaring class.
1072       RegStorage arg0 = TargetReg(kArg0, kRef);
1073       RegStorage r_method = LoadCurrMethodWithHint(arg0);
1074       LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), arg0, kNotVolatile);
1075       // Declaring class to dex cache strings.
1076       LoadRefDisp(arg0, mirror::Class::DexCacheStringsOffset().Int32Value(), arg0, kNotVolatile);
1077 
1078       LoadRefDisp(arg0, offset_of_string, ret0, kNotVolatile);
1079     }
1080     GenIfNullUseHelperImm(ret0, kQuickResolveString, string_idx);
1081 
1082     GenBarrier();
1083     StoreValue(rl_dest, GetReturn(kRefReg));
1084   } else {
1085     RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
1086     if (CanUseOpPcRelDexCacheArrayLoad()) {
1087       size_t offset = dex_cache_arrays_layout_.StringOffset(string_idx);
1088       OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg, false);
1089     } else {
1090       RegLocation rl_method = LoadCurrMethod();
1091       RegStorage res_reg = AllocTempRef();
1092       LoadRefDisp(rl_method.reg, ArtMethod::DeclaringClassOffset().Int32Value(), res_reg,
1093                   kNotVolatile);
1094       LoadRefDisp(res_reg, mirror::Class::DexCacheStringsOffset().Int32Value(), res_reg,
1095                   kNotVolatile);
1096       LoadRefDisp(res_reg, offset_of_string, rl_result.reg, kNotVolatile);
1097       FreeTemp(res_reg);
1098     }
1099     StoreValue(rl_dest, rl_result);
1100   }
1101 }
1102 
1103 /*
1104  * Let helper function take care of everything.  Will
1105  * call Class::NewInstanceFromCode(type_idx, method);
1106  */
1107 void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
1108   FlushAllRegs();  /* Everything to home location */
1109   // alloc will always check for resolution, do we also need to verify
1110   // access because the verifier was unable to?
1111   const DexFile* dex_file = cu_->dex_file;
1112   CompilerDriver* driver = cu_->compiler_driver;
1113   if (driver->CanAccessInstantiableTypeWithoutChecks(cu_->method_idx, *dex_file, type_idx)) {
1114     bool is_type_initialized;
1115     bool use_direct_type_ptr;
1116     uintptr_t direct_type_ptr;
1117     bool is_finalizable;
1118     if (kEmbedClassInCode &&
1119         driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
1120                                    &direct_type_ptr, &is_finalizable) &&
1121                                    !is_finalizable) {
1122       // The fast path.
1123       if (!use_direct_type_ptr) {
1124         LoadClassType(*dex_file, type_idx, kArg0);
1125         if (!is_type_initialized) {
1126           CallRuntimeHelperRegMethod(kQuickAllocObjectResolved, TargetReg(kArg0, kRef), true);
1127         } else {
1128           CallRuntimeHelperRegMethod(kQuickAllocObjectInitialized, TargetReg(kArg0, kRef), true);
1129         }
1130       } else {
1131         // Use the direct pointer.
1132         if (!is_type_initialized) {
1133           CallRuntimeHelperImmMethod(kQuickAllocObjectResolved, direct_type_ptr, true);
1134         } else {
1135           CallRuntimeHelperImmMethod(kQuickAllocObjectInitialized, direct_type_ptr, true);
1136         }
1137       }
1138     } else {
1139       // The slow path.
1140       CallRuntimeHelperImmMethod(kQuickAllocObject, type_idx, true);
1141     }
1142   } else {
1143     CallRuntimeHelperImmMethod(kQuickAllocObjectWithAccessCheck, type_idx, true);
1144   }
1145   StoreValue(rl_dest, GetReturn(kRefReg));
1146 }
1147 
1148 void Mir2Lir::GenThrow(RegLocation rl_src) {
1149   FlushAllRegs();
1150   CallRuntimeHelperRegLocation(kQuickDeliverException, rl_src, true);
1151 }
1152 
1153 // For final classes there are no sub-classes to check and so we can answer the instance-of
1154 // question with simple comparisons.
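// E.g. for "obj instanceof String" (java.lang.String is final), it suffices to
// compare obj->klass_ against the String class pointer.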
1155 void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
1156                                  RegLocation rl_src) {
1157   // X86 has its own implementation.
1158   DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
1159 
1160   RegLocation object = LoadValue(rl_src, kRefReg);
1161   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1162   RegStorage result_reg = rl_result.reg;
1163   if (IsSameReg(result_reg, object.reg)) {
1164     result_reg = AllocTypedTemp(false, kCoreReg);
1165     DCHECK(!IsSameReg(result_reg, object.reg));
1166   }
1167   LoadConstant(result_reg, 0);     // assume false
1168   LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
1169 
1170   RegStorage check_class = AllocTypedTemp(false, kRefReg);
1171   RegStorage object_class = AllocTypedTemp(false, kRefReg);
1172 
1173   if (use_declaring_class) {
1174     RegStorage r_method = LoadCurrMethodWithHint(check_class);
1175     LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), check_class,
1176                 kNotVolatile);
1177     LoadRefDisp(object.reg,  mirror::Object::ClassOffset().Int32Value(), object_class,
1178                 kNotVolatile);
1179   } else if (CanUseOpPcRelDexCacheArrayLoad()) {
1180     size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
1181     OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, check_class, false);
1182     LoadRefDisp(object.reg,  mirror::Object::ClassOffset().Int32Value(), object_class,
1183                 kNotVolatile);
1184   } else {
1185     RegStorage r_method = LoadCurrMethodWithHint(check_class);
1186     LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
1187                 check_class, kNotVolatile);
1188     LoadRefDisp(object.reg,  mirror::Object::ClassOffset().Int32Value(), object_class,
1189                 kNotVolatile);
1190     int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
1191     LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
1192   }
1193 
1194   // FIXME: what should we be comparing here? compressed or decompressed references?
1195   if (cu_->instruction_set == kThumb2) {
1196     OpRegReg(kOpCmp, check_class, object_class);  // Same?
1197     LIR* it = OpIT(kCondEq, "");   // if-convert the test
1198     LoadConstant(result_reg, 1);     // .eq case - load true
1199     OpEndIT(it);
1200   } else {
1201     GenSelectConst32(check_class, object_class, kCondEq, 1, 0, result_reg, kCoreReg);
1202   }
1203   LIR* target = NewLIR0(kPseudoTargetLabel);
1204   null_branchover->target = target;
1205   FreeTemp(object_class);
1206   FreeTemp(check_class);
1207   if (IsTemp(result_reg)) {
1208     OpRegCopy(rl_result.reg, result_reg);
1209     FreeTemp(result_reg);
1210   }
1211   StoreValue(rl_dest, rl_result);
1212 }
1213 
1214 void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
1215                                          bool type_known_abstract, bool use_declaring_class,
1216                                          bool can_assume_type_is_in_dex_cache,
1217                                          uint32_t type_idx, RegLocation rl_dest,
1218                                          RegLocation rl_src) {
1219   FlushAllRegs();
1220   // May generate a call - use explicit registers
1221   LockCallTemps();
1222   RegStorage class_reg = TargetReg(kArg2, kRef);  // kArg2 will hold the Class*
1223   RegStorage ref_reg = TargetReg(kArg0, kRef);  // kArg0 will hold the ref.
1224   RegStorage ret_reg = GetReturn(kRefReg).reg;
1225   if (needs_access_check) {
1226     // Check we have access to type_idx and if not throw IllegalAccessError,
1227     // returns Class* in kArg0
1228     CallRuntimeHelperImmMethod(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
1229     OpRegCopy(class_reg, ret_reg);  // Align usage with fast path
1230     LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref
1231   } else if (use_declaring_class) {
1232     RegStorage r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef));
1233     LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref
1234     LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(),
1235                 class_reg, kNotVolatile);
1236   } else {
1237     if (can_assume_type_is_in_dex_cache) {
1238       // Conditionally, as in the other case we will also load it.
1239       LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref
1240     }
1241 
1242     if (CanUseOpPcRelDexCacheArrayLoad()) {
1243       size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
1244       OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg, false);
1245     } else {
1246       RegStorage r_method = LoadCurrMethodWithHint(class_reg);
1247       // Load dex cache entry into class_reg (kArg2)
1248       LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
1249                   class_reg, kNotVolatile);
1250       int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
1251       LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
1252     }
1253     if (!can_assume_type_is_in_dex_cache) {
1254       GenIfNullUseHelperImm(class_reg, kQuickInitializeType, type_idx);
1255 
1256       // Should load value here.
1257       LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref
1258     }
1259   }
1260   /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
1261   RegLocation rl_result = GetReturn(kCoreReg);
1262   if (!IsSameReg(rl_result.reg, ref_reg)) {
1263     // On MIPS and x86_64, rArg0 != rl_result; place false in the result in case the branch is taken.
1264     LoadConstant(rl_result.reg, 0);
1265   }
1266   LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, nullptr);
1267 
1268   /* load object->klass_ */
1269   RegStorage ref_class_reg = TargetReg(kArg1, kRef);  // kArg1 will hold the Class* of ref.
1270   DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
1271   LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(),
1272               ref_class_reg, kNotVolatile);
1273   /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
1274   LIR* branchover = nullptr;
1275   if (type_known_final) {
1276     // rl_result == ref == class.
1277     GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg,
1278                      kCoreReg);
1279   } else {
1280     if (cu_->instruction_set == kThumb2) {
1281       RegStorage r_tgt = LoadHelper(kQuickInstanceofNonTrivial);
1282       LIR* it = nullptr;
1283       if (!type_known_abstract) {
1284         /* Uses conditional nullification */
1285         OpRegReg(kOpCmp, ref_class_reg, class_reg);  // Same?
1286         it = OpIT(kCondEq, "EE");   // if-convert the test
1287         LoadConstant(rl_result.reg, 1);     // .eq case - load true
1288       }
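      // When the IT block is active, the constant load above executes only in the eq case,
      // while the copy and helper call below fill its two "E" slots and execute only in the
      // ne case, so equal classes fall through with the result already set to true.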
1289       OpRegCopy(ref_reg, class_reg);    // .ne case - arg0 <= class
1290       OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
1291       if (it != nullptr) {
1292         OpEndIT(it);
1293       }
1294       FreeTemp(r_tgt);
1295     } else {
1296       if (!type_known_abstract) {
1297         /* Uses branchovers */
1298         LoadConstant(rl_result.reg, 1);     // assume true
1299         branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), nullptr);
1300       }
1301 
1302       OpRegCopy(TargetReg(kArg0, kRef), class_reg);    // .ne case - arg0 <= class
1303       CallRuntimeHelper(kQuickInstanceofNonTrivial, false);
1304     }
1305   }
1306   // TODO: only clobber when type isn't final?
1307   ClobberCallerSave();
1308   /* branch targets here */
1309   LIR* target = NewLIR0(kPseudoTargetLabel);
1310   StoreValue(rl_dest, rl_result);
1311   branch1->target = target;
1312   if (branchover != nullptr) {
1313     branchover->target = target;
1314   }
1315 }
1316 
1317 void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
1318   bool type_known_final, type_known_abstract, use_declaring_class;
1319   bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
1320                                                                               *cu_->dex_file,
1321                                                                               type_idx,
1322                                                                               &type_known_final,
1323                                                                               &type_known_abstract,
1324                                                                               &use_declaring_class);
1325   bool can_assume_type_is_in_dex_cache = !needs_access_check &&
1326       cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);
1327 
1328   if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
1329     GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
1330   } else {
1331     GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
1332                                use_declaring_class, can_assume_type_is_in_dex_cache,
1333                                type_idx, rl_dest, rl_src);
1334   }
1335 }
1336 
1337 void Mir2Lir::GenCheckCast(int opt_flags, uint32_t insn_idx, uint32_t type_idx,
1338                            RegLocation rl_src) {
1339   if ((opt_flags & MIR_IGNORE_CHECK_CAST) != 0) {
1340     // Compiler analysis proved that this check-cast would never cause an exception.
1341     return;
1342   }
1343   bool type_known_final, type_known_abstract, use_declaring_class;
1344   bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
1345                                                                               *cu_->dex_file,
1346                                                                               type_idx,
1347                                                                               &type_known_final,
1348                                                                               &type_known_abstract,
1349                                                                               &use_declaring_class);
1350   // Note: type_known_final is currently unused, as optimizing for it would only improve
1351   // the performance of the exception throw path.
1352   DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
1353   if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
1354     // Verifier type analysis proved this check cast would never cause an exception.
1355     return;
1356   }
1357   FlushAllRegs();
1358   // May generate a call - use explicit registers
1359   LockCallTemps();
1360   RegStorage class_reg = TargetReg(kArg2, kRef);  // kArg2 will hold the Class*
1361   if (needs_access_check) {
1362     // Check that we have access to type_idx; if not, an IllegalAccessError is thrown.
1363     // The helper returns the Class* in kRet0.
1364     // InitializeTypeAndVerifyAccess(idx, method)
1365     CallRuntimeHelperImmMethod(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
1366     OpRegCopy(class_reg, TargetReg(kRet0, kRef));  // Align usage with fast path
1367   } else if (use_declaring_class) {
1368     RegStorage method_reg = LoadCurrMethodWithHint(TargetReg(kArg1, kRef));
1369     LoadRefDisp(method_reg, ArtMethod::DeclaringClassOffset().Int32Value(),
1370                 class_reg, kNotVolatile);
1371   } else {
1372     // Load dex cache entry into class_reg (kArg2)
1373     if (CanUseOpPcRelDexCacheArrayLoad()) {
1374       size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
1375       OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg, false);
1376     } else {
1377       RegStorage r_method = LoadCurrMethodWithHint(class_reg);
1378 
1379       LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
1380                   class_reg, kNotVolatile);
1381       int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
1382       LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
1383     }
1384     if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
1385       // Need to test presence of type in dex cache at runtime
1386       GenIfNullUseHelperImm(class_reg, kQuickInitializeType, type_idx);
1387     }
1388   }
1389   // At this point, class_reg (kArg2) has class
1390   LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef));  // kArg0 <= ref
1391 
1392   // Slow path for the case where the classes are not equal.  In this case we need
1393   // to call a helper function to do the check.
1394   class SlowPath : public LIRSlowPath {
1395    public:
1396     SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load)
1397         : LIRSlowPath(m2l, fromfast, cont), load_(load) {
1398     }
1399 
1400     void Compile() {
1401       GenerateTargetLabel();
1402 
1403       if (load_) {
1404         m2l_->LoadRefDisp(m2l_->TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(),
1405                           m2l_->TargetReg(kArg1, kRef), kNotVolatile);
1406       }
1407       m2l_->CallRuntimeHelperRegReg(kQuickCheckCast, m2l_->TargetReg(kArg2, kRef),
1408                                     m2l_->TargetReg(kArg1, kRef), true);
1409       m2l_->OpUnconditionalBranch(cont_);
1410     }
1411 
1412    private:
1413     const bool load_;
1414   };
1415 
1416   if (type_known_abstract) {
1417     // Easier case, run slow path if target is non-null (slow path will load from target)
1418     LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0, kRef), 0, nullptr);
1419     LIR* cont = NewLIR0(kPseudoTargetLabel);
1420     AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
1421   } else {
1422     // Harder, more common case.  We need to generate a forward branch over the load
1423     // if the target is null.  If it's non-null we perform the load and branch to the
1424     // slow path if the classes are not equal.
1425 
1426     /* Null is OK - continue */
1427     LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0, kRef), 0, nullptr);
1428     /* load object->klass_ */
1429     DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
1430     LoadRefDisp(TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(),
1431                 TargetReg(kArg1, kRef), kNotVolatile);
1432 
1433     LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1, kRef), class_reg, nullptr);
1434     LIR* cont = NewLIR0(kPseudoTargetLabel);
1435 
1436     // Add the slow path that will not perform load since this is already done.
1437     AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));
1438 
1439     // Set the null check to branch to the continuation.
1440     branch1->target = cont;
1441   }
1442 }
1443 
1444 void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
1445                            RegLocation rl_src1, RegLocation rl_src2) {
1446   RegLocation rl_result;
1447   if (cu_->instruction_set == kThumb2) {
1448     /*
1449      * NOTE:  This is the one place in the code in which we might have
1450      * as many as six live temporary registers.  There are 5 in the normal
1451      * set for Arm.  Until we have spill capabilities, temporarily add
1452      * lr to the temp set.  It is safe to do this locally, but note that
1453      * lr is used explicitly elsewhere in the code generator and cannot
1454      * normally be used as a general temp register.
1455      */
1456     MarkTemp(TargetReg(kLr, kNotWide));   // Add lr to the temp pool
1457     FreeTemp(TargetReg(kLr, kNotWide));   // and make it available
1458   }
1459   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
1460   rl_src2 = LoadValueWide(rl_src2, kCoreReg);
1461   rl_result = EvalLoc(rl_dest, kCoreReg, true);
1462   // The longs may overlap - use intermediate temp if so
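  // Only the low word needs the temp: writing the low result first could clobber a source
  // high register that the second (high-word) operation still needs.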
1463   if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
1464     RegStorage t_reg = AllocTemp();
1465     OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
1466     OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
1467     OpRegCopy(rl_result.reg.GetLow(), t_reg);
1468     FreeTemp(t_reg);
1469   } else {
1470     OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
1471     OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
1472   }
1473   /*
1474    * NOTE: If rl_dest refers to a frame variable in a large frame, the
1475    * following StoreValueWide might need to allocate a temp register.
1476    * To further work around the lack of a spill capability, explicitly
1477    * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
1478    * Remove when spill is functional.
1479    */
1480   FreeRegLocTemps(rl_result, rl_src1);
1481   FreeRegLocTemps(rl_result, rl_src2);
1482   StoreValueWide(rl_dest, rl_result);
1483   if (cu_->instruction_set == kThumb2) {
1484     Clobber(TargetReg(kLr, kNotWide));
1485     UnmarkTemp(TargetReg(kLr, kNotWide));  // Remove lr from the temp pool
1486   }
1487 }
1488 
1489 void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
1490                              RegLocation rl_src1, RegLocation rl_shift) {
1491   QuickEntrypointEnum target;
1492   switch (opcode) {
1493     case Instruction::SHL_LONG:
1494     case Instruction::SHL_LONG_2ADDR:
1495       target = kQuickShlLong;
1496       break;
1497     case Instruction::SHR_LONG:
1498     case Instruction::SHR_LONG_2ADDR:
1499       target = kQuickShrLong;
1500       break;
1501     case Instruction::USHR_LONG:
1502     case Instruction::USHR_LONG_2ADDR:
1503       target = kQuickUshrLong;
1504       break;
1505     default:
1506       LOG(FATAL) << "Unexpected case";
1507       target = kQuickShlLong;
1508   }
1509   FlushAllRegs();   /* Send everything to home location */
1510   CallRuntimeHelperRegLocationRegLocation(target, rl_src1, rl_shift, false);
1511   RegLocation rl_result = GetReturnWide(kCoreReg);
1512   StoreValueWide(rl_dest, rl_result);
1513 }
1514 
1515 
1516 void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
1517                             RegLocation rl_src1, RegLocation rl_src2, int flags) {
1518   DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
1519   OpKind op = kOpBkpt;
1520   bool is_div_rem = false;
1521   bool check_zero = false;
1522   bool unary = false;
1523   RegLocation rl_result;
1524   bool shift_op = false;
1525   switch (opcode) {
1526     case Instruction::NEG_INT:
1527       op = kOpNeg;
1528       unary = true;
1529       break;
1530     case Instruction::NOT_INT:
1531       op = kOpMvn;
1532       unary = true;
1533       break;
1534     case Instruction::ADD_INT:
1535     case Instruction::ADD_INT_2ADDR:
1536       op = kOpAdd;
1537       break;
1538     case Instruction::SUB_INT:
1539     case Instruction::SUB_INT_2ADDR:
1540       op = kOpSub;
1541       break;
1542     case Instruction::MUL_INT:
1543     case Instruction::MUL_INT_2ADDR:
1544       op = kOpMul;
1545       break;
1546     case Instruction::DIV_INT:
1547     case Instruction::DIV_INT_2ADDR:
1548       check_zero = true;
1549       op = kOpDiv;
1550       is_div_rem = true;
1551       break;
1552     /* NOTE: returns in kArg1 */
1553     case Instruction::REM_INT:
1554     case Instruction::REM_INT_2ADDR:
1555       check_zero = true;
1556       op = kOpRem;
1557       is_div_rem = true;
1558       break;
1559     case Instruction::AND_INT:
1560     case Instruction::AND_INT_2ADDR:
1561       op = kOpAnd;
1562       break;
1563     case Instruction::OR_INT:
1564     case Instruction::OR_INT_2ADDR:
1565       op = kOpOr;
1566       break;
1567     case Instruction::XOR_INT:
1568     case Instruction::XOR_INT_2ADDR:
1569       op = kOpXor;
1570       break;
1571     case Instruction::SHL_INT:
1572     case Instruction::SHL_INT_2ADDR:
1573       shift_op = true;
1574       op = kOpLsl;
1575       break;
1576     case Instruction::SHR_INT:
1577     case Instruction::SHR_INT_2ADDR:
1578       shift_op = true;
1579       op = kOpAsr;
1580       break;
1581     case Instruction::USHR_INT:
1582     case Instruction::USHR_INT_2ADDR:
1583       shift_op = true;
1584       op = kOpLsr;
1585       break;
1586     default:
1587       LOG(FATAL) << "Invalid word arith op: " << opcode;
1588   }
1589   if (!is_div_rem) {
1590     if (unary) {
1591       rl_src1 = LoadValue(rl_src1, kCoreReg);
1592       rl_result = EvalLoc(rl_dest, kCoreReg, true);
1593       OpRegReg(op, rl_result.reg, rl_src1.reg);
1594     } else {
1595       if ((shift_op) && (cu_->instruction_set != kArm64)) {
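        // Dex shift instructions use only the low five bits of the shift count, so mask
        // the register amount with 31 before doing the variable shift.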
1596         rl_src2 = LoadValue(rl_src2, kCoreReg);
1597         RegStorage t_reg = AllocTemp();
1598         OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
1599         rl_src1 = LoadValue(rl_src1, kCoreReg);
1600         rl_result = EvalLoc(rl_dest, kCoreReg, true);
1601         OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
1602         FreeTemp(t_reg);
1603       } else {
1604         rl_src1 = LoadValue(rl_src1, kCoreReg);
1605         rl_src2 = LoadValue(rl_src2, kCoreReg);
1606         rl_result = EvalLoc(rl_dest, kCoreReg, true);
1607         OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
1608       }
1609     }
1610     StoreValue(rl_dest, rl_result);
1611   } else {
1612     bool done = false;      // Set to true if we happen to find a way to use a real instruction.
1613     if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64 ||
1614         cu_->instruction_set == kArm64) {
1615       rl_src1 = LoadValue(rl_src1, kCoreReg);
1616       rl_src2 = LoadValue(rl_src2, kCoreReg);
1617       if (check_zero && (flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
1618         GenDivZeroCheck(rl_src2.reg);
1619       }
1620       rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
1621       done = true;
1622     } else if (cu_->instruction_set == kThumb2) {
1623       if (cu_->compiler_driver->GetInstructionSetFeatures()->AsArmInstructionSetFeatures()->
1624               HasDivideInstruction()) {
1625         // Use ARM SDIV instruction for division.  For remainder we also need to
1626         // calculate using a MUL and subtract.
1627         rl_src1 = LoadValue(rl_src1, kCoreReg);
1628         rl_src2 = LoadValue(rl_src2, kCoreReg);
1629         if (check_zero && (flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
1630           GenDivZeroCheck(rl_src2.reg);
1631         }
1632         rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
1633         done = true;
1634       }
1635     }
1636 
1637     // If we haven't already generated the code, use the callout function.
1638     if (!done) {
1639       FlushAllRegs();   /* Send everything to home location */
1640       LoadValueDirectFixed(rl_src2, TargetReg(kArg1, kNotWide));
1641       RegStorage r_tgt = CallHelperSetup(kQuickIdivmod);
1642       LoadValueDirectFixed(rl_src1, TargetReg(kArg0, kNotWide));
1643       if (check_zero && (flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
1644         GenDivZeroCheck(TargetReg(kArg1, kNotWide));
1645       }
1646       // NOTE: callout here is not a safepoint.
1647       CallHelper(r_tgt, kQuickIdivmod, false /* not a safepoint */);
1648       if (op == kOpDiv)
1649         rl_result = GetReturn(kCoreReg);
1650       else
1651         rl_result = GetReturnAlt();
1652     }
1653     StoreValue(rl_dest, rl_result);
1654   }
1655 }
1656 
1657 /*
1658  * The following are the first-level codegen routines that analyze the format
1659  * of each bytecode then either dispatch special purpose codegen routines
1660  * or produce corresponding Thumb instructions directly.
1661  */
1662 
1663 // Returns true if no more than two bits are set in 'x'.
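// The first step clears the lowest set bit; the result then has at most one bit set
// (zero or a power of two) exactly when 'x' started with at most two bits set.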
1664 static bool IsPopCountLE2(unsigned int x) {
1665   x &= x - 1;
1666   return (x & (x - 1)) == 0;
1667 }
1668 
1669 // Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
1670 // and store the result in 'rl_dest'.
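// For signed division by 2^k the quotient must round toward zero, so a bias of (2^k - 1)
// is added to negative dividends before the arithmetic shift; the bias is built from the
// sign bits with an asr/lsr pair (a single lsr of the sign bit when k == 1).  The
// remainder path applies the same bias, masks with (2^k - 1), then subtracts the bias.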
1671 bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED, bool is_div,
1672                                RegLocation rl_src, RegLocation rl_dest, int lit) {
1673   if ((lit < 2) || (!IsPowerOfTwo(lit))) {
1674     return false;
1675   }
1676   int k = CTZ(lit);
1677   if (k >= 30) {
1678     // Avoid special cases.
1679     return false;
1680   }
1681   rl_src = LoadValue(rl_src, kCoreReg);
1682   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1683   if (is_div) {
1684     RegStorage t_reg = AllocTemp();
1685     if (lit == 2) {
1686       // Division by 2 is by far the most common division by constant.
1687       OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
1688       OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
1689       OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
1690     } else {
1691       OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
1692       OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
1693       OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
1694       OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
1695     }
1696   } else {
1697     RegStorage t_reg1 = AllocTemp();
1698     RegStorage t_reg2 = AllocTemp();
1699     if (lit == 2) {
1700       OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
1701       OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
1702       OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
1703       OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
1704     } else {
1705       OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
1706       OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
1707       OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
1708       OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
1709       OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
1710     }
1711   }
1712   StoreValue(rl_dest, rl_result);
1713   return true;
1714 }
1715 
1716 // Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
1717 // and store the result in 'rl_dest'.
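// E.g. lit == 10 (two set bits) is decomposed as (src << 3) + (src << 1), and
// lit == 7 (a power of two minus one) as (src << 3) - src.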
1718 bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
1719   if (lit < 0) {
1720     return false;
1721   }
1722   if (lit == 0) {
1723     RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1724     LoadConstant(rl_result.reg, 0);
1725     StoreValue(rl_dest, rl_result);
1726     return true;
1727   }
1728   if (lit == 1) {
1729     rl_src = LoadValue(rl_src, kCoreReg);
1730     RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1731     OpRegCopy(rl_result.reg, rl_src.reg);
1732     StoreValue(rl_dest, rl_result);
1733     return true;
1734   }
1735   // There is RegRegRegShift on Arm, so check for more special cases
1736   if (cu_->instruction_set == kThumb2) {
1737     return EasyMultiply(rl_src, rl_dest, lit);
1738   }
1739   // Can we simplify this multiplication?
1740   bool power_of_two = false;
1741   bool pop_count_le2 = false;
1742   bool power_of_two_minus_one = false;
1743   if (IsPowerOfTwo(lit)) {
1744     power_of_two = true;
1745   } else if (IsPopCountLE2(lit)) {
1746     pop_count_le2 = true;
1747   } else if (IsPowerOfTwo(lit + 1)) {
1748     power_of_two_minus_one = true;
1749   } else {
1750     return false;
1751   }
1752   rl_src = LoadValue(rl_src, kCoreReg);
1753   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1754   if (power_of_two) {
1755     // Shift.
1756     OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, CTZ(lit));
1757   } else if (pop_count_le2) {
1758     // Shift and add and shift.
1759     int first_bit = CTZ(lit);
1760     int second_bit = CTZ(lit ^ (1 << first_bit));
1761     GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
1762   } else {
1763     // Reverse subtract: (src << (shift + 1)) - src.
1764     DCHECK(power_of_two_minus_one);
1765     // TUNING: rsb dst, src, src lsl#CTZ(lit + 1)
1766     RegStorage t_reg = AllocTemp();
1767     OpRegRegImm(kOpLsl, t_reg, rl_src.reg, CTZ(lit + 1));
1768     OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
1769   }
1770   StoreValue(rl_dest, rl_result);
1771   return true;
1772 }
1773 
1774 // Returns true if it generates instructions.
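// The CanDivideByReciprocalMultiply* helpers are expected to accept only divisors whose
// reciprocal is exactly representable (powers of two), so multiplying by the reciprocal
// produces the same result as the division.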
1775 bool Mir2Lir::HandleEasyFloatingPointDiv(RegLocation rl_dest, RegLocation rl_src1,
1776                                          RegLocation rl_src2) {
1777   if (!rl_src2.is_const ||
1778       ((cu_->instruction_set != kThumb2) && (cu_->instruction_set != kArm64))) {
1779     return false;
1780   }
1781 
1782   if (!rl_src2.wide) {
1783     int32_t divisor = mir_graph_->ConstantValue(rl_src2);
1784     if (CanDivideByReciprocalMultiplyFloat(divisor)) {
1785       // Generate multiply by reciprocal instead of div.
1786       float recip = 1.0f / bit_cast<float, int32_t>(divisor);
1787       GenMultiplyByConstantFloat(rl_dest, rl_src1, bit_cast<int32_t, float>(recip));
1788       return true;
1789     }
1790   } else {
1791     int64_t divisor = mir_graph_->ConstantValueWide(rl_src2);
1792     if (CanDivideByReciprocalMultiplyDouble(divisor)) {
1793       // Generate multiply by reciprocal instead of div.
1794       double recip = 1.0 / bit_cast<double, int64_t>(divisor);
1795       GenMultiplyByConstantDouble(rl_dest, rl_src1, bit_cast<int64_t, double>(recip));
1796       return true;
1797     }
1798   }
1799   return false;
1800 }
1801 
1802 void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
1803                                int lit) {
1804   RegLocation rl_result;
1805   OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
1806   bool shift_op = false;
1807   bool is_div = false;
1808 
1809   switch (opcode) {
1810     case Instruction::RSUB_INT_LIT8:
1811     case Instruction::RSUB_INT: {
1812       rl_src = LoadValue(rl_src, kCoreReg);
1813       rl_result = EvalLoc(rl_dest, kCoreReg, true);
1814       if (cu_->instruction_set == kThumb2) {
1815         OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
1816       } else {
1817         OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
1818         OpRegImm(kOpAdd, rl_result.reg, lit);
1819       }
1820       StoreValue(rl_dest, rl_result);
1821       return;
1822     }
1823 
1824     case Instruction::SUB_INT:
1825     case Instruction::SUB_INT_2ADDR:
1826       lit = -lit;
1827       FALLTHROUGH_INTENDED;
1828     case Instruction::ADD_INT:
1829     case Instruction::ADD_INT_2ADDR:
1830     case Instruction::ADD_INT_LIT8:
1831     case Instruction::ADD_INT_LIT16:
1832       op = kOpAdd;
1833       break;
1834     case Instruction::MUL_INT:
1835     case Instruction::MUL_INT_2ADDR:
1836     case Instruction::MUL_INT_LIT8:
1837     case Instruction::MUL_INT_LIT16: {
1838       if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
1839         return;
1840       }
1841       op = kOpMul;
1842       break;
1843     }
1844     case Instruction::AND_INT:
1845     case Instruction::AND_INT_2ADDR:
1846     case Instruction::AND_INT_LIT8:
1847     case Instruction::AND_INT_LIT16:
1848       op = kOpAnd;
1849       break;
1850     case Instruction::OR_INT:
1851     case Instruction::OR_INT_2ADDR:
1852     case Instruction::OR_INT_LIT8:
1853     case Instruction::OR_INT_LIT16:
1854       op = kOpOr;
1855       break;
1856     case Instruction::XOR_INT:
1857     case Instruction::XOR_INT_2ADDR:
1858     case Instruction::XOR_INT_LIT8:
1859     case Instruction::XOR_INT_LIT16:
1860       op = kOpXor;
1861       break;
1862     case Instruction::SHL_INT_LIT8:
1863     case Instruction::SHL_INT:
1864     case Instruction::SHL_INT_2ADDR:
1865       lit &= 31;
1866       shift_op = true;
1867       op = kOpLsl;
1868       break;
1869     case Instruction::SHR_INT_LIT8:
1870     case Instruction::SHR_INT:
1871     case Instruction::SHR_INT_2ADDR:
1872       lit &= 31;
1873       shift_op = true;
1874       op = kOpAsr;
1875       break;
1876     case Instruction::USHR_INT_LIT8:
1877     case Instruction::USHR_INT:
1878     case Instruction::USHR_INT_2ADDR:
1879       lit &= 31;
1880       shift_op = true;
1881       op = kOpLsr;
1882       break;
1883 
1884     case Instruction::DIV_INT:
1885     case Instruction::DIV_INT_2ADDR:
1886     case Instruction::DIV_INT_LIT8:
1887     case Instruction::DIV_INT_LIT16:
1888     case Instruction::REM_INT:
1889     case Instruction::REM_INT_2ADDR:
1890     case Instruction::REM_INT_LIT8:
1891     case Instruction::REM_INT_LIT16: {
1892       if (lit == 0) {
1893         GenDivZeroException();
1894         return;
1895       }
1896       if ((opcode == Instruction::DIV_INT) ||
1897           (opcode == Instruction::DIV_INT_2ADDR) ||
1898           (opcode == Instruction::DIV_INT_LIT8) ||
1899           (opcode == Instruction::DIV_INT_LIT16)) {
1900         is_div = true;
1901       } else {
1902         is_div = false;
1903       }
1904       if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
1905         return;
1906       }
1907 
1908       bool done = false;
1909       if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64 ||
1910           cu_->instruction_set == kArm64) {
1911         rl_src = LoadValue(rl_src, kCoreReg);
1912         rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
1913         done = true;
1914       } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
1915         rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
1916         done = true;
1917       } else if (cu_->instruction_set == kThumb2) {
1918         if (cu_->compiler_driver->GetInstructionSetFeatures()->AsArmInstructionSetFeatures()->
1919                 HasDivideInstruction()) {
1920           // Use ARM SDIV instruction for division.  For remainder we also need to
1921           // calculate using a MUL and subtract.
1922           rl_src = LoadValue(rl_src, kCoreReg);
1923           rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
1924           done = true;
1925         }
1926       }
1927 
1928       if (!done) {
1929         FlushAllRegs();   /* Everything to home location. */
1930         LoadValueDirectFixed(rl_src, TargetReg(kArg0, kNotWide));
1931         Clobber(TargetReg(kArg0, kNotWide));
1932         CallRuntimeHelperRegImm(kQuickIdivmod, TargetReg(kArg0, kNotWide), lit, false);
1933         if (is_div)
1934           rl_result = GetReturn(kCoreReg);
1935         else
1936           rl_result = GetReturnAlt();
1937       }
1938       StoreValue(rl_dest, rl_result);
1939       return;
1940     }
1941     default:
1942       LOG(FATAL) << "Unexpected opcode " << opcode;
1943   }
1944   rl_src = LoadValue(rl_src, kCoreReg);
1945   rl_result = EvalLoc(rl_dest, kCoreReg, true);
1946   // Avoid shifts by literal 0 - no support in Thumb.  Change to copy.
1947   if (shift_op && (lit == 0)) {
1948     OpRegCopy(rl_result.reg, rl_src.reg);
1949   } else {
1950     OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
1951   }
1952   StoreValue(rl_dest, rl_result);
1953 }
1954 
1955 void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
1956                              RegLocation rl_src1, RegLocation rl_src2, int flags) {
1957   RegLocation rl_result;
1958   OpKind first_op = kOpBkpt;
1959   OpKind second_op = kOpBkpt;
1960   bool call_out = false;
1961   bool check_zero = false;
1962   int ret_reg = TargetReg(kRet0, kNotWide).GetReg();
1963   QuickEntrypointEnum target;
1964 
1965   switch (opcode) {
1966     case Instruction::NOT_LONG:
1967       rl_src2 = LoadValueWide(rl_src2, kCoreReg);
1968       rl_result = EvalLoc(rl_dest, kCoreReg, true);
1969       // Check for destructive overlap
1970       if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
1971         RegStorage t_reg = AllocTemp();
1972         OpRegCopy(t_reg, rl_src2.reg.GetHigh());
1973         OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
1974         OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
1975         FreeTemp(t_reg);
1976       } else {
1977         OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
1978         OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
1979       }
1980       StoreValueWide(rl_dest, rl_result);
1981       return;
1982     case Instruction::ADD_LONG:
1983     case Instruction::ADD_LONG_2ADDR:
1984       first_op = kOpAdd;
1985       second_op = kOpAdc;
1986       break;
1987     case Instruction::SUB_LONG:
1988     case Instruction::SUB_LONG_2ADDR:
1989       first_op = kOpSub;
1990       second_op = kOpSbc;
1991       break;
1992     case Instruction::MUL_LONG:
1993     case Instruction::MUL_LONG_2ADDR:
1994       call_out = true;
1995       ret_reg = TargetReg(kRet0, kNotWide).GetReg();
1996       target = kQuickLmul;
1997       break;
1998     case Instruction::DIV_LONG:
1999     case Instruction::DIV_LONG_2ADDR:
2000       call_out = true;
2001       check_zero = true;
2002       ret_reg = TargetReg(kRet0, kNotWide).GetReg();
2003       target = kQuickLdiv;
2004       break;
2005     case Instruction::REM_LONG:
2006     case Instruction::REM_LONG_2ADDR:
2007       call_out = true;
2008       check_zero = true;
2009       target = kQuickLmod;
2010       /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
2011       ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2, kNotWide).GetReg() :
2012           TargetReg(kRet0, kNotWide).GetReg();
2013       break;
2014     case Instruction::AND_LONG_2ADDR:
2015     case Instruction::AND_LONG:
2016       first_op = kOpAnd;
2017       second_op = kOpAnd;
2018       break;
2019     case Instruction::OR_LONG:
2020     case Instruction::OR_LONG_2ADDR:
2021       first_op = kOpOr;
2022       second_op = kOpOr;
2023       break;
2024     case Instruction::XOR_LONG:
2025     case Instruction::XOR_LONG_2ADDR:
2026       first_op = kOpXor;
2027       second_op = kOpXor;
2028       break;
2029     default:
2030       LOG(FATAL) << "Invalid long arith op";
2031   }
2032   if (!call_out) {
2033     GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
2034   } else {
2035     FlushAllRegs();   /* Send everything to home location */
2036     if (check_zero) {
2037       RegStorage r_tmp1 = TargetReg(kArg0, kWide);
2038       RegStorage r_tmp2 = TargetReg(kArg2, kWide);
2039       LoadValueDirectWideFixed(rl_src2, r_tmp2);
2040       RegStorage r_tgt = CallHelperSetup(target);
2041       if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
2042         GenDivZeroCheckWide(r_tmp2);
2043       }
2044       LoadValueDirectWideFixed(rl_src1, r_tmp1);
2045       // NOTE: callout here is not a safepoint
2046       CallHelper(r_tgt, target, false /* not safepoint */);
2047     } else {
2048       CallRuntimeHelperRegLocationRegLocation(target, rl_src1, rl_src2, false);
2049     }
2050     // Adjust the return regs to handle the case of rem returning its result in kArg2/kArg3.
2051     if (ret_reg == TargetReg(kRet0, kNotWide).GetReg())
2052       rl_result = GetReturnWide(kCoreReg);
2053     else
2054       rl_result = GetReturnWideAlt();
2055     StoreValueWide(rl_dest, rl_result);
2056   }
2057 }
2058 
2059 void Mir2Lir::GenConst(RegLocation rl_dest, int value) {
2060   RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
2061   LoadConstantNoClobber(rl_result.reg, value);
2062   StoreValue(rl_dest, rl_result);
2063 }
2064 
2065 void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
2066                                 RegLocation rl_src, RegisterClass return_reg_class) {
2067   /*
2068    * Don't optimize the register usage since it calls out to support
2069    * functions
2070    */
2071 
2072   FlushAllRegs();   /* Send everything to home location */
2073   CallRuntimeHelperRegLocation(trampoline, rl_src, false);
2074   if (rl_dest.wide) {
2075     RegLocation rl_result = GetReturnWide(return_reg_class);
2076     StoreValueWide(rl_dest, rl_result);
2077   } else {
2078     RegLocation rl_result = GetReturn(return_reg_class);
2079     StoreValue(rl_dest, rl_result);
2080   }
2081 }
2082 
2083 class Mir2Lir::SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
2084  public:
2085   SuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
2086       : LIRSlowPath(m2l, branch, cont) {
2087   }
2088 
2089   void Compile() OVERRIDE {
2090     m2l_->ResetRegPool();
2091     m2l_->ResetDefTracking();
2092     GenerateTargetLabel(kPseudoSuspendTarget);
2093     m2l_->CallRuntimeHelper(kQuickTestSuspend, true);
2094     if (cont_ != nullptr) {
2095       m2l_->OpUnconditionalBranch(cont_);
2096     }
2097   }
2098 };
2099 
2100 /* Check if we need to check for pending suspend request */
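/*
 * With explicit checks, OpTestSuspend emits a test that branches to a slow path calling
 * the TestSuspend entrypoint.  With implicit checks, CheckSuspendUsingLoad emits a load
 * from the thread's suspend trigger; a pending suspend request makes that load fault and
 * the runtime's fault handler performs the suspension.
 */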
2101 void Mir2Lir::GenSuspendTest(int opt_flags) {
2102   if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK) != 0) {
2103     return;
2104   }
2105   if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
2106     FlushAllRegs();
2107     LIR* branch = OpTestSuspend(nullptr);
2108     LIR* cont = NewLIR0(kPseudoTargetLabel);
2109     AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
2110   } else {
2111     FlushAllRegs();     // TODO: needed?
2112     LIR* inst = CheckSuspendUsingLoad();
2113     MarkSafepointPC(inst);
2114   }
2115 }
2116 
2117 /* Check if we need to check for pending suspend request */
2118 void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
2119   if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK) != 0) {
2120     OpUnconditionalBranch(target);
2121     return;
2122   }
2123   if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
2124     OpTestSuspend(target);
2125     FlushAllRegs();
2126     LIR* branch = OpUnconditionalBranch(nullptr);
2127     AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, target));
2128   } else {
2129     // For the implicit suspend check, just perform the trigger
2130     // load and branch to the target.
2131     FlushAllRegs();
2132     LIR* inst = CheckSuspendUsingLoad();
2133     MarkSafepointPC(inst);
2134     OpUnconditionalBranch(target);
2135   }
2136 }
2137 
2138 /* Call out to helper assembly routine that will null check obj and then lock it. */
2139 void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
2140   UNUSED(opt_flags);  // TODO: avoid null check with specialized non-null helper.
2141   FlushAllRegs();
2142   CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true);
2143 }
2144 
2145 /* Call out to helper assembly routine that will null check obj and then unlock it. */
2146 void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
2147   UNUSED(opt_flags);  // TODO: avoid null check with specialized non-null helper.
2148   FlushAllRegs();
2149   CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true);
2150 }
2151 
2152 /* Generic code for generating a wide constant into a VR. */
2153 void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
2154   RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
2155   LoadConstantWide(rl_result.reg, value);
2156   StoreValueWide(rl_dest, rl_result);
2157 }
2158 
2159 void Mir2Lir::GenSmallPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
2160   BasicBlock* bb = mir_graph_->GetBasicBlock(mir->bb);
2161   DCHECK(bb != nullptr);
2162   ArenaVector<SuccessorBlockInfo*>::const_iterator succ_bb_iter = bb->successor_blocks.cbegin();
2163   const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
2164   const uint16_t entries = table[1];
2165   // Chained cmp-and-branch.
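  // Keys that are cheap immediate operands are compared directly; once a key would need
  // an expensive constant, the remaining cases keep the running key in a temp register
  // that is incremented between compares (see the fallback below).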
2166   const int32_t* as_int32 = reinterpret_cast<const int32_t*>(&table[2]);
2167   int32_t starting_key = as_int32[0];
2168   rl_src = LoadValue(rl_src, kCoreReg);
2169   int i = 0;
2170   for (; i < entries; ++i, ++succ_bb_iter) {
2171     if (!InexpensiveConstantInt(starting_key + i, Instruction::Code::IF_EQ)) {
2172       // Switch to using a temp and add.
2173       break;
2174     }
2175     SuccessorBlockInfo* successor_block_info = *succ_bb_iter;
2176     DCHECK(successor_block_info != nullptr);
2177     int case_block_id = successor_block_info->block;
2178     DCHECK_EQ(starting_key + i, successor_block_info->key);
2179     OpCmpImmBranch(kCondEq, rl_src.reg, starting_key + i, &block_label_list_[case_block_id]);
2180   }
2181   if (i < entries) {
2182     // The remaining keys are not inexpensive constants. Try to allocate a temp and advance the key in it with an add.
2183     RegStorage key_temp = AllocTypedTemp(false, kCoreReg, false);
2184     if (key_temp.Valid()) {
2185       LoadConstantNoClobber(key_temp, starting_key + i);
2186       for (; i < entries - 1; ++i, ++succ_bb_iter) {
2187         SuccessorBlockInfo* successor_block_info = *succ_bb_iter;
2188         DCHECK(successor_block_info != nullptr);
2189         int case_block_id = successor_block_info->block;
2190         DCHECK_EQ(starting_key + i, successor_block_info->key);
2191         OpCmpBranch(kCondEq, rl_src.reg, key_temp, &block_label_list_[case_block_id]);
2192         OpRegImm(kOpAdd, key_temp, 1);  // Increment key.
2193       }
2194       SuccessorBlockInfo* successor_block_info = *succ_bb_iter;
2195       DCHECK(successor_block_info != nullptr);
2196       int case_block_id = successor_block_info->block;
2197       DCHECK_EQ(starting_key + i, successor_block_info->key);
2198       OpCmpBranch(kCondEq, rl_src.reg, key_temp, &block_label_list_[case_block_id]);
2199     } else {
2200       // No free temp, just finish the old loop.
2201       for (; i < entries; ++i, ++succ_bb_iter) {
2202         SuccessorBlockInfo* successor_block_info = *succ_bb_iter;
2203         DCHECK(successor_block_info != nullptr);
2204         int case_block_id = successor_block_info->block;
2205         DCHECK_EQ(starting_key + i, successor_block_info->key);
2206         OpCmpImmBranch(kCondEq, rl_src.reg, starting_key + i, &block_label_list_[case_block_id]);
2207       }
2208     }
2209   }
2210 }
2211 
2212 void Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
2213   const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
2214   if (cu_->verbose) {
2215     DumpPackedSwitchTable(table);
2216   }
2217 
2218   const uint16_t entries = table[1];
2219   if (entries <= kSmallSwitchThreshold) {
2220     GenSmallPackedSwitch(mir, table_offset, rl_src);
2221   } else {
2222     // Use the backend-specific implementation.
2223     GenLargePackedSwitch(mir, table_offset, rl_src);
2224   }
2225 }
2226 
2227 void Mir2Lir::GenSmallSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
2228   BasicBlock* bb = mir_graph_->GetBasicBlock(mir->bb);
2229   DCHECK(bb != nullptr);
2230   const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
2231   const uint16_t entries = table[1];
2232   // Chained cmp-and-branch.
2233   rl_src = LoadValue(rl_src, kCoreReg);
2234   int i = 0;
2235   for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
2236     int case_block_id = successor_block_info->block;
2237     int key = successor_block_info->key;
2238     OpCmpImmBranch(kCondEq, rl_src.reg, key, &block_label_list_[case_block_id]);
2239     i++;
2240   }
2241   DCHECK_EQ(i, entries);
2242 }
2243 
2244 void Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
2245   const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
2246   if (cu_->verbose) {
2247     DumpSparseSwitchTable(table);
2248   }
2249 
2250   const uint16_t entries = table[1];
2251   if (entries <= kSmallSwitchThreshold) {
2252     GenSmallSparseSwitch(mir, table_offset, rl_src);
2253   } else {
2254     // Use the backend-specific implementation.
2255     GenLargeSparseSwitch(mir, table_offset, rl_src);
2256   }
2257 }
2258 
2259 bool Mir2Lir::SizeMatchesTypeForEntrypoint(OpSize size, Primitive::Type type) {
2260   switch (size) {
2261     case kReference:
2262       return type == Primitive::kPrimNot;
2263     case k64:
2264     case kDouble:
2265       return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
2266     case k32:
2267     case kSingle:
2268       return type == Primitive::kPrimInt || type == Primitive::kPrimFloat;
2269     case kSignedHalf:
2270       return type == Primitive::kPrimShort;
2271     case kUnsignedHalf:
2272       return type == Primitive::kPrimChar;
2273     case kSignedByte:
2274       return type == Primitive::kPrimByte;
2275     case kUnsignedByte:
2276       return type == Primitive::kPrimBoolean;
2277     case kWord:  // Intentional fallthrough.
2278     default:
2279       return false;  // There are no sane types with this op size.
2280   }
2281 }
2282 
2283 }  // namespace art
2284