1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "dex/compiler_internals.h"
18 #include "dex/dataflow_iterator-inl.h"
19 #include "dex/quick/dex_file_method_inliner.h"
20 #include "mir_to_lir-inl.h"
21 #include "thread-inl.h"
22
23 namespace art {
24
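// Maps one character of a method's shorty descriptor to the register class that should hold the
// corresponding value: references ('L') go to kRefReg, floats and doubles to kFPReg, and
// everything else (including long) to kCoreReg. shorty[0] is the return type, so callers also
// use this to choose the return register class.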
25 RegisterClass Mir2Lir::ShortyToRegClass(char shorty_type) {
26 RegisterClass res;
27 switch (shorty_type) {
28 case 'L':
29 res = kRefReg;
30 break;
31 case 'F':
32 // Expected fallthrough.
33 case 'D':
34 res = kFPReg;
35 break;
36 default:
37 res = kCoreReg;
38 }
39 return res;
40 }
41
42 RegisterClass Mir2Lir::LocToRegClass(RegLocation loc) {
43 RegisterClass res;
44 if (loc.fp) {
45 DCHECK(!loc.ref) << "At most, one of ref/fp may be set";
46 res = kFPReg;
47 } else if (loc.ref) {
48 res = kRefReg;
49 } else {
50 res = kCoreReg;
51 }
52 return res;
53 }
54
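// Marks the physical register(s) holding the argument at |in_position| (plus the high half for a
// wide argument) as locked temps, so the register allocator will not hand them out while the
// special-case generators below are still reading the incoming arguments.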
55 void Mir2Lir::LockArg(int in_position, bool wide) {
56 RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
57 RegStorage reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) :
58 RegStorage::InvalidReg();
59
60 if (reg_arg_low.Valid()) {
61 LockTemp(reg_arg_low);
62 }
63 if (reg_arg_high.Valid() && reg_arg_low.NotExactlyEquals(reg_arg_high)) {
64 LockTemp(reg_arg_high);
65 }
66 }
67
68 // TODO: simplify when 32-bit targets go hard-float.
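// Materializes the argument at |in_position| in a register of |reg_class|. Arguments that were
// not mapped to a physical register (or, on 32-bit targets, only partially mapped for a wide
// value) are loaded from the callee's in-area on the stack; a register-to-register copy is
// emitted when the mapped register is of the wrong class.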
69 RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide) {
70 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
71 int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
72
73 if (cu_->instruction_set == kX86) {
74 /*
75 * A call on x86 pushes the return address, moving the stack pointer down. Thus, we add
76 * another 4 bytes to locate the caller's outs (the callee's ins) on the stack.
77 */
78 offset += sizeof(uint32_t);
79 }
80
81 if (cu_->instruction_set == kX86_64) {
82 /*
83 * A call on x86-64 pushes the return address, moving the stack pointer down. Thus, we add
84 * another 8 bytes to locate the caller's outs (the callee's ins) on the stack.
85 */
86 offset += sizeof(uint64_t);
87 }
88
89 if (cu_->target64) {
90 RegStorage reg_arg = GetArgMappingToPhysicalReg(in_position);
91 if (!reg_arg.Valid()) {
92 RegStorage new_reg =
93 wide ? AllocTypedTempWide(false, reg_class) : AllocTypedTemp(false, reg_class);
94 LoadBaseDisp(TargetPtrReg(kSp), offset, new_reg, wide ? k64 : k32, kNotVolatile);
95 return new_reg;
96 } else {
97 // Check if we need to copy the arg to a different reg_class.
98 if (!RegClassMatches(reg_class, reg_arg)) {
99 if (wide) {
100 RegStorage new_reg = AllocTypedTempWide(false, reg_class);
101 OpRegCopyWide(new_reg, reg_arg);
102 reg_arg = new_reg;
103 } else {
104 RegStorage new_reg = AllocTypedTemp(false, reg_class);
105 OpRegCopy(new_reg, reg_arg);
106 reg_arg = new_reg;
107 }
108 }
109 }
110 return reg_arg;
111 }
112
113 RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
114 RegStorage reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) :
115 RegStorage::InvalidReg();
116
117 // If the VR is wide and there is no register for the high part, we need to load it.
118 if (wide && !reg_arg_high.Valid()) {
119 // If the low part is not in a reg, we allocate a pair. Otherwise, we just load to high reg.
120 if (!reg_arg_low.Valid()) {
121 RegStorage new_regs = AllocTypedTempWide(false, reg_class);
122 LoadBaseDisp(TargetPtrReg(kSp), offset, new_regs, k64, kNotVolatile);
123 return new_regs; // The reg_class is OK, we can return.
124 } else {
125 // Assume that no ABI allows splitting a wide fp reg between a narrow fp reg and memory,
126 // i.e. the low part is in a core reg. Load the second part in a core reg as well for now.
127 DCHECK(!reg_arg_low.IsFloat());
128 reg_arg_high = AllocTemp();
129 int offset_high = offset + sizeof(uint32_t);
130 Load32Disp(TargetPtrReg(kSp), offset_high, reg_arg_high);
131 // Continue below to check the reg_class.
132 }
133 }
134
135 // If the low part is not in a register yet, we need to load it.
136 if (!reg_arg_low.Valid()) {
137 // Assume that if the low part of a wide arg is passed in memory, so is the high part,
138 // thus we don't get here for wide args as it's handled above. Big-endian ABIs could
139 // conceivably break this assumption but Android supports only little-endian architectures.
140 DCHECK(!wide);
141 reg_arg_low = AllocTypedTemp(false, reg_class);
142 Load32Disp(TargetPtrReg(kSp), offset, reg_arg_low);
143 return reg_arg_low; // The reg_class is OK, we can return.
144 }
145
146 RegStorage reg_arg = wide ? RegStorage::MakeRegPair(reg_arg_low, reg_arg_high) : reg_arg_low;
147 // Check if we need to copy the arg to a different reg_class.
148 if (!RegClassMatches(reg_class, reg_arg)) {
149 if (wide) {
150 RegStorage new_regs = AllocTypedTempWide(false, reg_class);
151 OpRegCopyWide(new_regs, reg_arg);
152 reg_arg = new_regs;
153 } else {
154 RegStorage new_reg = AllocTypedTemp(false, reg_class);
155 OpRegCopy(new_reg, reg_arg);
156 reg_arg = new_reg;
157 }
158 }
159 return reg_arg;
160 }
161
162 // TODO: simplify when 32-bit targets go hard-float.
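// Like LoadArg, but moves the argument at |in_position| directly into rl_dest.reg (copying from
// the mapped physical register or loading from the stack) instead of returning a temp.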
163 void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
164 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
165 int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
166 if (cu_->instruction_set == kX86) {
167 /*
168 * A call on x86 pushes the return address, moving the stack pointer down. Thus, we add
169 * another 4 bytes to locate the caller's outs (the callee's ins) on the stack.
170 */
171 offset += sizeof(uint32_t);
172 }
173
174 if (cu_->instruction_set == kX86_64) {
175 /*
176 * A call on x86-64 pushes the return address, moving the stack pointer down. Thus, we add
177 * another 8 bytes to locate the caller's outs (the callee's ins) on the stack.
178 */
179 offset += sizeof(uint64_t);
180 }
181
182 if (!rl_dest.wide) {
183 RegStorage reg = GetArgMappingToPhysicalReg(in_position);
184 if (reg.Valid()) {
185 OpRegCopy(rl_dest.reg, reg);
186 } else {
187 Load32Disp(TargetPtrReg(kSp), offset, rl_dest.reg);
188 }
189 } else {
190 if (cu_->target64) {
191 RegStorage reg = GetArgMappingToPhysicalReg(in_position);
192 if (reg.Valid()) {
193 OpRegCopy(rl_dest.reg, reg);
194 } else {
195 LoadBaseDisp(TargetPtrReg(kSp), offset, rl_dest.reg, k64, kNotVolatile);
196 }
197 return;
198 }
199
200 RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
201 RegStorage reg_arg_high = GetArgMappingToPhysicalReg(in_position + 1);
202
203 if (reg_arg_low.Valid() && reg_arg_high.Valid()) {
204 OpRegCopyWide(rl_dest.reg, RegStorage::MakeRegPair(reg_arg_low, reg_arg_high));
205 } else if (reg_arg_low.Valid() && !reg_arg_high.Valid()) {
206 OpRegCopy(rl_dest.reg, reg_arg_low);
207 int offset_high = offset + sizeof(uint32_t);
208 Load32Disp(TargetPtrReg(kSp), offset_high, rl_dest.reg.GetHigh());
209 } else if (!reg_arg_low.Valid() && reg_arg_high.Valid()) {
210 OpRegCopy(rl_dest.reg.GetHigh(), reg_arg_high);
211 Load32Disp(TargetPtrReg(kSp), offset, rl_dest.reg.GetLow());
212 } else {
213 LoadBaseDisp(TargetPtrReg(kSp), offset, rl_dest.reg, k64, kNotVolatile);
214 }
215 }
216 }
217
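// Emits the body of a simple getter (return this.field;): loads "this" from the first argument,
// performs the (possibly volatile) field load, and leaves the value in the return register(s)
// implied by the method's shorty. Returns false if the pattern cannot be handled here, e.g. when
// the receiver is not "this" and would need a null check.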
218 bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
219 // FastInstance() already checked by DexFileMethodInliner.
220 const InlineIGetIPutData& data = special.d.ifield_data;
221 if (data.method_is_static != 0u || data.object_arg != 0u) {
222 // The object is not "this" and has to be null-checked.
223 return false;
224 }
225
226 bool wide = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE));
227 bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT));
228 OpSize size = LoadStoreOpSize(wide, ref);
229
230 // Point of no return - no aborts after this
231 GenPrintLabel(mir);
232 LockArg(data.object_arg);
233 RegStorage reg_obj = LoadArg(data.object_arg, kRefReg);
234 RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
235 RegisterClass ret_reg_class = ShortyToRegClass(cu_->shorty[0]);
236 RegLocation rl_dest = wide ? GetReturnWide(ret_reg_class) : GetReturn(ret_reg_class);
237 RegStorage r_result = rl_dest.reg;
238 if (!RegClassMatches(reg_class, r_result)) {
239 r_result = wide ? AllocTypedTempWide(rl_dest.fp, reg_class)
240 : AllocTypedTemp(rl_dest.fp, reg_class);
241 }
242 if (ref) {
243 LoadRefDisp(reg_obj, data.field_offset, r_result, data.is_volatile ? kVolatile : kNotVolatile);
244 } else {
245 LoadBaseDisp(reg_obj, data.field_offset, r_result, size, data.is_volatile ? kVolatile :
246 kNotVolatile);
247 }
248 if (r_result.NotExactlyEquals(rl_dest.reg)) {
249 if (wide) {
250 OpRegCopyWide(rl_dest.reg, r_result);
251 } else {
252 OpRegCopy(rl_dest.reg, r_result);
253 }
254 }
255 return true;
256 }
257
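// Emits the body of a simple setter (this.field = arg;): loads "this" and the source argument,
// performs the (possibly volatile) field store, and marks the GC card for reference stores.
// Returns false for patterns not handled here (non-"this" receiver or setters that return an
// argument).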
258 bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
259 // FastInstance() already checked by DexFileMethodInliner.
260 const InlineIGetIPutData& data = special.d.ifield_data;
261 if (data.method_is_static != 0u || data.object_arg != 0u) {
262 // The object is not "this" and has to be null-checked.
263 return false;
264 }
265 if (data.return_arg_plus1 != 0u) {
266 // The setter returns a method argument which we don't support here.
267 return false;
268 }
269
270 bool wide = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE));
271 bool ref = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT));
272 OpSize size = LoadStoreOpSize(wide, ref);
273
274 // Point of no return - no aborts after this
275 GenPrintLabel(mir);
276 LockArg(data.object_arg);
277 LockArg(data.src_arg, wide);
278 RegStorage reg_obj = LoadArg(data.object_arg, kRefReg);
279 RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
280 RegStorage reg_src = LoadArg(data.src_arg, reg_class, wide);
281 if (ref) {
282 StoreRefDisp(reg_obj, data.field_offset, reg_src, data.is_volatile ? kVolatile : kNotVolatile);
283 } else {
284 StoreBaseDisp(reg_obj, data.field_offset, reg_src, size, data.is_volatile ? kVolatile :
285 kNotVolatile);
286 }
287 if (ref) {
288 MarkGCCard(reg_src, reg_obj);
289 }
290 return true;
291 }
292
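// Emits the body of an identity method (return arg;): the selected argument is loaded straight
// into the return register(s) for the method's shorty.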
293 bool Mir2Lir::GenSpecialIdentity(MIR* mir, const InlineMethod& special) {
294 const InlineReturnArgData& data = special.d.return_data;
295 bool wide = (data.is_wide != 0u);
296
297 // Point of no return - no aborts after this
298 GenPrintLabel(mir);
299 LockArg(data.arg, wide);
300 RegisterClass reg_class = ShortyToRegClass(cu_->shorty[0]);
301 RegLocation rl_dest = wide ? GetReturnWide(reg_class) : GetReturn(reg_class);
302 LoadArgDirect(data.arg, rl_dest);
303 return true;
304 }
305
306 /*
307 * Special-case code generation for simple non-throwing leaf methods.
308 */
309 bool Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
310 DCHECK(special.flags & kInlineSpecial);
311 current_dalvik_offset_ = mir->offset;
312 MIR* return_mir = nullptr;
313 bool successful = false;
314
315 switch (special.opcode) {
316 case kInlineOpNop:
317 successful = true;
318 DCHECK_EQ(mir->dalvikInsn.opcode, Instruction::RETURN_VOID);
319 return_mir = mir;
320 break;
321 case kInlineOpNonWideConst: {
322 successful = true;
323 RegLocation rl_dest = GetReturn(ShortyToRegClass(cu_->shorty[0]));
324 GenPrintLabel(mir);
325 LoadConstant(rl_dest.reg, static_cast<int>(special.d.data));
326 return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
327 break;
328 }
329 case kInlineOpReturnArg:
330 successful = GenSpecialIdentity(mir, special);
331 return_mir = mir;
332 break;
333 case kInlineOpIGet:
334 successful = GenSpecialIGet(mir, special);
335 return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
336 break;
337 case kInlineOpIPut:
338 successful = GenSpecialIPut(mir, special);
339 return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
340 break;
341 default:
342 break;
343 }
344
345 if (successful) {
346 if (kIsDebugBuild) {
347 // Clear unreachable catch entries.
348 mir_graph_->catches_.clear();
349 }
350
351 // Handle verbosity for return MIR.
352 if (return_mir != nullptr) {
353 current_dalvik_offset_ = return_mir->offset;
354 // Not handling special identity case because it already generated code as part
355 // of the return. The label should have been added before any code was generated.
356 if (special.opcode != kInlineOpReturnArg) {
357 GenPrintLabel(return_mir);
358 }
359 }
360 GenSpecialExitSequence();
361
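// A special-case method needs no frame and spills no registers, so clear all frame and spill
// bookkeeping before code generation is finalized.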
362 core_spill_mask_ = 0;
363 num_core_spills_ = 0;
364 fp_spill_mask_ = 0;
365 num_fp_spills_ = 0;
366 frame_size_ = 0;
367 core_vmap_table_.clear();
368 fp_vmap_table_.clear();
369 }
370
371 return successful;
372 }
373
374 /*
375 * Target-independent code generation. Use only high-level
376 * load/store utilities here, or target-dependent genXX() handlers
377 * when necessary.
378 */
379 void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list) {
380 RegLocation rl_src[3];
381 RegLocation rl_dest = mir_graph_->GetBadLoc();
382 RegLocation rl_result = mir_graph_->GetBadLoc();
383 Instruction::Code opcode = mir->dalvikInsn.opcode;
384 int opt_flags = mir->optimization_flags;
385 uint32_t vB = mir->dalvikInsn.vB;
386 uint32_t vC = mir->dalvikInsn.vC;
387 DCHECK(CheckCorePoolSanity()) << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " @ 0x"
388 << std::hex << current_dalvik_offset_;
389
390 // Prep Src and Dest locations.
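// The data-flow attributes for the opcode say which of vA/vB/vC are used or defined and whether
// they are wide; rl_src[0..2] and rl_dest are filled in accordingly before the dispatch switch.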
391 int next_sreg = 0;
392 int next_loc = 0;
393 uint64_t attrs = MIRGraph::GetDataFlowAttributes(opcode);
394 rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
395 if (attrs & DF_UA) {
396 if (attrs & DF_A_WIDE) {
397 rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
398 next_sreg += 2;
399 } else {
400 rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
401 next_sreg++;
402 }
403 }
404 if (attrs & DF_UB) {
405 if (attrs & DF_B_WIDE) {
406 rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
407 next_sreg += 2;
408 } else {
409 rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
410 next_sreg++;
411 }
412 }
413 if (attrs & DF_UC) {
414 if (attrs & DF_C_WIDE) {
415 rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
416 } else {
417 rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
418 }
419 }
420 if (attrs & DF_DA) {
421 if (attrs & DF_A_WIDE) {
422 rl_dest = mir_graph_->GetDestWide(mir);
423 } else {
424 rl_dest = mir_graph_->GetDest(mir);
425 }
426 }
427 switch (opcode) {
428 case Instruction::NOP:
429 break;
430
431 case Instruction::MOVE_EXCEPTION:
432 GenMoveException(rl_dest);
433 break;
434
435 case Instruction::RETURN_VOID:
436 if (((cu_->access_flags & kAccConstructor) != 0) &&
437 cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(), cu_->dex_file,
438 cu_->class_def_idx)) {
439 GenMemBarrier(kStoreStore);
440 }
441 if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
442 GenSuspendTest(opt_flags);
443 }
444 break;
445
446 case Instruction::RETURN_OBJECT:
447 DCHECK(rl_src[0].ref);
448 // Intentional fallthrough.
449 case Instruction::RETURN:
450 if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
451 GenSuspendTest(opt_flags);
452 }
453 DCHECK_EQ(LocToRegClass(rl_src[0]), ShortyToRegClass(cu_->shorty[0]));
454 StoreValue(GetReturn(LocToRegClass(rl_src[0])), rl_src[0]);
455 break;
456
457 case Instruction::RETURN_WIDE:
458 if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
459 GenSuspendTest(opt_flags);
460 }
461 DCHECK_EQ(LocToRegClass(rl_src[0]), ShortyToRegClass(cu_->shorty[0]));
462 StoreValueWide(GetReturnWide(LocToRegClass(rl_src[0])), rl_src[0]);
463 break;
464
465 case Instruction::MOVE_RESULT_WIDE:
466 if ((opt_flags & MIR_INLINED) != 0) {
467 break; // Nop - combined w/ previous invoke.
468 }
469 StoreValueWide(rl_dest, GetReturnWide(LocToRegClass(rl_dest)));
470 break;
471
472 case Instruction::MOVE_RESULT:
473 case Instruction::MOVE_RESULT_OBJECT:
474 if ((opt_flags & MIR_INLINED) != 0) {
475 break; // Nop - combined w/ previous invoke.
476 }
477 StoreValue(rl_dest, GetReturn(LocToRegClass(rl_dest)));
478 break;
479
480 case Instruction::MOVE:
481 case Instruction::MOVE_OBJECT:
482 case Instruction::MOVE_16:
483 case Instruction::MOVE_OBJECT_16:
484 case Instruction::MOVE_FROM16:
485 case Instruction::MOVE_OBJECT_FROM16:
486 StoreValue(rl_dest, rl_src[0]);
487 break;
488
489 case Instruction::MOVE_WIDE:
490 case Instruction::MOVE_WIDE_16:
491 case Instruction::MOVE_WIDE_FROM16:
492 StoreValueWide(rl_dest, rl_src[0]);
493 break;
494
495 case Instruction::CONST:
496 case Instruction::CONST_4:
497 case Instruction::CONST_16:
498 GenConst(rl_dest, vB);
499 break;
500
501 case Instruction::CONST_HIGH16:
502 GenConst(rl_dest, vB << 16);
503 break;
504
505 case Instruction::CONST_WIDE_16:
506 case Instruction::CONST_WIDE_32:
507 GenConstWide(rl_dest, static_cast<int64_t>(static_cast<int32_t>(vB)));
508 break;
509
510 case Instruction::CONST_WIDE:
511 GenConstWide(rl_dest, mir->dalvikInsn.vB_wide);
512 break;
513
514 case Instruction::CONST_WIDE_HIGH16:
515 rl_result = EvalLoc(rl_dest, kAnyReg, true);
516 LoadConstantWide(rl_result.reg, static_cast<int64_t>(vB) << 48);
517 StoreValueWide(rl_dest, rl_result);
518 break;
519
520 case Instruction::MONITOR_ENTER:
521 GenMonitorEnter(opt_flags, rl_src[0]);
522 break;
523
524 case Instruction::MONITOR_EXIT:
525 GenMonitorExit(opt_flags, rl_src[0]);
526 break;
527
528 case Instruction::CHECK_CAST: {
529 GenCheckCast(mir->offset, vB, rl_src[0]);
530 break;
531 }
532 case Instruction::INSTANCE_OF:
533 GenInstanceof(vC, rl_dest, rl_src[0]);
534 break;
535
536 case Instruction::NEW_INSTANCE:
537 GenNewInstance(vB, rl_dest);
538 break;
539
540 case Instruction::THROW:
541 GenThrow(rl_src[0]);
542 break;
543
544 case Instruction::ARRAY_LENGTH:
545 int len_offset;
546 len_offset = mirror::Array::LengthOffset().Int32Value();
547 rl_src[0] = LoadValue(rl_src[0], kRefReg);
548 GenNullCheck(rl_src[0].reg, opt_flags);
549 rl_result = EvalLoc(rl_dest, kCoreReg, true);
550 Load32Disp(rl_src[0].reg, len_offset, rl_result.reg);
551 MarkPossibleNullPointerException(opt_flags);
552 StoreValue(rl_dest, rl_result);
553 break;
554
555 case Instruction::CONST_STRING:
556 case Instruction::CONST_STRING_JUMBO:
557 GenConstString(vB, rl_dest);
558 break;
559
560 case Instruction::CONST_CLASS:
561 GenConstClass(vB, rl_dest);
562 break;
563
564 case Instruction::FILL_ARRAY_DATA:
565 GenFillArrayData(vB, rl_src[0]);
566 break;
567
568 case Instruction::FILLED_NEW_ARRAY:
569 GenFilledNewArray(mir_graph_->NewMemCallInfo(bb, mir, kStatic,
570 false /* not range */));
571 break;
572
573 case Instruction::FILLED_NEW_ARRAY_RANGE:
574 GenFilledNewArray(mir_graph_->NewMemCallInfo(bb, mir, kStatic,
575 true /* range */));
576 break;
577
578 case Instruction::NEW_ARRAY:
579 GenNewArray(vC, rl_dest, rl_src[0]);
580 break;
581
582 case Instruction::GOTO:
583 case Instruction::GOTO_16:
584 case Instruction::GOTO_32:
585 if (mir_graph_->IsBackedge(bb, bb->taken) &&
586 (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, bb->taken))) {
587 GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken]);
588 } else {
589 OpUnconditionalBranch(&label_list[bb->taken]);
590 }
591 break;
592
593 case Instruction::PACKED_SWITCH:
594 GenPackedSwitch(mir, vB, rl_src[0]);
595 break;
596
597 case Instruction::SPARSE_SWITCH:
598 GenSparseSwitch(mir, vB, rl_src[0]);
599 break;
600
601 case Instruction::CMPL_FLOAT:
602 case Instruction::CMPG_FLOAT:
603 case Instruction::CMPL_DOUBLE:
604 case Instruction::CMPG_DOUBLE:
605 GenCmpFP(opcode, rl_dest, rl_src[0], rl_src[1]);
606 break;
607
608 case Instruction::CMP_LONG:
609 GenCmpLong(rl_dest, rl_src[0], rl_src[1]);
610 break;
611
612 case Instruction::IF_EQ:
613 case Instruction::IF_NE:
614 case Instruction::IF_LT:
615 case Instruction::IF_GE:
616 case Instruction::IF_GT:
617 case Instruction::IF_LE: {
618 LIR* taken = &label_list[bb->taken];
619 LIR* fall_through = &label_list[bb->fall_through];
620 // Result known at compile time?
621 if (rl_src[0].is_const && rl_src[1].is_const) {
622 bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
623 mir_graph_->ConstantValue(rl_src[1].orig_sreg));
624 BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
625 if (mir_graph_->IsBackedge(bb, target_id) &&
626 (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, target_id))) {
627 GenSuspendTest(opt_flags);
628 }
629 OpUnconditionalBranch(&label_list[target_id]);
630 } else {
631 if (mir_graph_->IsBackwardsBranch(bb) &&
632 (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, bb->taken) ||
633 !mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
634 GenSuspendTest(opt_flags);
635 }
636 GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken, fall_through);
637 }
638 break;
639 }
640
641 case Instruction::IF_EQZ:
642 case Instruction::IF_NEZ:
643 case Instruction::IF_LTZ:
644 case Instruction::IF_GEZ:
645 case Instruction::IF_GTZ:
646 case Instruction::IF_LEZ: {
647 LIR* taken = &label_list[bb->taken];
648 LIR* fall_through = &label_list[bb->fall_through];
649 // Result known at compile time?
650 if (rl_src[0].is_const) {
651 bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
652 BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
653 if (mir_graph_->IsBackedge(bb, target_id) &&
654 (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, target_id))) {
655 GenSuspendTest(opt_flags);
656 }
657 OpUnconditionalBranch(&label_list[target_id]);
658 } else {
659 if (mir_graph_->IsBackwardsBranch(bb) &&
660 (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, bb->taken) ||
661 !mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
662 GenSuspendTest(opt_flags);
663 }
664 GenCompareZeroAndBranch(opcode, rl_src[0], taken, fall_through);
665 }
666 break;
667 }
668
669 case Instruction::AGET_WIDE:
670 GenArrayGet(opt_flags, k64, rl_src[0], rl_src[1], rl_dest, 3);
671 break;
672 case Instruction::AGET_OBJECT:
673 GenArrayGet(opt_flags, kReference, rl_src[0], rl_src[1], rl_dest, 2);
674 break;
675 case Instruction::AGET:
676 GenArrayGet(opt_flags, k32, rl_src[0], rl_src[1], rl_dest, 2);
677 break;
678 case Instruction::AGET_BOOLEAN:
679 GenArrayGet(opt_flags, kUnsignedByte, rl_src[0], rl_src[1], rl_dest, 0);
680 break;
681 case Instruction::AGET_BYTE:
682 GenArrayGet(opt_flags, kSignedByte, rl_src[0], rl_src[1], rl_dest, 0);
683 break;
684 case Instruction::AGET_CHAR:
685 GenArrayGet(opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
686 break;
687 case Instruction::AGET_SHORT:
688 GenArrayGet(opt_flags, kSignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
689 break;
690 case Instruction::APUT_WIDE:
691 GenArrayPut(opt_flags, k64, rl_src[1], rl_src[2], rl_src[0], 3, false);
692 break;
693 case Instruction::APUT:
694 GenArrayPut(opt_flags, k32, rl_src[1], rl_src[2], rl_src[0], 2, false);
695 break;
696 case Instruction::APUT_OBJECT: {
697 bool is_null = mir_graph_->IsConstantNullRef(rl_src[0]);
698 bool is_safe = is_null; // Always safe to store null.
699 if (!is_safe) {
700 // Check safety from verifier type information.
701 const DexCompilationUnit* unit = mir_graph_->GetCurrentDexCompilationUnit();
702 is_safe = cu_->compiler_driver->IsSafeCast(unit, mir->offset);
703 }
704 if (is_null || is_safe) {
705 // Store of constant null doesn't require an assignability test and can be generated inline
706 // without fixed register usage or a card mark.
707 GenArrayPut(opt_flags, kReference, rl_src[1], rl_src[2], rl_src[0], 2, !is_null);
708 } else {
709 GenArrayObjPut(opt_flags, rl_src[1], rl_src[2], rl_src[0]);
710 }
711 break;
712 }
713 case Instruction::APUT_SHORT:
714 case Instruction::APUT_CHAR:
715 GenArrayPut(opt_flags, kUnsignedHalf, rl_src[1], rl_src[2], rl_src[0], 1, false);
716 break;
717 case Instruction::APUT_BYTE:
718 case Instruction::APUT_BOOLEAN:
719 GenArrayPut(opt_flags, kUnsignedByte, rl_src[1], rl_src[2], rl_src[0], 0, false);
720 break;
721
722 case Instruction::IGET_OBJECT:
723 GenIGet(mir, opt_flags, kReference, rl_dest, rl_src[0], false, true);
724 break;
725
726 case Instruction::IGET_WIDE:
727 GenIGet(mir, opt_flags, k64, rl_dest, rl_src[0], true, false);
728 break;
729
730 case Instruction::IGET:
731 GenIGet(mir, opt_flags, k32, rl_dest, rl_src[0], false, false);
732 break;
733
734 case Instruction::IGET_CHAR:
735 GenIGet(mir, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
736 break;
737
738 case Instruction::IGET_SHORT:
739 GenIGet(mir, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
740 break;
741
742 case Instruction::IGET_BOOLEAN:
743 case Instruction::IGET_BYTE:
744 GenIGet(mir, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
745 break;
746
747 case Instruction::IPUT_WIDE:
748 GenIPut(mir, opt_flags, k64, rl_src[0], rl_src[1], true, false);
749 break;
750
751 case Instruction::IPUT_OBJECT:
752 GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1], false, true);
753 break;
754
755 case Instruction::IPUT:
756 GenIPut(mir, opt_flags, k32, rl_src[0], rl_src[1], false, false);
757 break;
758
759 case Instruction::IPUT_BOOLEAN:
760 case Instruction::IPUT_BYTE:
761 GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
762 break;
763
764 case Instruction::IPUT_CHAR:
765 GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
766 break;
767
768 case Instruction::IPUT_SHORT:
769 GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
770 break;
771
772 case Instruction::SGET_OBJECT:
773 GenSget(mir, rl_dest, false, true);
774 break;
775 case Instruction::SGET:
776 case Instruction::SGET_BOOLEAN:
777 case Instruction::SGET_BYTE:
778 case Instruction::SGET_CHAR:
779 case Instruction::SGET_SHORT:
780 GenSget(mir, rl_dest, false, false);
781 break;
782
783 case Instruction::SGET_WIDE:
784 GenSget(mir, rl_dest, true, false);
785 break;
786
787 case Instruction::SPUT_OBJECT:
788 GenSput(mir, rl_src[0], false, true);
789 break;
790
791 case Instruction::SPUT:
792 case Instruction::SPUT_BOOLEAN:
793 case Instruction::SPUT_BYTE:
794 case Instruction::SPUT_CHAR:
795 case Instruction::SPUT_SHORT:
796 GenSput(mir, rl_src[0], false, false);
797 break;
798
799 case Instruction::SPUT_WIDE:
800 GenSput(mir, rl_src[0], true, false);
801 break;
802
803 case Instruction::INVOKE_STATIC_RANGE:
804 GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, true));
805 if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
806 // If the invocation is not inlined, we can assume there is already a
807 // suspend check at the return site
808 mir_graph_->AppendGenSuspendTestList(bb);
809 }
810 break;
811 case Instruction::INVOKE_STATIC:
812 GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, false));
813 if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
814 mir_graph_->AppendGenSuspendTestList(bb);
815 }
816 break;
817
818 case Instruction::INVOKE_DIRECT:
819 GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, false));
820 if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
821 mir_graph_->AppendGenSuspendTestList(bb);
822 }
823 break;
824 case Instruction::INVOKE_DIRECT_RANGE:
825 GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, true));
826 if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
827 mir_graph_->AppendGenSuspendTestList(bb);
828 }
829 break;
830
831 case Instruction::INVOKE_VIRTUAL:
832 GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, false));
833 if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
834 mir_graph_->AppendGenSuspendTestList(bb);
835 }
836 break;
837 case Instruction::INVOKE_VIRTUAL_RANGE:
838 GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, true));
839 if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
840 mir_graph_->AppendGenSuspendTestList(bb);
841 }
842 break;
843
844 case Instruction::INVOKE_SUPER:
845 GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, false));
846 if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
847 mir_graph_->AppendGenSuspendTestList(bb);
848 }
849 break;
850 case Instruction::INVOKE_SUPER_RANGE:
851 GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, true));
852 if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
853 mir_graph_->AppendGenSuspendTestList(bb);
854 }
855 break;
856
857 case Instruction::INVOKE_INTERFACE:
858 GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, false));
859 if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
860 mir_graph_->AppendGenSuspendTestList(bb);
861 }
862 break;
863 case Instruction::INVOKE_INTERFACE_RANGE:
864 GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, true));
865 if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
866 mir_graph_->AppendGenSuspendTestList(bb);
867 }
868 break;
869
870 case Instruction::NEG_INT:
871 case Instruction::NOT_INT:
872 GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[0]);
873 break;
874
875 case Instruction::NEG_LONG:
876 case Instruction::NOT_LONG:
877 GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[0]);
878 break;
879
880 case Instruction::NEG_FLOAT:
881 GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[0]);
882 break;
883
884 case Instruction::NEG_DOUBLE:
885 GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[0]);
886 break;
887
888 case Instruction::INT_TO_LONG:
889 GenIntToLong(rl_dest, rl_src[0]);
890 break;
891
892 case Instruction::LONG_TO_INT:
893 rl_src[0] = UpdateLocWide(rl_src[0]);
894 rl_src[0] = NarrowRegLoc(rl_src[0]);
895 StoreValue(rl_dest, rl_src[0]);
896 break;
897
898 case Instruction::INT_TO_BYTE:
899 case Instruction::INT_TO_SHORT:
900 case Instruction::INT_TO_CHAR:
901 GenIntNarrowing(opcode, rl_dest, rl_src[0]);
902 break;
903
904 case Instruction::INT_TO_FLOAT:
905 case Instruction::INT_TO_DOUBLE:
906 case Instruction::LONG_TO_FLOAT:
907 case Instruction::LONG_TO_DOUBLE:
908 case Instruction::FLOAT_TO_INT:
909 case Instruction::FLOAT_TO_LONG:
910 case Instruction::FLOAT_TO_DOUBLE:
911 case Instruction::DOUBLE_TO_INT:
912 case Instruction::DOUBLE_TO_LONG:
913 case Instruction::DOUBLE_TO_FLOAT:
914 GenConversion(opcode, rl_dest, rl_src[0]);
915 break;
916
917
918 case Instruction::ADD_INT:
919 case Instruction::ADD_INT_2ADDR:
920 case Instruction::MUL_INT:
921 case Instruction::MUL_INT_2ADDR:
922 case Instruction::AND_INT:
923 case Instruction::AND_INT_2ADDR:
924 case Instruction::OR_INT:
925 case Instruction::OR_INT_2ADDR:
926 case Instruction::XOR_INT:
927 case Instruction::XOR_INT_2ADDR:
928 if (rl_src[0].is_const &&
929 InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[0]), opcode)) {
930 GenArithOpIntLit(opcode, rl_dest, rl_src[1],
931 mir_graph_->ConstantValue(rl_src[0].orig_sreg));
932 } else if (rl_src[1].is_const &&
933 InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
934 GenArithOpIntLit(opcode, rl_dest, rl_src[0],
935 mir_graph_->ConstantValue(rl_src[1].orig_sreg));
936 } else {
937 GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
938 }
939 break;
940
941 case Instruction::SUB_INT:
942 case Instruction::SUB_INT_2ADDR:
943 case Instruction::DIV_INT:
944 case Instruction::DIV_INT_2ADDR:
945 case Instruction::REM_INT:
946 case Instruction::REM_INT_2ADDR:
947 case Instruction::SHL_INT:
948 case Instruction::SHL_INT_2ADDR:
949 case Instruction::SHR_INT:
950 case Instruction::SHR_INT_2ADDR:
951 case Instruction::USHR_INT:
952 case Instruction::USHR_INT_2ADDR:
953 if (rl_src[1].is_const &&
954 InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
955 GenArithOpIntLit(opcode, rl_dest, rl_src[0], mir_graph_->ConstantValue(rl_src[1]));
956 } else {
957 GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
958 }
959 break;
960
961 case Instruction::ADD_LONG:
962 case Instruction::SUB_LONG:
963 case Instruction::AND_LONG:
964 case Instruction::OR_LONG:
965 case Instruction::XOR_LONG:
966 case Instruction::ADD_LONG_2ADDR:
967 case Instruction::SUB_LONG_2ADDR:
968 case Instruction::AND_LONG_2ADDR:
969 case Instruction::OR_LONG_2ADDR:
970 case Instruction::XOR_LONG_2ADDR:
971 if (rl_src[0].is_const || rl_src[1].is_const) {
972 GenArithImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
973 break;
974 }
975 // Note: intentional fallthrough.
976
977 case Instruction::MUL_LONG:
978 case Instruction::DIV_LONG:
979 case Instruction::REM_LONG:
980 case Instruction::MUL_LONG_2ADDR:
981 case Instruction::DIV_LONG_2ADDR:
982 case Instruction::REM_LONG_2ADDR:
983 GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
984 break;
985
986 case Instruction::SHL_LONG:
987 case Instruction::SHR_LONG:
988 case Instruction::USHR_LONG:
989 case Instruction::SHL_LONG_2ADDR:
990 case Instruction::SHR_LONG_2ADDR:
991 case Instruction::USHR_LONG_2ADDR:
992 if (rl_src[1].is_const) {
993 GenShiftImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
994 } else {
995 GenShiftOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
996 }
997 break;
998
999 case Instruction::ADD_FLOAT:
1000 case Instruction::SUB_FLOAT:
1001 case Instruction::MUL_FLOAT:
1002 case Instruction::DIV_FLOAT:
1003 case Instruction::REM_FLOAT:
1004 case Instruction::ADD_FLOAT_2ADDR:
1005 case Instruction::SUB_FLOAT_2ADDR:
1006 case Instruction::MUL_FLOAT_2ADDR:
1007 case Instruction::DIV_FLOAT_2ADDR:
1008 case Instruction::REM_FLOAT_2ADDR:
1009 GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[1]);
1010 break;
1011
1012 case Instruction::ADD_DOUBLE:
1013 case Instruction::SUB_DOUBLE:
1014 case Instruction::MUL_DOUBLE:
1015 case Instruction::DIV_DOUBLE:
1016 case Instruction::REM_DOUBLE:
1017 case Instruction::ADD_DOUBLE_2ADDR:
1018 case Instruction::SUB_DOUBLE_2ADDR:
1019 case Instruction::MUL_DOUBLE_2ADDR:
1020 case Instruction::DIV_DOUBLE_2ADDR:
1021 case Instruction::REM_DOUBLE_2ADDR:
1022 GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[1]);
1023 break;
1024
1025 case Instruction::RSUB_INT:
1026 case Instruction::ADD_INT_LIT16:
1027 case Instruction::MUL_INT_LIT16:
1028 case Instruction::DIV_INT_LIT16:
1029 case Instruction::REM_INT_LIT16:
1030 case Instruction::AND_INT_LIT16:
1031 case Instruction::OR_INT_LIT16:
1032 case Instruction::XOR_INT_LIT16:
1033 case Instruction::ADD_INT_LIT8:
1034 case Instruction::RSUB_INT_LIT8:
1035 case Instruction::MUL_INT_LIT8:
1036 case Instruction::DIV_INT_LIT8:
1037 case Instruction::REM_INT_LIT8:
1038 case Instruction::AND_INT_LIT8:
1039 case Instruction::OR_INT_LIT8:
1040 case Instruction::XOR_INT_LIT8:
1041 case Instruction::SHL_INT_LIT8:
1042 case Instruction::SHR_INT_LIT8:
1043 case Instruction::USHR_INT_LIT8:
1044 GenArithOpIntLit(opcode, rl_dest, rl_src[0], vC);
1045 break;
1046
1047 default:
1048 LOG(FATAL) << "Unexpected opcode: " << opcode;
1049 }
1050 DCHECK(CheckCorePoolSanity());
1051 } // NOLINT(readability/fn_size)
1052
1053 // Process extended MIR instructions
1054 void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
1055 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
1056 case kMirOpCopy: {
1057 RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
1058 RegLocation rl_dest = mir_graph_->GetDest(mir);
1059 StoreValue(rl_dest, rl_src);
1060 break;
1061 }
1062 case kMirOpFusedCmplFloat:
1063 GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, false /*double*/);
1064 break;
1065 case kMirOpFusedCmpgFloat:
1066 GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, false /*double*/);
1067 break;
1068 case kMirOpFusedCmplDouble:
1069 GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, true /*double*/);
1070 break;
1071 case kMirOpFusedCmpgDouble:
1072 GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, true /*double*/);
1073 break;
1074 case kMirOpFusedCmpLong:
1075 GenFusedLongCmpBranch(bb, mir);
1076 break;
1077 case kMirOpSelect:
1078 GenSelect(bb, mir);
1079 break;
1080 case kMirOpPhi:
1081 case kMirOpNop:
1082 case kMirOpNullCheck:
1083 case kMirOpRangeCheck:
1084 case kMirOpDivZeroCheck:
1085 case kMirOpCheck:
1086 case kMirOpCheckPart2:
1087 // Ignore these known opcodes
1088 break;
1089 default:
1090 // Give the backends a chance to handle unknown extended MIR opcodes.
1091 GenMachineSpecificExtendedMethodMIR(bb, mir);
1092 break;
1093 }
1094 }
1095
1096 void Mir2Lir::GenPrintLabel(MIR* mir) {
1097 // Mark the beginning of a Dalvik instruction for line tracking.
1098 if (cu_->verbose) {
1099 char* inst_str = mir_graph_->GetDalvikDisassembly(mir);
1100 MarkBoundary(mir->offset, inst_str);
1101 }
1102 }
1103
1104 // Handle the content in each basic block.
1105 bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
1106 if (bb->block_type == kDead) return false;
1107 current_dalvik_offset_ = bb->start_offset;
1108 MIR* mir;
1109 int block_id = bb->id;
1110
1111 block_label_list_[block_id].operands[0] = bb->start_offset;
1112
1113 // Insert the block label.
1114 block_label_list_[block_id].opcode = kPseudoNormalBlockLabel;
1115 block_label_list_[block_id].flags.fixup = kFixupLabel;
1116 AppendLIR(&block_label_list_[block_id]);
1117
1118 LIR* head_lir = NULL;
1119
1120 // If this is a catch block, export the start address.
1121 if (bb->catch_entry) {
1122 head_lir = NewLIR0(kPseudoExportedPC);
1123 }
1124
1125 // Free temp registers and reset redundant store tracking.
1126 ClobberAllTemps();
1127
1128 if (bb->block_type == kEntryBlock) {
1129 ResetRegPool();
1130 int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
1131 GenEntrySequence(&mir_graph_->reg_location_[start_vreg],
1132 mir_graph_->reg_location_[mir_graph_->GetMethodSReg()]);
1133 } else if (bb->block_type == kExitBlock) {
1134 ResetRegPool();
1135 GenExitSequence();
1136 }
1137
1138 for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
1139 ResetRegPool();
1140 if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
1141 ClobberAllTemps();
1142 // Reset temp allocation to minimize differences when A/B testing.
1143 reg_pool_->ResetNextTemp();
1144 }
1145
1146 if (cu_->disable_opt & (1 << kSuppressLoads)) {
1147 ResetDefTracking();
1148 }
1149
1150 // Reset temp tracking sanity check.
1151 if (kIsDebugBuild) {
1152 live_sreg_ = INVALID_SREG;
1153 }
1154
1155 current_dalvik_offset_ = mir->offset;
1156 int opcode = mir->dalvikInsn.opcode;
1157
1158 GenPrintLabel(mir);
1159
1160 // Remember the first LIR for this block.
1161 if (head_lir == NULL) {
1162 head_lir = &block_label_list_[bb->id];
1163 // Set the first label as a scheduling barrier.
1164 DCHECK(!head_lir->flags.use_def_invalid);
1165 head_lir->u.m.def_mask = &kEncodeAll;
1166 }
1167
1168 if (opcode == kMirOpCheck) {
1169 // Combine check and work halves of throwing instruction.
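// The check half adopts the work half's opcode, meta and SSA rep so the real instruction is
// generated at the check's offset; the work half becomes a kMirOpCheckPart2 placeholder, which
// HandleExtendedMethodMIR ignores.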
1170 MIR* work_half = mir->meta.throw_insn;
1171 mir->dalvikInsn.opcode = work_half->dalvikInsn.opcode;
1172 mir->meta = work_half->meta; // Whatever the work_half had, we need to copy it.
1173 opcode = work_half->dalvikInsn.opcode;
1174 SSARepresentation* ssa_rep = work_half->ssa_rep;
1175 work_half->ssa_rep = mir->ssa_rep;
1176 mir->ssa_rep = ssa_rep;
1177 work_half->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheckPart2);
1178 work_half->meta.throw_insn = mir;
1179 }
1180
1181 if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
1182 HandleExtendedMethodMIR(bb, mir);
1183 continue;
1184 }
1185
1186 CompileDalvikInstruction(mir, bb, block_label_list_);
1187 }
1188
1189 if (head_lir) {
1190 // Eliminate redundant loads/stores and delay stores into later slots.
1191 ApplyLocalOptimizations(head_lir, last_lir_insn_);
1192 }
1193 return false;
1194 }
1195
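// Entry point for special-case compilation: locates the first DalvikByteCode block in DFS order
// and tries to emit the entire method via GenSpecialCase. Returns false if no such block exists
// or the pattern cannot be specialized.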
1196 bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
1197 cu_->NewTimingSplit("SpecialMIR2LIR");
1198 // Find the first DalvikByteCode block.
1199 int num_reachable_blocks = mir_graph_->GetNumReachableBlocks();
1200 BasicBlock* bb = NULL;
1201 for (int idx = 0; idx < num_reachable_blocks; idx++) {
1202 // TODO: no direct access of growable lists.
1203 int dfs_index = mir_graph_->GetDfsOrder()->Get(idx);
1204 bb = mir_graph_->GetBasicBlock(dfs_index);
1205 if (bb->block_type == kDalvikByteCode) {
1206 break;
1207 }
1208 }
1209 if (bb == NULL) {
1210 return false;
1211 }
1212 DCHECK_EQ(bb->start_offset, 0);
1213 DCHECK(bb->first_mir_insn != NULL);
1214
1215 // Get the first instruction.
1216 MIR* mir = bb->first_mir_insn;
1217
1218 // Free temp registers and reset redundant store tracking.
1219 ResetRegPool();
1220 ResetDefTracking();
1221 ClobberAllTemps();
1222
1223 return GenSpecialCase(bb, mir, special);
1224 }
1225
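// Main MIR-to-LIR pass: allocates one label per basic block, then walks the blocks in pre-order
// DFS layout order, generating code for each and inserting an unconditional branch whenever the
// fall-through block is not laid out immediately after the current one.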
1226 void Mir2Lir::MethodMIR2LIR() {
1227 cu_->NewTimingSplit("MIR2LIR");
1228
1229 // Hold the labels of each block.
1230 block_label_list_ =
1231 static_cast<LIR*>(arena_->Alloc(sizeof(LIR) * mir_graph_->GetNumBlocks(),
1232 kArenaAllocLIR));
1233
1234 PreOrderDfsIterator iter(mir_graph_);
1235 BasicBlock* curr_bb = iter.Next();
1236 BasicBlock* next_bb = iter.Next();
1237 while (curr_bb != NULL) {
1238 MethodBlockCodeGen(curr_bb);
1239 // If the fall_through block is no longer laid out consecutively, drop in a branch.
1240 BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
1241 if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
1242 OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
1243 }
1244 curr_bb = next_bb;
1245 do {
1246 next_bb = iter.Next();
1247 } while ((next_bb != NULL) && (next_bb->block_type == kDead));
1248 }
1249 HandleSlowPaths();
1250 }
1251
1252 //
1253 // LIR Slow Path
1254 //
1255
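// Sets the Dalvik PC recorded for the slow path, emits the label LIR that starts it, and points
// the fast-path branch (fromfast_) at that label.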
1256 LIR* Mir2Lir::LIRSlowPath::GenerateTargetLabel(int opcode) {
1257 m2l_->SetCurrentDexPc(current_dex_pc_);
1258 LIR* target = m2l_->NewLIR0(opcode);
1259 fromfast_->target = target;
1260 return target;
1261 }
1262
1263
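// Debug consistency check for a RegStorage: verifies 64-bit width for wide values (and for
// references on 64-bit targets) and the expected float/core kind, either CHECK-failing or merely
// warning depending on |fail| and |report|.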
1264 void Mir2Lir::CheckRegStorageImpl(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp,
1265 bool fail, bool report)
1266 const {
1267 if (rs.Valid()) {
1268 if (ref == RefCheck::kCheckRef) {
1269 if (cu_->target64 && !rs.Is64Bit()) {
1270 if (fail) {
1271 CHECK(false) << "Reg storage not 64b for ref.";
1272 } else if (report) {
1273 LOG(WARNING) << "Reg storage not 64b for ref.";
1274 }
1275 }
1276 }
1277 if (wide == WidenessCheck::kCheckWide) {
1278 if (!rs.Is64Bit()) {
1279 if (fail) {
1280 CHECK(false) << "Reg storage not 64b for wide.";
1281 } else if (report) {
1282 LOG(WARNING) << "Reg storage not 64b for wide.";
1283 }
1284 }
1285 }
1286 // A tighter check would be nice, but for now soft-float will not check float at all.
1287 if (fp == FPCheck::kCheckFP && cu_->instruction_set != kArm) {
1288 if (!rs.IsFloat()) {
1289 if (fail) {
1290 CHECK(false) << "Reg storage not float for fp.";
1291 } else if (report) {
1292 LOG(WARNING) << "Reg storage not float for fp.";
1293 }
1294 }
1295 } else if (fp == FPCheck::kCheckNotFP) {
1296 if (rs.IsFloat()) {
1297 if (fail) {
1298 CHECK(false) << "Reg storage float for not-fp.";
1299 } else if (report) {
1300 LOG(WARNING) << "Reg storage float for not-fp.";
1301 }
1302 }
1303 }
1304 }
1305 }
1306
1307 void Mir2Lir::CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const {
1308 // Regrettably can't use the fp part of rl, as that is not really indicative of where a value
1309 // will be stored.
1310 CheckRegStorageImpl(rl.reg, rl.wide ? WidenessCheck::kCheckWide : WidenessCheck::kCheckNotWide,
1311 rl.ref ? RefCheck::kCheckRef : RefCheck::kCheckNotRef, FPCheck::kIgnoreFP, fail, report);
1312 }
1313
1314 size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
1315 UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
1316 return 0;
1317 }
1318
1319 } // namespace art
1320