/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_sinking.h"

#include "base/arena_bit_vector.h"
#include "base/array_ref.h"
#include "base/bit_vector-inl.h"
#include "base/globals.h"
#include "base/logging.h"
#include "base/scoped_arena_allocator.h"
#include "base/scoped_arena_containers.h"
#include "common_dominator.h"
#include "nodes.h"

namespace art HIDDEN {

bool CodeSinking::Run() {
  if (graph_->GetExitBlock() == nullptr) {
    // Infinite loop, just bail.
    return false;
  }

  UncommonBranchSinking();
  ReturnSinking();
  return true;
}

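// Sink code into blocks that end up throwing, which we treat as "uncommon" paths, so that the
// common, non-throwing paths do not pay for work that is only needed when an exception is
// raised. Illustrative example (hypothetical Java source, not taken from a specific test):
//
//   Object o = new Object();      // only used by the throwing branch below
//   if (flag) {
//     throw new IllegalStateException(o.toString());
//   }
//   return 0;
//
// Here the allocation of `o` can be sunk into the block ending in the Throw.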
void CodeSinking::UncommonBranchSinking() {
  HBasicBlock* exit = graph_->GetExitBlock();
  DCHECK(exit != nullptr);
  // TODO(ngeoffray): we do not profile branches yet, so use throw instructions
  // as an indicator of an uncommon branch.
  for (HBasicBlock* exit_predecessor : exit->GetPredecessors()) {
    HInstruction* last = exit_predecessor->GetLastInstruction();

    // TryBoundary instructions are sometimes inserted between the last instruction (e.g. Throw,
    // Return) and Exit. We don't want to use a TryBoundary for our "uncommon branch" heuristic
    // because it is not as good an indicator as a throwing branch, so we skip it and fetch the
    // actual last instruction.
    if (last->IsTryBoundary()) {
      // We have an exit try boundary. Fetch the previous instruction.
      DCHECK(!last->AsTryBoundary()->IsEntry());
      if (last->GetPrevious() == nullptr) {
        DCHECK(exit_predecessor->IsSingleTryBoundary());
        exit_predecessor = exit_predecessor->GetSinglePredecessor();
        last = exit_predecessor->GetLastInstruction();
      } else {
        last = last->GetPrevious();
      }
    }

    // Any predecessor of the exit block that does not return throws an exception.
    if (!last->IsReturn() && !last->IsReturnVoid()) {
      SinkCodeToUncommonBranch(exit_predecessor);
    }
  }
}

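// Returns whether `instruction` is a candidate for sinking. Candidates are movable instructions
// that cannot throw, allocations and string loads (which may throw but are safe to move), stores
// into local (non-escaping) allocations, and constructor fences tied to such an allocation.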
static bool IsInterestingInstruction(HInstruction* instruction) {
  // Instructions from the entry graph (for example constants) are never interesting to move.
  if (instruction->GetBlock() == instruction->GetBlock()->GetGraph()->GetEntryBlock()) {
    return false;
  }
  // We want to move moveable instructions that cannot throw, as well as
  // heap stores and allocations.

  // Volatile stores cannot be moved.
  if (instruction->IsInstanceFieldSet()) {
    if (instruction->AsInstanceFieldSet()->IsVolatile()) {
      return false;
    }
  }

  // Check allocations and strings first, as they can throw, but it is safe to move them.
  if (instruction->IsNewInstance() || instruction->IsNewArray() || instruction->IsLoadString()) {
    return true;
  }

  // Check it is safe to move ConstructorFence.
  // (Safe to move ConstructorFence for only protecting the new-instance but not for finals.)
  if (instruction->IsConstructorFence()) {
    HConstructorFence* ctor_fence = instruction->AsConstructorFence();

    // A fence with "0" inputs is dead and should've been removed in a prior pass.
    DCHECK_NE(0u, ctor_fence->InputCount());

    // TODO: this should be simplified to 'return true' since it's
    // potentially pessimizing any code sinking for inlined constructors with final fields.
    // TODO: double check that if the final field assignments are not moved,
    // then the fence is not moved either.

    return ctor_fence->GetAssociatedAllocation() != nullptr;
  }

  // All other instructions that can throw cannot be moved.
  if (instruction->CanThrow()) {
    return false;
  }

  // We can only store on local allocations. Other heap references can
  // be escaping. Note that allocations can escape too, but we only move
  // allocations if their users can move too, or are in the list of
  // post dominated blocks.
  if (instruction->IsInstanceFieldSet()) {
    if (!instruction->InputAt(0)->IsNewInstance()) {
      return false;
    }
  }

  if (instruction->IsArraySet()) {
    if (!instruction->InputAt(0)->IsNewArray()) {
      return false;
    }
  }

  // Heap accesses cannot go past instructions that have memory side effects, which
  // we are not tracking here. Note that the load/store elimination optimization
  // runs before this optimization, and should have removed interesting ones.
  // In theory, we could handle loads of local allocations, but this is currently
  // hard to test, as LSE removes them.
  if (instruction->IsStaticFieldGet() ||
      instruction->IsInstanceFieldGet() ||
      instruction->IsPredicatedInstanceFieldGet() ||
      instruction->IsArrayGet()) {
    return false;
  }

  if (instruction->IsInstanceFieldSet() ||
      instruction->IsArraySet() ||
      instruction->CanBeMoved()) {
    return true;
  }
  return false;
}

static void AddInstruction(HInstruction* instruction,
                           const ArenaBitVector& processed_instructions,
                           const ArenaBitVector& discard_blocks,
                           ScopedArenaVector<HInstruction*>* worklist) {
  // Add to the work list if the instruction is not in the list of blocks
  // to discard, hasn't been already processed and is of interest.
  if (!discard_blocks.IsBitSet(instruction->GetBlock()->GetBlockId()) &&
      !processed_instructions.IsBitSet(instruction->GetId()) &&
      IsInterestingInstruction(instruction)) {
    worklist->push_back(instruction);
  }
}

static void AddInputs(HInstruction* instruction,
                      const ArenaBitVector& processed_instructions,
                      const ArenaBitVector& discard_blocks,
                      ScopedArenaVector<HInstruction*>* worklist) {
  for (HInstruction* input : instruction->GetInputs()) {
    AddInstruction(input, processed_instructions, discard_blocks, worklist);
  }
}

static void AddInputs(HBasicBlock* block,
                      const ArenaBitVector& processed_instructions,
                      const ArenaBitVector& discard_blocks,
                      ScopedArenaVector<HInstruction*>* worklist) {
  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
    AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
  }
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
  }
}

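// Returns whether `user` is a store into (or a constructor fence for) the allocation
// `instruction` that lives outside the post-dominated region. Such uses are ignored when looking
// for the ideal position of the allocation itself, since those stores are relocated separately.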
static bool ShouldFilterUse(HInstruction* instruction,
                            HInstruction* user,
                            const ArenaBitVector& post_dominated) {
  if (instruction->IsNewInstance()) {
    return (user->IsInstanceFieldSet() || user->IsConstructorFence()) &&
           (user->InputAt(0) == instruction) &&
           !post_dominated.IsBitSet(user->GetBlock()->GetBlockId());
  } else if (instruction->IsNewArray()) {
    return (user->IsArraySet() || user->IsConstructorFence()) &&
           (user->InputAt(0) == instruction) &&
           !post_dominated.IsBitSet(user->GetBlock()->GetBlockId());
  }
  return false;
}

// Find the ideal position for moving `instruction`. If `filter` is true,
// we filter out store instructions to that instruction, which are processed
// first in step (3) of the sinking algorithm.
// This method is tailored to the sinking algorithm, unlike
// the generic HInstruction::MoveBeforeFirstUserAndOutOfLoops.
static HInstruction* FindIdealPosition(HInstruction* instruction,
                                       const ArenaBitVector& post_dominated,
                                       bool filter = false) {
  DCHECK(!instruction->IsPhi());  // Makes no sense for Phi.

  // Find the target block.
  CommonDominator finder(/* block= */ nullptr);
  for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
    HInstruction* user = use.GetUser();
    if (!(filter && ShouldFilterUse(instruction, user, post_dominated))) {
      HBasicBlock* block = user->GetBlock();
      if (user->IsPhi()) {
        // Special case phis by taking the incoming block for regular ones,
        // or the dominator for catch phis.
        block = user->AsPhi()->IsCatchPhi()
            ? block->GetDominator()
            : block->GetPredecessors()[use.GetIndex()];
      }
      finder.Update(block);
    }
  }
  for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
    DCHECK(!use.GetUser()->GetHolder()->IsPhi());
    DCHECK_IMPLIES(filter,
                   !ShouldFilterUse(instruction, use.GetUser()->GetHolder(), post_dominated));
    finder.Update(use.GetUser()->GetHolder()->GetBlock());
  }
  HBasicBlock* target_block = finder.Get();
  if (target_block == nullptr) {
    // No user we can go next to? Likely a LSE or DCE limitation.
    return nullptr;
  }

  // Move to the first dominator not in a loop, if we can. We only do this if we are trying to
  // hoist `instruction` out of a loop it wasn't a part of.
  const HLoopInformation* loop_info = instruction->GetBlock()->GetLoopInformation();
  while (target_block->IsInLoop() && target_block->GetLoopInformation() != loop_info) {
    if (!post_dominated.IsBitSet(target_block->GetDominator()->GetBlockId())) {
      break;
    }
    target_block = target_block->GetDominator();
    DCHECK(target_block != nullptr);
  }

  if (instruction->CanThrow()) {
    // Consistency check: the try/catch handling below walks up the dominator tree, and we
    // shouldn't land in a loop if we weren't in one before.
    const bool was_in_loop = target_block->IsInLoop();

    // We cannot move an instruction that can throw into a try that said instruction is not a part
    // of already, as that would mean it will throw into a different catch block. In short, for
    // throwing instructions:
    // * If the throwing instruction is part of a try, it should only be sunk into that same try.
    // * If the throwing instruction is not part of any try, it shouldn't be sunk into any try.
    if (instruction->GetBlock()->IsTryBlock()) {
      const HTryBoundary& try_entry =
          instruction->GetBlock()->GetTryCatchInformation()->GetTryEntry();
      while (!(target_block->IsTryBlock() &&
               try_entry.HasSameExceptionHandlersAs(
                   target_block->GetTryCatchInformation()->GetTryEntry()))) {
        target_block = target_block->GetDominator();
        if (!post_dominated.IsBitSet(target_block->GetBlockId())) {
          // We couldn't find a suitable block.
          return nullptr;
        }
      }
    } else {
      // Search for the first block also not in a try block.
      while (target_block->IsTryBlock()) {
        target_block = target_block->GetDominator();
        if (!post_dominated.IsBitSet(target_block->GetBlockId())) {
          // We couldn't find a suitable block.
          return nullptr;
        }
      }
    }

    DCHECK_IMPLIES(target_block->IsInLoop(), was_in_loop);
  }

  // Find insertion position. No need to filter anymore, as we have found a
  // target block.
  HInstruction* insert_pos = nullptr;
  for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
    if (use.GetUser()->GetBlock() == target_block &&
        (insert_pos == nullptr || use.GetUser()->StrictlyDominates(insert_pos))) {
      insert_pos = use.GetUser();
    }
  }
  for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
    HEnvironment* env = use.GetUser();
    HInstruction* user = env->GetHolder();
    if (user->GetBlock() == target_block &&
        (insert_pos == nullptr || user->StrictlyDominates(insert_pos))) {
      if (target_block->IsCatchBlock() && target_block->GetFirstInstruction() == user) {
        // We can sink the instructions past the environment setting Nop. If we do that, we have to
        // remove said instruction from the environment. Since we know that we will be sinking the
        // instruction to this block and there are no more instructions to consider, we can safely
        // remove it from the environment now.
        DCHECK(target_block->GetFirstInstruction()->IsNop());
        env->RemoveAsUserOfInput(use.GetIndex());
        env->SetRawEnvAt(use.GetIndex(), /*instruction=*/ nullptr);
      } else {
        insert_pos = user;
      }
    }
  }
  if (insert_pos == nullptr) {
    // No user in `target_block`, insert before the control flow instruction.
    insert_pos = target_block->GetLastInstruction();
    DCHECK(insert_pos->IsControlFlow());
    // Avoid splitting HCondition from HIf to prevent unnecessary materialization.
    if (insert_pos->IsIf()) {
      HInstruction* if_input = insert_pos->AsIf()->InputAt(0);
      if (if_input == insert_pos->GetPrevious()) {
        insert_pos = if_input;
      }
    }
  }
  DCHECK(!insert_pos->IsPhi());
  return insert_pos;
}

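// Tries to sink interesting instructions into `end_block` and the blocks it post-dominates,
// in three steps:
//   (1) collect a subset of blocks post-dominated by `end_block`,
//   (2) use a worklist to find instructions whose (environment) uses are all inside that
//       region, and
//   (3) move those candidates, and the stores into them, to their ideal position.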
void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
  // Local allocator to discard data structures created below at the end of this optimization.
  ScopedArenaAllocator allocator(graph_->GetArenaStack());

  size_t number_of_instructions = graph_->GetCurrentInstructionId();
  ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
  ArenaBitVector processed_instructions(
      &allocator, number_of_instructions, /* expandable= */ false);
  processed_instructions.ClearAllBits();
  ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable= */ false);
  post_dominated.ClearAllBits();
  ArenaBitVector instructions_that_can_move(
      &allocator, number_of_instructions, /* expandable= */ false);
  instructions_that_can_move.ClearAllBits();
  ScopedArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));

  // Step (1): Visit post order to get a subset of blocks post dominated by `end_block`.
  // TODO(ngeoffray): Getting the full set of post-dominated blocks should be done by
  // computing the post dominator tree, but that could be too time consuming. Also,
  // we should start the analysis from blocks dominated by an uncommon branch, but we
  // don't profile branches yet.
  bool found_block = false;
  for (HBasicBlock* block : graph_->GetPostOrder()) {
    if (block == end_block) {
      found_block = true;
      post_dominated.SetBit(block->GetBlockId());
    } else if (found_block) {
      bool is_post_dominated = true;
      DCHECK_NE(block, graph_->GetExitBlock())
          << "We shouldn't encounter the exit block after `end_block`.";

      // BasicBlocks that are try entries look like this:
      //   BasicBlock i:
      //     instr 1
      //     ...
      //     instr N
      //     TryBoundary kind:entry ---Try begins here---
      //
      // Due to how our BasicBlocks are structured, BasicBlock i will have an xhandler successor
      // since we are starting a try. If we use `GetSuccessors` for this case, we will check if
      // the catch block is post_dominated.
      //
      // However, this catch block doesn't matter: when we sink the instruction into that
      // BasicBlock i, we do it before the TryBoundary (i.e. outside of the try and outside the
      // catch's domain). We can ignore catch blocks using `GetNormalSuccessors` to sink code
      // right before the start of a try block.
      //
      // On the other side of the coin, BasicBlocks that are try exits look like this:
      //   BasicBlock j:
      //     instr 1
      //     ...
      //     instr N
      //     TryBoundary kind:exit ---Try ends here---
      //
      // If we sink to these basic blocks we would be sinking inside of the try so we would like
      // to check the catch block for post dominance.
      const bool ends_with_try_boundary_entry =
          block->EndsWithTryBoundary() && block->GetLastInstruction()->AsTryBoundary()->IsEntry();
      ArrayRef<HBasicBlock* const> successors =
          ends_with_try_boundary_entry ? block->GetNormalSuccessors() :
                                         ArrayRef<HBasicBlock* const>(block->GetSuccessors());
      for (HBasicBlock* successor : successors) {
        if (!post_dominated.IsBitSet(successor->GetBlockId())) {
          is_post_dominated = false;
          break;
        }
      }
      if (is_post_dominated) {
        post_dominated.SetBit(block->GetBlockId());
      }
    }
  }

  // Now that we have found a subset of post-dominated blocks, add to the worklist all inputs
  // of instructions in these blocks that are not themselves in these blocks.
  // Also find the common dominator of the found post dominated blocks, to help filtering
  // out un-movable uses in step (2).
  CommonDominator finder(end_block);
  for (size_t i = 0, e = graph_->GetBlocks().size(); i < e; ++i) {
    if (post_dominated.IsBitSet(i)) {
      finder.Update(graph_->GetBlocks()[i]);
      AddInputs(graph_->GetBlocks()[i], processed_instructions, post_dominated, &worklist);
    }
  }
  HBasicBlock* common_dominator = finder.Get();

  // Step (2): iterate over the worklist to find sinking candidates.
  while (!worklist.empty()) {
    HInstruction* instruction = worklist.back();
    if (processed_instructions.IsBitSet(instruction->GetId())) {
      // The instruction has already been processed, continue. This happens
      // when the instruction is the input/user of multiple instructions.
      worklist.pop_back();
      continue;
    }
    bool all_users_in_post_dominated_blocks = true;
    bool can_move = true;
    // Check users of the instruction.
    for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
      HInstruction* user = use.GetUser();
      if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId()) &&
          !instructions_that_can_move.IsBitSet(user->GetId())) {
        all_users_in_post_dominated_blocks = false;
        // If we've already processed this user, or the user cannot be moved, or
        // is not dominating the post dominated blocks, bail.
        // TODO(ngeoffray): The domination check is an approximation. We should
        // instead check if the dominated blocks post dominate the user's block,
        // but we do not have post dominance information here.
        if (processed_instructions.IsBitSet(user->GetId()) ||
            !IsInterestingInstruction(user) ||
            !user->GetBlock()->Dominates(common_dominator)) {
          can_move = false;
          break;
        }
      }
    }

    // Check environment users of the instruction. Some of these users require
    // the instruction not to move.
    if (all_users_in_post_dominated_blocks) {
      for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
        HEnvironment* environment = use.GetUser();
        HInstruction* user = environment->GetHolder();
        if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId())) {
          if (graph_->IsDebuggable() ||
              user->IsDeoptimize() ||
              user->CanThrowIntoCatchBlock() ||
              (user->IsSuspendCheck() && graph_->IsCompilingOsr())) {
            can_move = false;
            break;
          }
        }
      }
    }
    if (!can_move) {
      // Instruction cannot be moved, mark it as processed and remove it from the work
      // list.
      processed_instructions.SetBit(instruction->GetId());
      worklist.pop_back();
    } else if (all_users_in_post_dominated_blocks) {
      // Instruction is a candidate for being sunk. Mark it as such, remove it from the
      // work list, and add its inputs to the work list.
      instructions_that_can_move.SetBit(instruction->GetId());
      move_in_order.push_back(instruction);
      processed_instructions.SetBit(instruction->GetId());
      worklist.pop_back();
      AddInputs(instruction, processed_instructions, post_dominated, &worklist);
      // Drop the environment uses not in the list of post-dominated blocks. This is
      // to help step (3) of this optimization, when we start moving instructions
      // closer to their use.
      for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
        HEnvironment* environment = use.GetUser();
        HInstruction* user = environment->GetHolder();
        if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId())) {
          environment->RemoveAsUserOfInput(use.GetIndex());
          environment->SetRawEnvAt(use.GetIndex(), nullptr);
        }
      }
    } else {
      // The information we have on the users was not enough to decide whether the
      // instruction could be moved.
      // Add the users to the work list, and keep the instruction in the work list
      // to process it again once all users have been processed.
      for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
        AddInstruction(use.GetUser(), processed_instructions, post_dominated, &worklist);
      }
    }
  }

  // Make sure we process instructions in dominated order. This is required for heap
  // stores.
  std::sort(move_in_order.begin(), move_in_order.end(), [](HInstruction* a, HInstruction* b) {
    return b->StrictlyDominates(a);
  });

  // Step (3): Try to move sinking candidates.
  for (HInstruction* instruction : move_in_order) {
    HInstruction* position = nullptr;
    if (instruction->IsArraySet()
            || instruction->IsInstanceFieldSet()
            || instruction->IsConstructorFence()) {
      if (!instructions_that_can_move.IsBitSet(instruction->InputAt(0)->GetId())) {
        // A store can trivially move, but it can safely do so only if the heap
        // location it stores to can also move.
        // TODO(ngeoffray): Handle allocation/store cycles by pruning these instructions
        // from the set and all their inputs.
        continue;
      }
      // Find the position of the instruction we're storing into, filtering out this
      // store and all other stores to that instruction.
      position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter= */ true);

      // The position needs to be dominated by the store, in order for the store to move there.
      if (position == nullptr || !instruction->GetBlock()->Dominates(position->GetBlock())) {
        continue;
      }
    } else {
      // Find the ideal position within the post dominated blocks.
      position = FindIdealPosition(instruction, post_dominated);
      if (position == nullptr) {
        continue;
      }
    }
    // Bail if we could not find a position in the post dominated blocks (for example,
    // if there are multiple users whose common dominator is not in the list of
    // post dominated blocks).
    if (!post_dominated.IsBitSet(position->GetBlock()->GetBlockId())) {
      continue;
    }
    MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSunk);
    instruction->MoveBefore(position, /* do_checks= */ false);
  }
}

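// Merges the Return (or ReturnVoid) instructions of the exit block's predecessors into a single
// return in a new block, using a Phi for the return value when needed. Illustrative example
// (hypothetical Java source, not taken from a specific test):
//
//   if (x) { return a; } else { return b; }
//
// becomes a single block ending in `Return(Phi(a, b))`, with both predecessors branching to it.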
void CodeSinking::ReturnSinking() {
  HBasicBlock* exit = graph_->GetExitBlock();
  DCHECK(exit != nullptr);

  int number_of_returns = 0;
  bool saw_return = false;
  for (HBasicBlock* pred : exit->GetPredecessors()) {
    // TODO(solanes): We might have Return/ReturnVoid->TryBoundary->Exit. We can theoretically
    // handle them and move them out of the TryBoundary. However, it is a border case and it adds
    // codebase complexity.
    if (pred->GetLastInstruction()->IsReturn() || pred->GetLastInstruction()->IsReturnVoid()) {
      saw_return |= pred->GetLastInstruction()->IsReturn();
      ++number_of_returns;
    }
  }

  if (number_of_returns < 2) {
    // Nothing to do.
    return;
  }

  // `new_block` will coalesce the Return instructions into Phi+Return, or the ReturnVoid
  // instructions into a ReturnVoid.
  HBasicBlock* new_block = new (graph_->GetAllocator()) HBasicBlock(graph_, exit->GetDexPc());
  if (saw_return) {
    HPhi* new_phi = nullptr;
    for (size_t i = 0; i < exit->GetPredecessors().size(); /*++i in loop*/) {
      HBasicBlock* pred = exit->GetPredecessors()[i];
      if (!pred->GetLastInstruction()->IsReturn()) {
        ++i;
        continue;
      }

      HReturn* ret = pred->GetLastInstruction()->AsReturn();
      if (new_phi == nullptr) {
        // Create the new_phi, if we haven't done so yet. We do it here since we need to know the
        // type to assign to it.
        new_phi = new (graph_->GetAllocator()) HPhi(graph_->GetAllocator(),
                                                    kNoRegNumber,
                                                    /*number_of_inputs=*/0,
                                                    ret->InputAt(0)->GetType());
        new_block->AddPhi(new_phi);
      }
      new_phi->AddInput(ret->InputAt(0));
      pred->ReplaceAndRemoveInstructionWith(ret,
                                            new (graph_->GetAllocator()) HGoto(ret->GetDexPc()));
      pred->ReplaceSuccessor(exit, new_block);
      // Since we are removing a predecessor, there's no need to increment `i`.
    }
    new_block->AddInstruction(new (graph_->GetAllocator()) HReturn(new_phi, exit->GetDexPc()));
  } else {
    for (size_t i = 0; i < exit->GetPredecessors().size(); /*++i in loop*/) {
      HBasicBlock* pred = exit->GetPredecessors()[i];
      if (!pred->GetLastInstruction()->IsReturnVoid()) {
        ++i;
        continue;
      }

      HReturnVoid* ret = pred->GetLastInstruction()->AsReturnVoid();
      pred->ReplaceAndRemoveInstructionWith(ret,
                                            new (graph_->GetAllocator()) HGoto(ret->GetDexPc()));
      pred->ReplaceSuccessor(exit, new_block);
      // Since we are removing a predecessor, there's no need to increment `i`.
    }
    new_block->AddInstruction(new (graph_->GetAllocator()) HReturnVoid(exit->GetDexPc()));
  }

  new_block->AddSuccessor(exit);
  graph_->AddBlock(new_block);

  // Recompute dominance since we added a new block.
  graph_->ClearDominanceInformation();
  graph_->ComputeDominanceInformation();
}

}  // namespace art