/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "register_allocation_resolver.h"

#include "code_generator.h"
#include "linear_order.h"
#include "ssa_liveness_analysis.h"

namespace art {

RegisterAllocationResolver::RegisterAllocationResolver(ArenaAllocator* allocator,
                                                       CodeGenerator* codegen,
                                                       const SsaLivenessAnalysis& liveness)
      : allocator_(allocator),
        codegen_(codegen),
        liveness_(liveness) {}

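// Entry point of the resolution phase, run after registers and spill slots
// have been assigned: it finalizes stack locations, updates safepoints,
// connects split intervals with moves, and resolves phi inputs and temps.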
void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoints,
                                         size_t reserved_out_slots,
                                         size_t int_spill_slots,
                                         size_t long_spill_slots,
                                         size_t float_spill_slots,
                                         size_t double_spill_slots,
                                         size_t catch_phi_spill_slots,
                                         const ArenaVector<LiveInterval*>& temp_intervals) {
  size_t spill_slots = int_spill_slots
                     + long_spill_slots
                     + float_spill_slots
                     + double_spill_slots
                     + catch_phi_spill_slots;

  // Update safepoints and calculate the size of the spills.
  UpdateSafepointLiveRegisters();
  size_t maximum_safepoint_spill_size = CalculateMaximumSafepointSpillSize(safepoints);

  // Computes frame size and spill mask.
  codegen_->InitializeCodeGeneration(spill_slots,
                                     maximum_safepoint_spill_size,
                                     reserved_out_slots,  // Includes slot(s) for the art method.
                                     codegen_->GetGraph()->GetLinearOrder());

  // Resolve outputs, including stack locations.
  // TODO: Use pointers of Location inside LiveInterval to avoid doing another iteration.
  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    LiveInterval* current = instruction->GetLiveInterval();
    LocationSummary* locations = instruction->GetLocations();
    Location location = locations->Out();
    if (instruction->IsParameterValue()) {
      // Now that we know the frame size, adjust the parameter's location.
      if (location.IsStackSlot()) {
        location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
        current->SetSpillSlot(location.GetStackIndex());
        locations->UpdateOut(location);
      } else if (location.IsDoubleStackSlot()) {
        location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
        current->SetSpillSlot(location.GetStackIndex());
        locations->UpdateOut(location);
      } else if (current->HasSpillSlot()) {
        current->SetSpillSlot(current->GetSpillSlot() + codegen_->GetFrameSize());
      }
    } else if (instruction->IsCurrentMethod()) {
      // The current method is always at offset 0.
      DCHECK(!current->HasSpillSlot() || (current->GetSpillSlot() == 0));
    } else if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
      DCHECK(current->HasSpillSlot());
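      // Catch phi slots live above all other spill slots in the frame (see
      // the stack layout documented below): their slot index is counted past
      // the outgoing argument area and the int/float/long/double spill areas.
      // Note that `spill_slots` already includes `catch_phi_spill_slots`,
      // hence the subtraction.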
      size_t slot = current->GetSpillSlot()
                    + spill_slots
                    + reserved_out_slots
                    - catch_phi_spill_slots;
      current->SetSpillSlot(slot * kVRegSize);
    } else if (current->HasSpillSlot()) {
      // Adjust the stack slot, now that we know how many slots each type needs.
      // This implementation lays out the stack as follows:
      // [parameter slots       ]
      // [art method (caller)   ]
      // [entry spill (core)    ]
      // [entry spill (float)   ]
      // [should_deoptimize flag] (this is optional)
      // [catch phi spill slots ]
      // [double spill slots    ]
      // [long spill slots      ]
      // [float spill slots     ]
      // [int/ref values        ]
      // [maximum out values    ] (number of arguments for calls)
      // [art method            ].
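      // For example, a float interval with spill slot index `i` ends up at
      // byte offset `(i + int_spill_slots + reserved_out_slots) * kVRegSize`,
      // as computed by the fall-through switch below.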
      size_t slot = current->GetSpillSlot();
      switch (current->GetType()) {
        case Primitive::kPrimDouble:
          slot += long_spill_slots;
          FALLTHROUGH_INTENDED;
        case Primitive::kPrimLong:
          slot += float_spill_slots;
          FALLTHROUGH_INTENDED;
        case Primitive::kPrimFloat:
          slot += int_spill_slots;
          FALLTHROUGH_INTENDED;
        case Primitive::kPrimNot:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
        case Primitive::kPrimByte:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimShort:
          slot += reserved_out_slots;
          break;
        case Primitive::kPrimVoid:
          LOG(FATAL) << "Unexpected type for interval " << current->GetType();
      }
      current->SetSpillSlot(slot * kVRegSize);
    }

    Location source = current->ToLocation();

    if (location.IsUnallocated()) {
      if (location.GetPolicy() == Location::kSameAsFirstInput) {
        if (locations->InAt(0).IsUnallocated()) {
          locations->SetInAt(0, source);
        } else {
          DCHECK(locations->InAt(0).Equals(source));
        }
      }
      locations->UpdateOut(source);
    } else {
      DCHECK(source.Equals(location));
    }
  }

  // Connect siblings and resolve inputs.
  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    ConnectSiblings(instruction->GetLiveInterval());
  }

  // Resolve non-linear control flow across branches. Order does not matter.
  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
    if (block->IsCatchBlock() ||
        (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible())) {
      // Instructions live at the top of catch blocks or irreducible loop
      // headers were forced to spill.
      if (kIsDebugBuild) {
        BitVector* live = liveness_.GetLiveInSet(*block);
        for (uint32_t idx : live->Indexes()) {
          LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
          LiveInterval* sibling = interval->GetSiblingAt(block->GetLifetimeStart());
          // `GetSiblingAt` returns the sibling that contains a position, but there could be
          // a lifetime hole in it. `CoversSlow` returns whether the interval is live at that
          // position.
          if ((sibling != nullptr) && sibling->CoversSlow(block->GetLifetimeStart())) {
            DCHECK(!sibling->HasRegister());
          }
        }
      }
    } else {
      BitVector* live = liveness_.GetLiveInSet(*block);
      for (uint32_t idx : live->Indexes()) {
        LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
        for (HBasicBlock* predecessor : block->GetPredecessors()) {
          ConnectSplitSiblings(interval, predecessor, block);
        }
      }
    }
  }

  // Resolve phi inputs. Order does not matter.
  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
    if (block->IsCatchBlock()) {
      // Catch phi values are set at runtime by the exception delivery mechanism.
    } else {
      for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
        HInstruction* phi = inst_it.Current();
        for (size_t i = 0, e = block->GetPredecessors().size(); i < e; ++i) {
          HBasicBlock* predecessor = block->GetPredecessors()[i];
          DCHECK_EQ(predecessor->GetNormalSuccessors().size(), 1u);
          HInstruction* input = phi->InputAt(i);
          Location source = input->GetLiveInterval()->GetLocationAt(
              predecessor->GetLifetimeEnd() - 1);
          Location destination = phi->GetLiveInterval()->ToLocation();
          InsertParallelMoveAtExitOf(predecessor, phi, source, destination);
        }
      }
    }
  }

  // Resolve temp locations.
  for (LiveInterval* temp : temp_intervals) {
    if (temp->IsHighInterval()) {
      // High intervals can be skipped; they are already handled by the low interval.
      continue;
    }
    HInstruction* at = liveness_.GetTempUser(temp);
    size_t temp_index = liveness_.GetTempIndex(temp);
    LocationSummary* locations = at->GetLocations();
    switch (temp->GetType()) {
      case Primitive::kPrimInt:
        locations->SetTempAt(temp_index, Location::RegisterLocation(temp->GetRegister()));
        break;

      case Primitive::kPrimDouble:
        if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
          Location location = Location::FpuRegisterPairLocation(
              temp->GetRegister(), temp->GetHighInterval()->GetRegister());
          locations->SetTempAt(temp_index, location);
        } else {
          locations->SetTempAt(temp_index, Location::FpuRegisterLocation(temp->GetRegister()));
        }
        break;

      default:
        LOG(FATAL) << "Unexpected type for temporary location "
                   << temp->GetType();
    }
  }
}

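// For each sibling interval that ended up in a register, record that
// register as live in the LocationSummary of every safepoint the interval
// covers. Stack-slot and constant locations need no such bookkeeping.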
void RegisterAllocationResolver::UpdateSafepointLiveRegisters() {
  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    for (LiveInterval* current = instruction->GetLiveInterval();
         current != nullptr;
         current = current->GetNextSibling()) {
      if (!current->HasRegister()) {
        continue;
      }
      Location source = current->ToLocation();
      for (SafepointPosition* safepoint_position = current->GetFirstSafepoint();
           safepoint_position != nullptr;
           safepoint_position = safepoint_position->GetNext()) {
        DCHECK(current->CoversSlow(safepoint_position->GetPosition()));
        LocationSummary* locations = safepoint_position->GetLocations();
        switch (source.GetKind()) {
          case Location::kRegister:
          case Location::kFpuRegister: {
            locations->AddLiveRegister(source);
            break;
          }
          case Location::kRegisterPair:
          case Location::kFpuRegisterPair: {
            locations->AddLiveRegister(source.ToLow());
            locations->AddLiveRegister(source.ToHigh());
            break;
          }
          case Location::kStackSlot:  // Fall-through
          case Location::kDoubleStackSlot:  // Fall-through
          case Location::kConstant: {
            // Nothing to do.
            break;
          }
          default: {
            LOG(FATAL) << "Unexpected location for object";
          }
        }
      }
    }
  }
}

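// Returns the maximum number of bytes any single safepoint that only calls
// on the slow path needs for spilling live core and FP registers. The result
// feeds codegen_->InitializeCodeGeneration() in Resolve() above, so the
// frame can reserve that spill space once, up front.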
size_t RegisterAllocationResolver::CalculateMaximumSafepointSpillSize(
    ArrayRef<HInstruction* const> safepoints) {
  size_t core_register_spill_size = codegen_->GetWordSize();
  size_t fp_register_spill_size = codegen_->GetFloatingPointSpillSlotSize();
  size_t maximum_safepoint_spill_size = 0u;
  for (HInstruction* instruction : safepoints) {
    LocationSummary* locations = instruction->GetLocations();
    if (locations->OnlyCallsOnSlowPath()) {
      size_t core_spills =
          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true);
      size_t fp_spills =
          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false);
      size_t spill_size =
          core_register_spill_size * core_spills + fp_register_spill_size * fp_spills;
      maximum_safepoint_spill_size = std::max(maximum_safepoint_spill_size, spill_size);
    } else if (locations->CallsOnMainAndSlowPath()) {
      // Nothing to spill on the slow path if the main path already clobbers caller-saves.
      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true));
      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false));
    }
  }
  return maximum_safepoint_spill_size;
}

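// A split live interval is a chain of "sibling" intervals, each possibly
// assigned a different location. Walk the chain in lifetime order, rewrite
// the locations of all uses (and environment uses) covered by each sibling,
// and insert a move wherever one sibling ends exactly where the next,
// register-allocated one begins.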
void RegisterAllocationResolver::ConnectSiblings(LiveInterval* interval) {
  LiveInterval* current = interval;
  if (current->HasSpillSlot()
      && current->HasRegister()
      // Currently, the code generators unconditionally spill the current method.
      && !interval->GetDefinedBy()->IsCurrentMethod()) {
    // We spill eagerly, so the move must be at the definition.
    Location loc;
    switch (interval->NumberOfSpillSlotsNeeded()) {
      case 1: loc = Location::StackSlot(interval->GetParent()->GetSpillSlot()); break;
      case 2: loc = Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot()); break;
      case 4: loc = Location::SIMDStackSlot(interval->GetParent()->GetSpillSlot()); break;
      default: LOG(FATAL) << "Unexpected number of spill slots"; UNREACHABLE();
    }
    InsertMoveAfter(interval->GetDefinedBy(), interval->ToLocation(), loc);
  }
  UsePositionList::const_iterator use_it = current->GetUses().begin();
  const UsePositionList::const_iterator use_end = current->GetUses().end();
  EnvUsePositionList::const_iterator env_use_it = current->GetEnvironmentUses().begin();
  const EnvUsePositionList::const_iterator env_use_end = current->GetEnvironmentUses().end();

  // Walk over all siblings, updating locations of use positions, and
  // connecting them when they are adjacent.
  do {
    Location source = current->ToLocation();

    // Walk over all uses covered by this interval, and update the location
    // information.

    LiveRange* range = current->GetFirstRange();
    while (range != nullptr) {
      // Process uses in the closed interval [range->GetStart(), range->GetEnd()].
      // FindMatchingUseRange() expects a half-open interval, so pass `range->GetEnd() + 1u`.
      size_t range_begin = range->GetStart();
      size_t range_end = range->GetEnd() + 1u;
      auto matching_use_range =
          FindMatchingUseRange(use_it, use_end, range_begin, range_end);
      DCHECK(std::all_of(use_it,
                         matching_use_range.begin(),
                         [](const UsePosition& pos) { return pos.IsSynthesized(); }));
      for (const UsePosition& use : matching_use_range) {
        DCHECK(current->CoversSlow(use.GetPosition()) || (use.GetPosition() == range->GetEnd()));
        if (!use.IsSynthesized()) {
          LocationSummary* locations = use.GetUser()->GetLocations();
          Location expected_location = locations->InAt(use.GetInputIndex());
          // The expected (actual) location may be invalid in case the input is unused. Currently
          // this only happens for intrinsics.
          if (expected_location.IsValid()) {
            if (expected_location.IsUnallocated()) {
              locations->SetInAt(use.GetInputIndex(), source);
            } else if (!expected_location.IsConstant()) {
              AddInputMoveFor(
                  interval->GetDefinedBy(), use.GetUser(), source, expected_location);
            }
          } else {
            DCHECK(use.GetUser()->IsInvoke());
            DCHECK(use.GetUser()->AsInvoke()->GetIntrinsic() != Intrinsics::kNone);
          }
        }
      }
      use_it = matching_use_range.end();

      // Walk over the environment uses, and update their locations.
      auto matching_env_use_range =
          FindMatchingUseRange(env_use_it, env_use_end, range_begin, range_end);
      for (const EnvUsePosition& env_use : matching_env_use_range) {
        DCHECK(current->CoversSlow(env_use.GetPosition())
               || (env_use.GetPosition() == range->GetEnd()));
        HEnvironment* environment = env_use.GetEnvironment();
        environment->SetLocationAt(env_use.GetInputIndex(), source);
      }
      env_use_it = matching_env_use_range.end();

      range = range->GetNext();
    }

    // If the next interval starts just after this one, and has a register,
    // insert a move.
    LiveInterval* next_sibling = current->GetNextSibling();
    if (next_sibling != nullptr
        && next_sibling->HasRegister()
        && current->GetEnd() == next_sibling->GetStart()) {
      Location destination = next_sibling->ToLocation();
      InsertParallelMoveAt(current->GetEnd(), interval->GetDefinedBy(), source, destination);
    }

    for (SafepointPosition* safepoint_position = current->GetFirstSafepoint();
         safepoint_position != nullptr;
         safepoint_position = safepoint_position->GetNext()) {
      DCHECK(current->CoversSlow(safepoint_position->GetPosition()));

      if (current->GetType() == Primitive::kPrimNot) {
        DCHECK(interval->GetDefinedBy()->IsActualObject())
            << interval->GetDefinedBy()->DebugName()
            << '(' << interval->GetDefinedBy()->GetId() << ')'
            << "@" << safepoint_position->GetInstruction()->DebugName()
            << '(' << safepoint_position->GetInstruction()->GetId() << ')';
        LocationSummary* locations = safepoint_position->GetLocations();
        if (current->GetParent()->HasSpillSlot()) {
          locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
        }
        if (source.GetKind() == Location::kRegister) {
          locations->SetRegisterBit(source.reg());
        }
      }
    }
    current = next_sibling;
  } while (current != nullptr);

  // Any uses that follow can only be synthesized uses.
  DCHECK(std::all_of(use_it, use_end, [](const UsePosition& pos) { return pos.IsSynthesized(); }));
}

static bool IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(
    HInstruction* instruction) {
  return instruction->GetBlock()->GetGraph()->HasIrreducibleLoops() &&
         (instruction->IsConstant() || instruction->IsCurrentMethod());
}

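// Connects the locations of a split interval across the control flow edge
// `from` -> `to` where the value is live-in. Because critical edges were
// split earlier, the move can always be placed either at the end of `from`
// (when it has a single normal successor) or at the start of `to` (when it
// has a single predecessor).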
void RegisterAllocationResolver::ConnectSplitSiblings(LiveInterval* interval,
                                                      HBasicBlock* from,
                                                      HBasicBlock* to) const {
  if (interval->GetNextSibling() == nullptr) {
    // Nothing to connect. The whole range was allocated to the same location.
    return;
  }

  // Find the intervals that cover `from` and `to`.
  size_t destination_position = to->GetLifetimeStart();
  size_t source_position = from->GetLifetimeEnd() - 1;
  LiveInterval* destination = interval->GetSiblingAt(destination_position);
  LiveInterval* source = interval->GetSiblingAt(source_position);

  if (destination == source) {
    // Interval was not split.
    return;
  }

  LiveInterval* parent = interval->GetParent();
  HInstruction* defined_by = parent->GetDefinedBy();
  if (codegen_->GetGraph()->HasIrreducibleLoops() &&
      (destination == nullptr || !destination->CoversSlow(destination_position))) {
    // Our live_in fixed point calculation has found that the instruction is live
    // in the `to` block because it will eventually enter an irreducible loop. Our
    // live interval computation, however, does not compute a fixed point, and
    // therefore will not have a location for that instruction in `to`.
    // Because the instruction is a constant or the ArtMethod, we don't need to
    // do anything: it will be materialized in the irreducible loop.
    DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by))
        << defined_by->DebugName() << ":" << defined_by->GetId()
        << " " << from->GetBlockId() << " -> " << to->GetBlockId();
    return;
  }

  if (!destination->HasRegister()) {
    // Values are eagerly spilled. The spill slot already contains the appropriate value.
    return;
  }

  Location location_source;
  // `GetSiblingAt` returns the interval whose start and end cover `position`,
  // but does not check whether the interval is inactive at that position.
  // The only situation where the interval is inactive at that position is in the
  // presence of irreducible loops for constants and ArtMethod.
  if (codegen_->GetGraph()->HasIrreducibleLoops() &&
      (source == nullptr || !source->CoversSlow(source_position))) {
    DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by));
    if (defined_by->IsConstant()) {
      location_source = defined_by->GetLocations()->Out();
    } else {
      DCHECK(defined_by->IsCurrentMethod());
      switch (parent->NumberOfSpillSlotsNeeded()) {
        case 1: location_source = Location::StackSlot(parent->GetSpillSlot()); break;
        case 2: location_source = Location::DoubleStackSlot(parent->GetSpillSlot()); break;
        case 4: location_source = Location::SIMDStackSlot(parent->GetSpillSlot()); break;
        default: LOG(FATAL) << "Unexpected number of spill slots"; UNREACHABLE();
      }
    }
  } else {
    DCHECK(source != nullptr);
    DCHECK(source->CoversSlow(source_position));
    DCHECK(destination->CoversSlow(destination_position));
    location_source = source->ToLocation();
  }

  // If `from` has only one successor, we can put the moves at its exit. Otherwise
  // we need to put them at the entry of `to`.
  if (from->GetNormalSuccessors().size() == 1) {
    InsertParallelMoveAtExitOf(from,
                               defined_by,
                               location_source,
                               destination->ToLocation());
  } else {
    DCHECK_EQ(to->GetPredecessors().size(), 1u);
    InsertParallelMoveAtEntryOf(to,
                                defined_by,
                                location_source,
                                destination->ToLocation());
  }
}

static bool IsValidDestination(Location destination) {
  return destination.IsRegister()
      || destination.IsRegisterPair()
      || destination.IsFpuRegister()
      || destination.IsFpuRegisterPair()
      || destination.IsStackSlot()
      || destination.IsDoubleStackSlot()
      || destination.IsSIMDStackSlot();
}

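// Appends a move to `move`. Where the code generator requests it
// (ShouldSplitLongMoves()), a 64-bit move is decomposed into two 32-bit
// moves, one per register-pair half; long constants are kept whole because
// the parallel move resolver knows how to deal with them.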
void RegisterAllocationResolver::AddMove(HParallelMove* move,
                                         Location source,
                                         Location destination,
                                         HInstruction* instruction,
                                         Primitive::Type type) const {
  if (type == Primitive::kPrimLong
      && codegen_->ShouldSplitLongMoves()
      // The parallel move resolver knows how to deal with long constants.
      && !source.IsConstant()) {
    move->AddMove(source.ToLow(), destination.ToLow(), Primitive::kPrimInt, instruction);
    move->AddMove(source.ToHigh(), destination.ToHigh(), Primitive::kPrimInt, nullptr);
  } else {
    move->AddMove(source, destination, type, instruction);
  }
}

void RegisterAllocationResolver::AddInputMoveFor(HInstruction* input,
                                                 HInstruction* user,
                                                 Location source,
                                                 Location destination) const {
  if (source.Equals(destination)) return;

  DCHECK(!user->IsPhi());

  HInstruction* previous = user->GetPrevious();
  HParallelMove* move = nullptr;
  if (previous == nullptr
      || !previous->IsParallelMove()
      || previous->GetLifetimePosition() < user->GetLifetimePosition()) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(user->GetLifetimePosition());
    user->GetBlock()->InsertInstructionBefore(move, user);
  } else {
    move = previous->AsParallelMove();
  }
  DCHECK_EQ(move->GetLifetimePosition(), user->GetLifetimePosition());
  AddMove(move, source, destination, nullptr, input->GetType());
}

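// Lifetime positions allot two slots per instruction: even positions mark an
// instruction's start and odd positions its end, which is why `position / 2`
// below maps a lifetime position back to its instruction.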
static bool IsInstructionStart(size_t position) {
  return (position & 1) == 0;
}

static bool IsInstructionEnd(size_t position) {
  return (position & 1) == 1;
}

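// Inserts (or reuses) an HParallelMove at `position`. Three cases: the
// position maps to no instruction (a block boundary, or the gap before a
// block's first instruction), it falls at an instruction's end (the move
// goes after it), or at an instruction's start (the move goes before it).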
void RegisterAllocationResolver::InsertParallelMoveAt(size_t position,
                                                      HInstruction* instruction,
                                                      Location source,
                                                      Location destination) const {
  DCHECK(IsValidDestination(destination)) << destination;
  if (source.Equals(destination)) return;

  HInstruction* at = liveness_.GetInstructionFromPosition(position / 2);
  HParallelMove* move;
  if (at == nullptr) {
    if (IsInstructionStart(position)) {
      // Block boundary: don't do anything, the connection of split siblings will handle it.
      return;
    } else {
      // Move must happen before the first instruction of the block.
      at = liveness_.GetInstructionFromPosition((position + 1) / 2);
      // Note that parallel moves may have already been inserted, so we explicitly
      // ask for the first instruction of the block: `GetInstructionFromPosition` does
      // not contain the `HParallelMove` instructions.
      at = at->GetBlock()->GetFirstInstruction();

      if (at->GetLifetimePosition() < position) {
        // We may insert moves for split siblings and phi spills at the beginning of the block.
        // Since this is a different lifetime position, we need to go to the next instruction.
        DCHECK(at->IsParallelMove());
        at = at->GetNext();
      }

      if (at->GetLifetimePosition() != position) {
        DCHECK_GT(at->GetLifetimePosition(), position);
        move = new (allocator_) HParallelMove(allocator_);
        move->SetLifetimePosition(position);
        at->GetBlock()->InsertInstructionBefore(move, at);
      } else {
        DCHECK(at->IsParallelMove());
        move = at->AsParallelMove();
      }
    }
  } else if (IsInstructionEnd(position)) {
    // Move must happen after the instruction.
    DCHECK(!at->IsControlFlow());
    move = at->GetNext()->AsParallelMove();
    // This is a parallel move for connecting siblings in the same block. We need to
    // differentiate it from moves for connecting blocks, and from input moves.
    if (move == nullptr || move->GetLifetimePosition() > position) {
      move = new (allocator_) HParallelMove(allocator_);
      move->SetLifetimePosition(position);
      at->GetBlock()->InsertInstructionBefore(move, at->GetNext());
    }
  } else {
    // Move must happen before the instruction.
    HInstruction* previous = at->GetPrevious();
    if (previous == nullptr
        || !previous->IsParallelMove()
        || previous->GetLifetimePosition() != position) {
      // If the previous is a parallel move, then its position must be lower
      // than the given `position`: it was added just after the non-parallel
      // move instruction that precedes `instruction`.
      DCHECK(previous == nullptr
             || !previous->IsParallelMove()
             || previous->GetLifetimePosition() < position);
      move = new (allocator_) HParallelMove(allocator_);
      move->SetLifetimePosition(position);
      at->GetBlock()->InsertInstructionBefore(move, at);
    } else {
      move = previous->AsParallelMove();
    }
  }
  DCHECK_EQ(move->GetLifetimePosition(), position);
  AddMove(move, source, destination, instruction, instruction->GetType());
}

void RegisterAllocationResolver::InsertParallelMoveAtExitOf(HBasicBlock* block,
                                                            HInstruction* instruction,
                                                            Location source,
                                                            Location destination) const {
  DCHECK(IsValidDestination(destination)) << destination;
  if (source.Equals(destination)) return;

  DCHECK_EQ(block->GetNormalSuccessors().size(), 1u);
  HInstruction* last = block->GetLastInstruction();
  // We insert moves at exit for phi predecessors and connecting blocks.
  // A block ending with an if or a packed switch cannot branch to a block
  // with phis because we do not allow critical edges. It also cannot connect
  // a split interval between two blocks: that move has to happen in the successor.
  DCHECK(!last->IsIf() && !last->IsPackedSwitch());
  HInstruction* previous = last->GetPrevious();
  HParallelMove* move;
  // This is a parallel move for connecting blocks. We need to differentiate
  // it from moves for connecting siblings in the same block, and from output moves.
  size_t position = last->GetLifetimePosition();
  if (previous == nullptr || !previous->IsParallelMove()
      || previous->AsParallelMove()->GetLifetimePosition() != position) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(position);
    block->InsertInstructionBefore(move, last);
  } else {
    move = previous->AsParallelMove();
  }
  AddMove(move, source, destination, instruction, instruction->GetType());
}

void RegisterAllocationResolver::InsertParallelMoveAtEntryOf(HBasicBlock* block,
                                                             HInstruction* instruction,
                                                             Location source,
                                                             Location destination) const {
  DCHECK(IsValidDestination(destination)) << destination;
  if (source.Equals(destination)) return;

  HInstruction* first = block->GetFirstInstruction();
  HParallelMove* move = first->AsParallelMove();
  size_t position = block->GetLifetimeStart();
  // This is a parallel move for connecting blocks. We need to differentiate
  // it from moves for connecting siblings in the same block, and from input moves.
  if (move == nullptr || move->GetLifetimePosition() != position) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(position);
    block->InsertInstructionBefore(move, first);
  }
  AddMove(move, source, destination, instruction, instruction->GetType());
}

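// Inserts a move for `instruction`'s output right after it, at the odd
// lifetime position following its definition. Phis have no lifetime position
// of their own inside the block, so their move is placed at block entry.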
void RegisterAllocationResolver::InsertMoveAfter(HInstruction* instruction,
                                                 Location source,
                                                 Location destination) const {
  DCHECK(IsValidDestination(destination)) << destination;
  if (source.Equals(destination)) return;

  if (instruction->IsPhi()) {
    InsertParallelMoveAtEntryOf(instruction->GetBlock(), instruction, source, destination);
    return;
  }

  size_t position = instruction->GetLifetimePosition() + 1;
  HParallelMove* move = instruction->GetNext()->AsParallelMove();
  // This is a parallel move for moving the output of an instruction. We need
  // to differentiate it from input moves, moves for connecting siblings in the
  // same block, and moves for connecting blocks.
  if (move == nullptr || move->GetLifetimePosition() != position) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(position);
    instruction->GetBlock()->InsertInstructionBefore(move, instruction->GetNext());
  }
  AddMove(move, source, destination, instruction, instruction->GetType());
}

}  // namespace art