• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #ifndef ART_COMPILER_OPTIMIZING_LOCATIONS_H_
18 #define ART_COMPILER_OPTIMIZING_LOCATIONS_H_
19 
20 #include "base/arena_containers.h"
21 #include "base/arena_object.h"
22 #include "base/bit_field.h"
23 #include "base/bit_utils.h"
24 #include "base/bit_vector.h"
25 #include "base/macros.h"
26 #include "base/value_object.h"
27 
28 namespace art HIDDEN {
29 
30 class HConstant;
31 class HInstruction;
32 class Location;
33 
34 std::ostream& operator<<(std::ostream& os, const Location& location);
35 
36 /**
37  * A Location is an abstraction over the potential location
38  * of an instruction. It could be in register or stack.
39  */
40 class Location : public ValueObject {
41  public:
42   enum OutputOverlap {
43     // The liveness of the output overlaps the liveness of one or
44     // several input(s); the register allocator cannot reuse an
45     // input's location for the output's location.
46     kOutputOverlap,
47     // The liveness of the output does not overlap the liveness of any
48     // input; the register allocator is allowed to reuse an input's
49     // location for the output's location.
50     kNoOutputOverlap
51   };
52 
53   enum Kind {
54     kInvalid = 0,
55     kConstant = 1,
56     kStackSlot = 2,  // 32bit stack slot.
57     kDoubleStackSlot = 3,  // 64bit stack slot.
58 
59     kRegister = 4,  // Core register.
60 
61     // We do not use the value 5 because it conflicts with kLocationConstantMask.
62     kDoNotUse5 = 5,
63 
64     kFpuRegister = 6,  // Float register.
65 
66     kRegisterPair = 7,  // Long register.
67 
68     kFpuRegisterPair = 8,  // Double register.
69 
70     // We do not use the value 9 because it conflicts with kLocationConstantMask.
71     kDoNotUse9 = 9,
72 
73     kSIMDStackSlot = 10,  // 128bit stack slot. TODO: generalize with encoded #bytes?
74 
75     // Unallocated location represents a location that is not fixed and can be
76     // allocated by a register allocator.  Each unallocated location has
77     // a policy that specifies what kind of location is suitable. Payload
78     // contains register allocation policy.
79     kUnallocated = 11,
80   };
81 
Location()82   Location() : ValueObject(), value_(kInvalid) {
83     // Verify that non-constant location kinds do not interfere with kConstant.
84     static_assert((kInvalid & kLocationConstantMask) != kConstant, "TagError");
85     static_assert((kUnallocated & kLocationConstantMask) != kConstant, "TagError");
86     static_assert((kStackSlot & kLocationConstantMask) != kConstant, "TagError");
87     static_assert((kDoubleStackSlot & kLocationConstantMask) != kConstant, "TagError");
88     static_assert((kSIMDStackSlot & kLocationConstantMask) != kConstant, "TagError");
89     static_assert((kRegister & kLocationConstantMask) != kConstant, "TagError");
90     static_assert((kFpuRegister & kLocationConstantMask) != kConstant, "TagError");
91     static_assert((kRegisterPair & kLocationConstantMask) != kConstant, "TagError");
92     static_assert((kFpuRegisterPair & kLocationConstantMask) != kConstant, "TagError");
93     static_assert((kConstant & kLocationConstantMask) == kConstant, "TagError");
94 
95     DCHECK(!IsValid());
96   }
97 
98   Location(const Location& other) = default;
99 
100   Location& operator=(const Location& other) = default;
101 
IsConstant()102   bool IsConstant() const {
103     return (value_ & kLocationConstantMask) == kConstant;
104   }
105 
ConstantLocation(HInstruction * constant)106   static Location ConstantLocation(HInstruction* constant) {
107     DCHECK(constant != nullptr);
108     if (kIsDebugBuild) {
109       // Call out-of-line helper to avoid circular dependency with `nodes.h`.
110       DCheckInstructionIsConstant(constant);
111     }
112     return Location(kConstant | reinterpret_cast<uintptr_t>(constant));
113   }
114 
GetConstant()115   HConstant* GetConstant() const {
116     DCHECK(IsConstant());
117     return reinterpret_cast<HConstant*>(value_ & ~kLocationConstantMask);
118   }
119 
IsValid()120   bool IsValid() const {
121     return value_ != kInvalid;
122   }
123 
IsInvalid()124   bool IsInvalid() const {
125     return !IsValid();
126   }
127 
128   // Empty location. Used if there the location should be ignored.
NoLocation()129   static Location NoLocation() {
130     return Location();
131   }
132 
133   // Register locations.
RegisterLocation(int reg)134   static Location RegisterLocation(int reg) {
135     return Location(kRegister, reg);
136   }
137 
FpuRegisterLocation(int reg)138   static Location FpuRegisterLocation(int reg) {
139     return Location(kFpuRegister, reg);
140   }
141 
RegisterPairLocation(int low,int high)142   static Location RegisterPairLocation(int low, int high) {
143     return Location(kRegisterPair, low << 16 | high);
144   }
145 
FpuRegisterPairLocation(int low,int high)146   static Location FpuRegisterPairLocation(int low, int high) {
147     return Location(kFpuRegisterPair, low << 16 | high);
148   }
149 
IsRegister()150   bool IsRegister() const {
151     return GetKind() == kRegister;
152   }
153 
IsFpuRegister()154   bool IsFpuRegister() const {
155     return GetKind() == kFpuRegister;
156   }
157 
IsRegisterPair()158   bool IsRegisterPair() const {
159     return GetKind() == kRegisterPair;
160   }
161 
IsFpuRegisterPair()162   bool IsFpuRegisterPair() const {
163     return GetKind() == kFpuRegisterPair;
164   }
165 
IsRegisterKind()166   bool IsRegisterKind() const {
167     return IsRegister() || IsFpuRegister() || IsRegisterPair() || IsFpuRegisterPair();
168   }
169 
reg()170   int reg() const {
171     DCHECK(IsRegister() || IsFpuRegister());
172     return GetPayload();
173   }
174 
low()175   int low() const {
176     DCHECK(IsPair());
177     return GetPayload() >> 16;
178   }
179 
high()180   int high() const {
181     DCHECK(IsPair());
182     return GetPayload() & 0xFFFF;
183   }
184 
185   template <typename T>
AsRegister()186   T AsRegister() const {
187     DCHECK(IsRegister());
188     return static_cast<T>(reg());
189   }
190 
191   template <typename T>
AsFpuRegister()192   T AsFpuRegister() const {
193     DCHECK(IsFpuRegister());
194     return static_cast<T>(reg());
195   }
196 
197   template <typename T>
AsRegisterPairLow()198   T AsRegisterPairLow() const {
199     DCHECK(IsRegisterPair());
200     return static_cast<T>(low());
201   }
202 
203   template <typename T>
AsRegisterPairHigh()204   T AsRegisterPairHigh() const {
205     DCHECK(IsRegisterPair());
206     return static_cast<T>(high());
207   }
208 
209   template <typename T>
AsFpuRegisterPairLow()210   T AsFpuRegisterPairLow() const {
211     DCHECK(IsFpuRegisterPair());
212     return static_cast<T>(low());
213   }
214 
215   template <typename T>
AsFpuRegisterPairHigh()216   T AsFpuRegisterPairHigh() const {
217     DCHECK(IsFpuRegisterPair());
218     return static_cast<T>(high());
219   }
220 
IsPair()221   bool IsPair() const {
222     return IsRegisterPair() || IsFpuRegisterPair();
223   }
224 
ToLow()225   Location ToLow() const {
226     if (IsRegisterPair()) {
227       return Location::RegisterLocation(low());
228     } else if (IsFpuRegisterPair()) {
229       return Location::FpuRegisterLocation(low());
230     } else {
231       DCHECK(IsDoubleStackSlot());
232       return Location::StackSlot(GetStackIndex());
233     }
234   }
235 
ToHigh()236   Location ToHigh() const {
237     if (IsRegisterPair()) {
238       return Location::RegisterLocation(high());
239     } else if (IsFpuRegisterPair()) {
240       return Location::FpuRegisterLocation(high());
241     } else {
242       DCHECK(IsDoubleStackSlot());
243       return Location::StackSlot(GetHighStackIndex(4));
244     }
245   }
246 
EncodeStackIndex(intptr_t stack_index)247   static uintptr_t EncodeStackIndex(intptr_t stack_index) {
248     DCHECK(-kStackIndexBias <= stack_index);
249     DCHECK(stack_index < kStackIndexBias);
250     return static_cast<uintptr_t>(kStackIndexBias + stack_index);
251   }
252 
StackSlot(intptr_t stack_index)253   static Location StackSlot(intptr_t stack_index) {
254     uintptr_t payload = EncodeStackIndex(stack_index);
255     Location loc(kStackSlot, payload);
256     // Ensure that sign is preserved.
257     DCHECK_EQ(loc.GetStackIndex(), stack_index);
258     return loc;
259   }
260 
IsStackSlot()261   bool IsStackSlot() const {
262     return GetKind() == kStackSlot;
263   }
264 
DoubleStackSlot(intptr_t stack_index)265   static Location DoubleStackSlot(intptr_t stack_index) {
266     uintptr_t payload = EncodeStackIndex(stack_index);
267     Location loc(kDoubleStackSlot, payload);
268     // Ensure that sign is preserved.
269     DCHECK_EQ(loc.GetStackIndex(), stack_index);
270     return loc;
271   }
272 
IsDoubleStackSlot()273   bool IsDoubleStackSlot() const {
274     return GetKind() == kDoubleStackSlot;
275   }
276 
SIMDStackSlot(intptr_t stack_index)277   static Location SIMDStackSlot(intptr_t stack_index) {
278     uintptr_t payload = EncodeStackIndex(stack_index);
279     Location loc(kSIMDStackSlot, payload);
280     // Ensure that sign is preserved.
281     DCHECK_EQ(loc.GetStackIndex(), stack_index);
282     return loc;
283   }
284 
IsSIMDStackSlot()285   bool IsSIMDStackSlot() const {
286     return GetKind() == kSIMDStackSlot;
287   }
288 
StackSlotByNumOfSlots(size_t num_of_slots,int spill_slot)289   static Location StackSlotByNumOfSlots(size_t num_of_slots, int spill_slot) {
290     DCHECK_NE(num_of_slots, 0u);
291     switch (num_of_slots) {
292       case 1u:
293         return Location::StackSlot(spill_slot);
294       case 2u:
295         return Location::DoubleStackSlot(spill_slot);
296       default:
297         // Assume all other stack slot sizes correspond to SIMD slot size.
298         return Location::SIMDStackSlot(spill_slot);
299     }
300   }
301 
GetStackIndex()302   intptr_t GetStackIndex() const {
303     DCHECK(IsStackSlot() || IsDoubleStackSlot() || IsSIMDStackSlot());
304     // Decode stack index manually to preserve sign.
305     return GetPayload() - kStackIndexBias;
306   }
307 
GetHighStackIndex(uintptr_t word_size)308   intptr_t GetHighStackIndex(uintptr_t word_size) const {
309     DCHECK(IsDoubleStackSlot());
310     // Decode stack index manually to preserve sign.
311     return GetPayload() - kStackIndexBias + word_size;
312   }
313 
GetKind()314   Kind GetKind() const {
315     return IsConstant() ? kConstant : KindField::Decode(value_);
316   }
317 
Equals(Location other)318   bool Equals(Location other) const {
319     return value_ == other.value_;
320   }
321 
Contains(Location other)322   bool Contains(Location other) const {
323     if (Equals(other)) {
324       return true;
325     } else if (IsPair() || IsDoubleStackSlot()) {
326       return ToLow().Equals(other) || ToHigh().Equals(other);
327     }
328     return false;
329   }
330 
OverlapsWith(Location other)331   bool OverlapsWith(Location other) const {
332     // Only check the overlapping case that can happen with our register allocation algorithm.
333     bool overlap = Contains(other) || other.Contains(*this);
334     if (kIsDebugBuild && !overlap) {
335       // Note: These are also overlapping cases. But we are not able to handle them in
336       // ParallelMoveResolverWithSwap. Make sure that we do not meet such case with our compiler.
337       if ((IsPair() && other.IsPair()) || (IsDoubleStackSlot() && other.IsDoubleStackSlot())) {
338         DCHECK(!Contains(other.ToLow()));
339         DCHECK(!Contains(other.ToHigh()));
340       }
341     }
342     return overlap;
343   }
344 
DebugString()345   const char* DebugString() const {
346     switch (GetKind()) {
347       case kInvalid: return "I";
348       case kRegister: return "R";
349       case kStackSlot: return "S";
350       case kDoubleStackSlot: return "DS";
351       case kSIMDStackSlot: return "SIMD";
352       case kUnallocated: return "U";
353       case kConstant: return "C";
354       case kFpuRegister: return "F";
355       case kRegisterPair: return "RP";
356       case kFpuRegisterPair: return "FP";
357       case kDoNotUse5:  // fall-through
358       case kDoNotUse9:
359         LOG(FATAL) << "Should not use this location kind";
360     }
361     UNREACHABLE();
362   }
363 
364   // Unallocated locations.
365   enum Policy {
366     kAny,
367     kRequiresRegister,
368     kRequiresFpuRegister,
369     kSameAsFirstInput,
370   };
371 
IsUnallocated()372   bool IsUnallocated() const {
373     return GetKind() == kUnallocated;
374   }
375 
UnallocatedLocation(Policy policy)376   static Location UnallocatedLocation(Policy policy) {
377     return Location(kUnallocated, PolicyField::Encode(policy));
378   }
379 
380   // Any free register is suitable to replace this unallocated location.
Any()381   static Location Any() {
382     return UnallocatedLocation(kAny);
383   }
384 
RequiresRegister()385   static Location RequiresRegister() {
386     return UnallocatedLocation(kRequiresRegister);
387   }
388 
RequiresFpuRegister()389   static Location RequiresFpuRegister() {
390     return UnallocatedLocation(kRequiresFpuRegister);
391   }
392 
393   static Location RegisterOrConstant(HInstruction* instruction);
394   static Location RegisterOrInt32Constant(HInstruction* instruction);
395   static Location ByteRegisterOrConstant(int reg, HInstruction* instruction);
396   static Location FpuRegisterOrConstant(HInstruction* instruction);
397   static Location FpuRegisterOrInt32Constant(HInstruction* instruction);
398 
399   // The location of the first input to the instruction will be
400   // used to replace this unallocated location.
SameAsFirstInput()401   static Location SameAsFirstInput() {
402     return UnallocatedLocation(kSameAsFirstInput);
403   }
404 
GetPolicy()405   Policy GetPolicy() const {
406     DCHECK(IsUnallocated());
407     return PolicyField::Decode(GetPayload());
408   }
409 
RequiresRegisterKind()410   bool RequiresRegisterKind() const {
411     return GetPolicy() == kRequiresRegister || GetPolicy() == kRequiresFpuRegister;
412   }
413 
GetEncoding()414   uintptr_t GetEncoding() const {
415     return GetPayload();
416   }
417 
418  private:
419   // Number of bits required to encode Kind value.
420   static constexpr uint32_t kBitsForKind = 4;
421   static constexpr uint32_t kBitsForPayload = kBitsPerIntPtrT - kBitsForKind;
422   static constexpr uintptr_t kLocationConstantMask = 0x3;
423 
Location(uintptr_t value)424   explicit Location(uintptr_t value) : value_(value) {}
425 
Location(Kind kind,uintptr_t payload)426   Location(Kind kind, uintptr_t payload)
427       : value_(KindField::Encode(kind) | PayloadField::Encode(payload)) {}
428 
GetPayload()429   uintptr_t GetPayload() const {
430     return PayloadField::Decode(value_);
431   }
432 
433   static void DCheckInstructionIsConstant(HInstruction* instruction);
434 
435   using KindField = BitField<Kind, 0, kBitsForKind>;
436   using PayloadField = BitField<uintptr_t, kBitsForKind, kBitsForPayload>;
437 
438   // Layout for kUnallocated locations payload.
439   using PolicyField = BitField<Policy, 0, 3>;
440 
441   // Layout for stack slots.
442   static const intptr_t kStackIndexBias =
443       static_cast<intptr_t>(1) << (kBitsForPayload - 1);
444 
445   // Location either contains kind and payload fields or a tagged handle for
446   // a constant locations. Values of enumeration Kind are selected in such a
447   // way that none of them can be interpreted as a kConstant tag.
448   uintptr_t value_;
449 };
450 std::ostream& operator<<(std::ostream& os, Location::Kind rhs);
451 std::ostream& operator<<(std::ostream& os, Location::Policy rhs);
452 
453 class RegisterSet : public ValueObject {
454  public:
Empty()455   static RegisterSet Empty() { return RegisterSet(); }
AllFpu()456   static RegisterSet AllFpu() { return RegisterSet(0, -1); }
457 
Add(Location loc)458   void Add(Location loc) {
459     if (loc.IsRegister()) {
460       core_registers_ |= (1 << loc.reg());
461     } else {
462       DCHECK(loc.IsFpuRegister());
463       floating_point_registers_ |= (1 << loc.reg());
464     }
465   }
466 
Remove(Location loc)467   void Remove(Location loc) {
468     if (loc.IsRegister()) {
469       core_registers_ &= ~(1 << loc.reg());
470     } else {
471       DCHECK(loc.IsFpuRegister()) << loc;
472       floating_point_registers_ &= ~(1 << loc.reg());
473     }
474   }
475 
ContainsCoreRegister(uint32_t id)476   bool ContainsCoreRegister(uint32_t id) const {
477     return Contains(core_registers_, id);
478   }
479 
ContainsFloatingPointRegister(uint32_t id)480   bool ContainsFloatingPointRegister(uint32_t id) const {
481     return Contains(floating_point_registers_, id);
482   }
483 
Contains(uint32_t register_set,uint32_t reg)484   static bool Contains(uint32_t register_set, uint32_t reg) {
485     return (register_set & (1 << reg)) != 0;
486   }
487 
OverlapsRegisters(Location out)488   bool OverlapsRegisters(Location out) {
489     DCHECK(out.IsRegisterKind());
490     switch (out.GetKind()) {
491       case Location::Kind::kRegister:
492         return ContainsCoreRegister(out.reg());
493       case Location::Kind::kFpuRegister:
494         return ContainsFloatingPointRegister(out.reg());
495       case Location::Kind::kRegisterPair:
496         return ContainsCoreRegister(out.low()) || ContainsCoreRegister(out.high());
497       case Location::Kind::kFpuRegisterPair:
498         return ContainsFloatingPointRegister(out.low()) ||
499                ContainsFloatingPointRegister(out.high());
500       default:
501         return false;
502     }
503   }
504 
GetNumberOfRegisters()505   size_t GetNumberOfRegisters() const {
506     return POPCOUNT(core_registers_) + POPCOUNT(floating_point_registers_);
507   }
508 
GetCoreRegisters()509   uint32_t GetCoreRegisters() const {
510     return core_registers_;
511   }
512 
GetFloatingPointRegisters()513   uint32_t GetFloatingPointRegisters() const {
514     return floating_point_registers_;
515   }
516 
517  private:
RegisterSet()518   RegisterSet() : core_registers_(0), floating_point_registers_(0) {}
RegisterSet(uint32_t core,uint32_t fp)519   RegisterSet(uint32_t core, uint32_t fp) : core_registers_(core), floating_point_registers_(fp) {}
520 
521   uint32_t core_registers_;
522   uint32_t floating_point_registers_;
523 };
524 
525 static constexpr bool kIntrinsified = true;
526 
527 /**
528  * The code generator computes LocationSummary for each instruction so that
529  * the instruction itself knows what code to generate: where to find the inputs
530  * and where to place the result.
531  *
532  * The intent is to have the code for generating the instruction independent of
533  * register allocation. A register allocator just has to provide a LocationSummary.
534  */
535 class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
536  public:
537   enum CallKind {
538     kNoCall,
539     kCallOnMainAndSlowPath,
540     kCallOnSlowPath,
541     kCallOnMainOnly
542   };
543 
544   explicit LocationSummary(HInstruction* instruction,
545                            CallKind call_kind = kNoCall,
546                            bool intrinsified = false);
547 
SetInAt(uint32_t at,Location location)548   void SetInAt(uint32_t at, Location location) {
549     inputs_[at] = location;
550   }
551 
InAt(uint32_t at)552   Location InAt(uint32_t at) const {
553     return inputs_[at];
554   }
555 
GetInputCount()556   size_t GetInputCount() const {
557     return inputs_.size();
558   }
559 
560   // Set the output location.  Argument `overlaps` tells whether the
561   // output overlaps any of the inputs (if so, it cannot share the
562   // same register as one of the inputs); it is set to
563   // `Location::kOutputOverlap` by default for safety.
564   void SetOut(Location location, Location::OutputOverlap overlaps = Location::kOutputOverlap) {
565     DCHECK(output_.IsInvalid());
566     output_overlaps_ = overlaps;
567     output_ = location;
568   }
569 
UpdateOut(Location location)570   void UpdateOut(Location location) {
571     // There are two reasons for updating an output:
572     // 1) Parameters, where we only know the exact stack slot after
573     //    doing full register allocation.
574     // 2) Unallocated location.
575     DCHECK(output_.IsStackSlot() || output_.IsDoubleStackSlot() || output_.IsUnallocated());
576     output_ = location;
577   }
578 
AddTemp(Location location)579   void AddTemp(Location location) {
580     temps_.push_back(location);
581   }
582 
AddRegisterTemps(size_t count)583   void AddRegisterTemps(size_t count) {
584     for (size_t i = 0; i < count; ++i) {
585       AddTemp(Location::RequiresRegister());
586     }
587   }
588 
GetTemp(uint32_t at)589   Location GetTemp(uint32_t at) const {
590     return temps_[at];
591   }
592 
SetTempAt(uint32_t at,Location location)593   void SetTempAt(uint32_t at, Location location) {
594     DCHECK(temps_[at].IsUnallocated() || temps_[at].IsInvalid());
595     temps_[at] = location;
596   }
597 
GetTempCount()598   size_t GetTempCount() const {
599     return temps_.size();
600   }
601 
HasTemps()602   bool HasTemps() const { return !temps_.empty(); }
603 
Out()604   Location Out() const { return output_; }
605 
CanCall()606   bool CanCall() const {
607     return call_kind_ != kNoCall;
608   }
609 
WillCall()610   bool WillCall() const {
611     return call_kind_ == kCallOnMainOnly || call_kind_ == kCallOnMainAndSlowPath;
612   }
613 
CallsOnSlowPath()614   bool CallsOnSlowPath() const {
615     return OnlyCallsOnSlowPath() || CallsOnMainAndSlowPath();
616   }
617 
OnlyCallsOnSlowPath()618   bool OnlyCallsOnSlowPath() const {
619     return call_kind_ == kCallOnSlowPath;
620   }
621 
NeedsSuspendCheckEntry()622   bool NeedsSuspendCheckEntry() const {
623     // Slow path calls do not need a SuspendCheck at method entry since they go into the runtime,
624     // which we expect to either do a suspend check or return quickly.
625     return WillCall();
626   }
627 
CallsOnMainAndSlowPath()628   bool CallsOnMainAndSlowPath() const {
629     return call_kind_ == kCallOnMainAndSlowPath;
630   }
631 
NeedsSafepoint()632   bool NeedsSafepoint() const {
633     return CanCall();
634   }
635 
SetCustomSlowPathCallerSaves(const RegisterSet & caller_saves)636   void SetCustomSlowPathCallerSaves(const RegisterSet& caller_saves) {
637     DCHECK(OnlyCallsOnSlowPath());
638     has_custom_slow_path_calling_convention_ = true;
639     custom_slow_path_caller_saves_ = caller_saves;
640   }
641 
HasCustomSlowPathCallingConvention()642   bool HasCustomSlowPathCallingConvention() const {
643     return has_custom_slow_path_calling_convention_;
644   }
645 
GetCustomSlowPathCallerSaves()646   const RegisterSet& GetCustomSlowPathCallerSaves() const {
647     DCHECK(HasCustomSlowPathCallingConvention());
648     return custom_slow_path_caller_saves_;
649   }
650 
SetStackBit(uint32_t index)651   void SetStackBit(uint32_t index) {
652     stack_mask_->SetBit(index);
653   }
654 
ClearStackBit(uint32_t index)655   void ClearStackBit(uint32_t index) {
656     stack_mask_->ClearBit(index);
657   }
658 
SetRegisterBit(uint32_t reg_id)659   void SetRegisterBit(uint32_t reg_id) {
660     register_mask_ |= (1 << reg_id);
661   }
662 
GetRegisterMask()663   uint32_t GetRegisterMask() const {
664     return register_mask_;
665   }
666 
RegisterContainsObject(uint32_t reg_id)667   bool RegisterContainsObject(uint32_t reg_id) {
668     return RegisterSet::Contains(register_mask_, reg_id);
669   }
670 
AddLiveRegister(Location location)671   void AddLiveRegister(Location location) {
672     live_registers_.Add(location);
673   }
674 
GetStackMask()675   BitVector* GetStackMask() const {
676     return stack_mask_;
677   }
678 
GetLiveRegisters()679   RegisterSet* GetLiveRegisters() {
680     return &live_registers_;
681   }
682 
GetNumberOfLiveRegisters()683   size_t GetNumberOfLiveRegisters() const {
684     return live_registers_.GetNumberOfRegisters();
685   }
686 
OutputUsesSameAs(uint32_t input_index)687   bool OutputUsesSameAs(uint32_t input_index) const {
688     return (input_index == 0)
689         && output_.IsUnallocated()
690         && (output_.GetPolicy() == Location::kSameAsFirstInput);
691   }
692 
IsFixedInput(uint32_t input_index)693   bool IsFixedInput(uint32_t input_index) const {
694     Location input = inputs_[input_index];
695     return input.IsRegister()
696         || input.IsFpuRegister()
697         || input.IsPair()
698         || input.IsStackSlot()
699         || input.IsDoubleStackSlot();
700   }
701 
OutputCanOverlapWithInputs()702   bool OutputCanOverlapWithInputs() const {
703     return output_overlaps_ == Location::kOutputOverlap;
704   }
705 
Intrinsified()706   bool Intrinsified() const {
707     return intrinsified_;
708   }
709 
710  private:
711   LocationSummary(HInstruction* instruction,
712                   CallKind call_kind,
713                   bool intrinsified,
714                   ArenaAllocator* allocator);
715 
716   ArenaVector<Location> inputs_;
717   ArenaVector<Location> temps_;
718   const CallKind call_kind_;
719   // Whether these are locations for an intrinsified call.
720   const bool intrinsified_;
721   // Whether the slow path has default or custom calling convention.
722   bool has_custom_slow_path_calling_convention_;
723   // Whether the output overlaps with any of the inputs. If it overlaps, then it cannot
724   // share the same register as the inputs.
725   Location::OutputOverlap output_overlaps_;
726   Location output_;
727 
728   // Mask of objects that live in the stack.
729   BitVector* stack_mask_;
730 
731   // Mask of objects that live in register.
732   uint32_t register_mask_;
733 
734   // Registers that are in use at this position.
735   RegisterSet live_registers_;
736 
737   // Custom slow path caller saves. Valid only if indicated by slow_path_calling_convention_.
738   RegisterSet custom_slow_path_caller_saves_;
739 
740   friend class RegisterAllocatorTest;
741   DISALLOW_COPY_AND_ASSIGN(LocationSummary);
742 };
743 
744 }  // namespace art
745 
746 #endif  // ART_COMPILER_OPTIMIZING_LOCATIONS_H_
747