/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_LOCATIONS_H_
#define ART_COMPILER_OPTIMIZING_LOCATIONS_H_

#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/array_ref.h"
#include "base/bit_field.h"
#include "base/bit_utils.h"
#include "base/bit_vector.h"
#include "base/macros.h"
#include "base/value_object.h"

namespace art HIDDEN {

class HConstant;
class HInstruction;
class Location;

std::ostream& operator<<(std::ostream& os, const Location& location);

37 /**
38  * A Location is an abstraction over the potential location
39  * of an instruction. It could be in register or stack.
40  */
41 class Location : public ValueObject {
42  public:
43   enum OutputOverlap : uint8_t {
44     // The liveness of the output overlaps the liveness of one or
45     // several input(s); the register allocator cannot reuse an
46     // input's location for the output's location.
47     kOutputOverlap,
48     // The liveness of the output does not overlap the liveness of any
49     // input; the register allocator is allowed to reuse an input's
50     // location for the output's location.
51     kNoOutputOverlap
52   };
53 
54   enum Kind {
55     kInvalid = 0,
56     kConstant = 1,
57     kStackSlot = 2,  // 32bit stack slot.
58     kDoubleStackSlot = 3,  // 64bit stack slot.
59 
60     kRegister = 4,  // Core register.
61 
62     // We do not use the value 5 because it conflicts with kLocationConstantMask.
63     kDoNotUse5 = 5,
64 
65     kFpuRegister = 6,  // Float register.
66 
67     kRegisterPair = 7,  // Long register.
68 
69     kFpuRegisterPair = 8,  // Double register.
70 
71     // We do not use the value 9 because it conflicts with kLocationConstantMask.
72     kDoNotUse9 = 9,
73 
74     kSIMDStackSlot = 10,  // 128bit stack slot. TODO: generalize with encoded #bytes?
75 
76     // Unallocated location represents a location that is not fixed and can be
77     // allocated by a register allocator.  Each unallocated location has
78     // a policy that specifies what kind of location is suitable. Payload
79     // contains register allocation policy.
80     kUnallocated = 11,
81   };
82 
Location()83   constexpr Location() : ValueObject(), value_(kInvalid) {
84     // Verify that non-constant location kinds do not interfere with kConstant.
85     static_assert((kInvalid & kLocationConstantMask) != kConstant, "TagError");
86     static_assert((kUnallocated & kLocationConstantMask) != kConstant, "TagError");
87     static_assert((kStackSlot & kLocationConstantMask) != kConstant, "TagError");
88     static_assert((kDoubleStackSlot & kLocationConstantMask) != kConstant, "TagError");
89     static_assert((kSIMDStackSlot & kLocationConstantMask) != kConstant, "TagError");
90     static_assert((kRegister & kLocationConstantMask) != kConstant, "TagError");
91     static_assert((kFpuRegister & kLocationConstantMask) != kConstant, "TagError");
92     static_assert((kRegisterPair & kLocationConstantMask) != kConstant, "TagError");
93     static_assert((kFpuRegisterPair & kLocationConstantMask) != kConstant, "TagError");
94     static_assert((kConstant & kLocationConstantMask) == kConstant, "TagError");
95 
96     DCHECK(!IsValid());
97   }
98 
99   constexpr Location(const Location& other) = default;
100 
101   Location& operator=(const Location& other) = default;
102 
IsConstant()103   bool IsConstant() const {
104     return (value_ & kLocationConstantMask) == kConstant;
105   }
106 
ConstantLocation(HInstruction * constant)107   static Location ConstantLocation(HInstruction* constant) {
108     DCHECK(constant != nullptr);
109     if (kIsDebugBuild) {
110       // Call out-of-line helper to avoid circular dependency with `nodes.h`.
111       DCheckInstructionIsConstant(constant);
112     }
113     return Location(kConstant | reinterpret_cast<uintptr_t>(constant));
114   }
115 
GetConstant()116   HConstant* GetConstant() const {
117     DCHECK(IsConstant());
118     return reinterpret_cast<HConstant*>(value_ & ~kLocationConstantMask);
119   }
120 
IsValid()121   bool IsValid() const {
122     return value_ != kInvalid;
123   }
124 
IsInvalid()125   bool IsInvalid() const {
126     return !IsValid();
127   }
128 
129   // Empty location. Used if there the location should be ignored.
NoLocation()130   static constexpr Location NoLocation() {
131     return Location();
132   }
133 
134   // Register locations.
RegisterLocation(int reg)135   static constexpr Location RegisterLocation(int reg) {
136     return Location(kRegister, reg);
137   }
138 
FpuRegisterLocation(int reg)139   static constexpr Location FpuRegisterLocation(int reg) {
140     return Location(kFpuRegister, reg);
141   }
142 
RegisterPairLocation(int low,int high)143   static constexpr Location RegisterPairLocation(int low, int high) {
144     return Location(kRegisterPair, low << 16 | high);
145   }
146 
FpuRegisterPairLocation(int low,int high)147   static constexpr Location FpuRegisterPairLocation(int low, int high) {
148     return Location(kFpuRegisterPair, low << 16 | high);
149   }
150 
IsRegister()151   bool IsRegister() const {
152     return GetKind() == kRegister;
153   }
154 
IsFpuRegister()155   bool IsFpuRegister() const {
156     return GetKind() == kFpuRegister;
157   }
158 
IsRegisterPair()159   bool IsRegisterPair() const {
160     return GetKind() == kRegisterPair;
161   }
162 
IsFpuRegisterPair()163   bool IsFpuRegisterPair() const {
164     return GetKind() == kFpuRegisterPair;
165   }
166 
IsRegisterKind()167   bool IsRegisterKind() const {
168     return IsRegister() || IsFpuRegister() || IsRegisterPair() || IsFpuRegisterPair();
169   }
170 
reg()171   int reg() const {
172     DCHECK(IsRegister() || IsFpuRegister());
173     return GetPayload();
174   }
175 
low()176   int low() const {
177     DCHECK(IsPair());
178     return GetPayload() >> 16;
179   }
180 
high()181   int high() const {
182     DCHECK(IsPair());
183     return GetPayload() & 0xFFFF;
184   }
185 
186   template <typename T>
AsRegister()187   T AsRegister() const {
188     DCHECK(IsRegister());
189     return static_cast<T>(reg());
190   }
191 
192   template <typename T>
AsFpuRegister()193   T AsFpuRegister() const {
194     DCHECK(IsFpuRegister());
195     return static_cast<T>(reg());
196   }
197 
198   template <typename T>
AsRegisterPairLow()199   T AsRegisterPairLow() const {
200     DCHECK(IsRegisterPair());
201     return static_cast<T>(low());
202   }
203 
204   template <typename T>
AsRegisterPairHigh()205   T AsRegisterPairHigh() const {
206     DCHECK(IsRegisterPair());
207     return static_cast<T>(high());
208   }
209 
210   template <typename T>
AsFpuRegisterPairLow()211   T AsFpuRegisterPairLow() const {
212     DCHECK(IsFpuRegisterPair());
213     return static_cast<T>(low());
214   }
215 
216   template <typename T>
AsFpuRegisterPairHigh()217   T AsFpuRegisterPairHigh() const {
218     DCHECK(IsFpuRegisterPair());
219     return static_cast<T>(high());
220   }
221 
IsPair()222   bool IsPair() const {
223     return IsRegisterPair() || IsFpuRegisterPair();
224   }
225 
ToLow()226   Location ToLow() const {
227     if (IsRegisterPair()) {
228       return Location::RegisterLocation(low());
229     } else if (IsFpuRegisterPair()) {
230       return Location::FpuRegisterLocation(low());
231     } else {
232       DCHECK(IsDoubleStackSlot());
233       return Location::StackSlot(GetStackIndex());
234     }
235   }
236 
ToHigh()237   Location ToHigh() const {
238     if (IsRegisterPair()) {
239       return Location::RegisterLocation(high());
240     } else if (IsFpuRegisterPair()) {
241       return Location::FpuRegisterLocation(high());
242     } else {
243       DCHECK(IsDoubleStackSlot());
244       return Location::StackSlot(GetHighStackIndex(4));
245     }
246   }
247 
EncodeStackIndex(intptr_t stack_index)248   static uintptr_t EncodeStackIndex(intptr_t stack_index) {
249     DCHECK(-kStackIndexBias <= stack_index);
250     DCHECK(stack_index < kStackIndexBias);
251     return static_cast<uintptr_t>(kStackIndexBias + stack_index);
252   }
253 
StackSlot(intptr_t stack_index)254   static Location StackSlot(intptr_t stack_index) {
255     uintptr_t payload = EncodeStackIndex(stack_index);
256     Location loc(kStackSlot, payload);
257     // Ensure that sign is preserved.
258     DCHECK_EQ(loc.GetStackIndex(), stack_index);
259     return loc;
260   }
261 
IsStackSlot()262   bool IsStackSlot() const {
263     return GetKind() == kStackSlot;
264   }
265 
DoubleStackSlot(intptr_t stack_index)266   static Location DoubleStackSlot(intptr_t stack_index) {
267     uintptr_t payload = EncodeStackIndex(stack_index);
268     Location loc(kDoubleStackSlot, payload);
269     // Ensure that sign is preserved.
270     DCHECK_EQ(loc.GetStackIndex(), stack_index);
271     return loc;
272   }
273 
IsDoubleStackSlot()274   bool IsDoubleStackSlot() const {
275     return GetKind() == kDoubleStackSlot;
276   }
277 
SIMDStackSlot(intptr_t stack_index)278   static Location SIMDStackSlot(intptr_t stack_index) {
279     uintptr_t payload = EncodeStackIndex(stack_index);
280     Location loc(kSIMDStackSlot, payload);
281     // Ensure that sign is preserved.
282     DCHECK_EQ(loc.GetStackIndex(), stack_index);
283     return loc;
284   }
285 
IsSIMDStackSlot()286   bool IsSIMDStackSlot() const {
287     return GetKind() == kSIMDStackSlot;
288   }
289 
StackSlotByNumOfSlots(size_t num_of_slots,int spill_slot)290   static Location StackSlotByNumOfSlots(size_t num_of_slots, int spill_slot) {
291     DCHECK_NE(num_of_slots, 0u);
292     switch (num_of_slots) {
293       case 1u:
294         return Location::StackSlot(spill_slot);
295       case 2u:
296         return Location::DoubleStackSlot(spill_slot);
297       default:
298         // Assume all other stack slot sizes correspond to SIMD slot size.
299         return Location::SIMDStackSlot(spill_slot);
300     }
301   }
302 
GetStackIndex()303   intptr_t GetStackIndex() const {
304     DCHECK(IsStackSlot() || IsDoubleStackSlot() || IsSIMDStackSlot());
305     // Decode stack index manually to preserve sign.
306     return GetPayload() - kStackIndexBias;
307   }
308 
GetHighStackIndex(uintptr_t word_size)309   intptr_t GetHighStackIndex(uintptr_t word_size) const {
310     DCHECK(IsDoubleStackSlot());
311     // Decode stack index manually to preserve sign.
312     return GetPayload() - kStackIndexBias + word_size;
313   }
314 
GetKind()315   Kind GetKind() const {
316     return IsConstant() ? kConstant : KindField::Decode(value_);
317   }
318 
Equals(Location other)319   bool Equals(Location other) const {
320     return value_ == other.value_;
321   }
322 
Contains(Location other)323   bool Contains(Location other) const {
324     if (Equals(other)) {
325       return true;
326     } else if (IsPair() || IsDoubleStackSlot()) {
327       return ToLow().Equals(other) || ToHigh().Equals(other);
328     }
329     return false;
330   }
331 
OverlapsWith(Location other)332   bool OverlapsWith(Location other) const {
333     // Only check the overlapping case that can happen with our register allocation algorithm.
334     bool overlap = Contains(other) || other.Contains(*this);
335     if (kIsDebugBuild && !overlap) {
336       // Note: These are also overlapping cases. But we are not able to handle them in
337       // ParallelMoveResolverWithSwap. Make sure that we do not meet such case with our compiler.
338       if ((IsPair() && other.IsPair()) || (IsDoubleStackSlot() && other.IsDoubleStackSlot())) {
339         DCHECK(!Contains(other.ToLow()));
340         DCHECK(!Contains(other.ToHigh()));
341       }
342     }
343     return overlap;
344   }
345 
DebugString()346   const char* DebugString() const {
347     switch (GetKind()) {
348       case kInvalid: return "I";
349       case kRegister: return "R";
350       case kStackSlot: return "S";
351       case kDoubleStackSlot: return "DS";
352       case kSIMDStackSlot: return "SIMD";
353       case kUnallocated: return "U";
354       case kConstant: return "C";
355       case kFpuRegister: return "F";
356       case kRegisterPair: return "RP";
357       case kFpuRegisterPair: return "FP";
358       case kDoNotUse5:  // fall-through
359       case kDoNotUse9:
360         LOG(FATAL) << "Should not use this location kind";
361     }
362     UNREACHABLE();
363   }
364 
365   // Unallocated locations.
366   enum Policy {
367     kAny,
368     kRequiresRegister,
369     kRequiresFpuRegister,
370     kSameAsFirstInput,
371   };
372 
IsUnallocated()373   bool IsUnallocated() const {
374     return GetKind() == kUnallocated;
375   }
376 
UnallocatedLocation(Policy policy)377   static Location UnallocatedLocation(Policy policy) {
378     return Location(kUnallocated, PolicyField::Encode(policy));
379   }
380 
381   // Any free register is suitable to replace this unallocated location.
Any()382   static Location Any() {
383     return UnallocatedLocation(kAny);
384   }
385 
RequiresRegister()386   static Location RequiresRegister() {
387     return UnallocatedLocation(kRequiresRegister);
388   }
389 
RequiresFpuRegister()390   static Location RequiresFpuRegister() {
391     return UnallocatedLocation(kRequiresFpuRegister);
392   }
393 
394   static Location RegisterOrConstant(HInstruction* instruction);
395   static Location RegisterOrInt32Constant(HInstruction* instruction);
396   static Location ByteRegisterOrConstant(int reg, HInstruction* instruction);
397   static Location FpuRegisterOrConstant(HInstruction* instruction);
398   static Location FpuRegisterOrInt32Constant(HInstruction* instruction);
399 
400   // The location of the first input to the instruction will be
401   // used to replace this unallocated location.
SameAsFirstInput()402   static Location SameAsFirstInput() {
403     return UnallocatedLocation(kSameAsFirstInput);
404   }
405 
GetPolicy()406   Policy GetPolicy() const {
407     DCHECK(IsUnallocated());
408     return PolicyField::Decode(GetPayload());
409   }
410 
RequiresRegisterKind()411   bool RequiresRegisterKind() const {
412     return GetPolicy() == kRequiresRegister || GetPolicy() == kRequiresFpuRegister;
413   }
414 
GetEncoding()415   uintptr_t GetEncoding() const {
416     return GetPayload();
417   }
418 
419  private:
420   // Number of bits required to encode Kind value.
421   static constexpr uint32_t kBitsForKind = 4;
422   static constexpr uint32_t kBitsForPayload = kBitsPerIntPtrT - kBitsForKind;
423   static constexpr uintptr_t kLocationConstantMask = 0x3;
424 
Location(uintptr_t value)425   explicit Location(uintptr_t value) : value_(value) {}
426 
Location(Kind kind,uintptr_t payload)427   constexpr Location(Kind kind, uintptr_t payload)
428       : value_(KindField::Encode(kind) | PayloadField::Encode(payload)) {}
429 
GetPayload()430   uintptr_t GetPayload() const {
431     return PayloadField::Decode(value_);
432   }
433 
434   static void DCheckInstructionIsConstant(HInstruction* instruction);
435 
436   using KindField = BitField<Kind, 0, kBitsForKind>;
437   using PayloadField = BitField<uintptr_t, kBitsForKind, kBitsForPayload>;
438 
439   // Layout for kUnallocated locations payload.
440   using PolicyField = BitField<Policy, 0, 3>;
441 
442   // Layout for stack slots.
443   static const intptr_t kStackIndexBias =
444       static_cast<intptr_t>(1) << (kBitsForPayload - 1);
445 
446   // Location either contains kind and payload fields or a tagged handle for
447   // a constant locations. Values of enumeration Kind are selected in such a
448   // way that none of them can be interpreted as a kConstant tag.
449   uintptr_t value_;
450 };
std::ostream& operator<<(std::ostream& os, Location::Kind rhs);
std::ostream& operator<<(std::ostream& os, Location::Policy rhs);

454 class RegisterSet : public ValueObject {
455  public:
Empty()456   static RegisterSet Empty() { return RegisterSet(); }
AllFpu()457   static RegisterSet AllFpu() { return RegisterSet(0, -1); }
458 
Add(Location loc)459   void Add(Location loc) {
460     if (loc.IsRegister()) {
461       core_registers_ |= (1 << loc.reg());
462     } else {
463       DCHECK(loc.IsFpuRegister());
464       floating_point_registers_ |= (1 << loc.reg());
465     }
466   }
467 
Remove(Location loc)468   void Remove(Location loc) {
469     if (loc.IsRegister()) {
470       core_registers_ &= ~(1 << loc.reg());
471     } else {
472       DCHECK(loc.IsFpuRegister()) << loc;
473       floating_point_registers_ &= ~(1 << loc.reg());
474     }
475   }
476 
ContainsCoreRegister(uint32_t id)477   bool ContainsCoreRegister(uint32_t id) const {
478     return Contains(core_registers_, id);
479   }
480 
ContainsFloatingPointRegister(uint32_t id)481   bool ContainsFloatingPointRegister(uint32_t id) const {
482     return Contains(floating_point_registers_, id);
483   }
484 
Contains(uint32_t register_set,uint32_t reg)485   static bool Contains(uint32_t register_set, uint32_t reg) {
486     return (register_set & (1 << reg)) != 0;
487   }
488 
OverlapsRegisters(Location out)489   bool OverlapsRegisters(Location out) {
490     DCHECK(out.IsRegisterKind());
491     switch (out.GetKind()) {
492       case Location::Kind::kRegister:
493         return ContainsCoreRegister(out.reg());
494       case Location::Kind::kFpuRegister:
495         return ContainsFloatingPointRegister(out.reg());
496       case Location::Kind::kRegisterPair:
497         return ContainsCoreRegister(out.low()) || ContainsCoreRegister(out.high());
498       case Location::Kind::kFpuRegisterPair:
499         return ContainsFloatingPointRegister(out.low()) ||
500                ContainsFloatingPointRegister(out.high());
501       default:
502         return false;
503     }
504   }
505 
GetNumberOfRegisters()506   size_t GetNumberOfRegisters() const {
507     return POPCOUNT(core_registers_) + POPCOUNT(floating_point_registers_);
508   }
509 
GetCoreRegisters()510   uint32_t GetCoreRegisters() const {
511     return core_registers_;
512   }
513 
GetFloatingPointRegisters()514   uint32_t GetFloatingPointRegisters() const {
515     return floating_point_registers_;
516   }
517 
518  private:
RegisterSet()519   RegisterSet() : core_registers_(0), floating_point_registers_(0) {}
RegisterSet(uint32_t core,uint32_t fp)520   RegisterSet(uint32_t core, uint32_t fp) : core_registers_(core), floating_point_registers_(fp) {}
521 
522   uint32_t core_registers_;
523   uint32_t floating_point_registers_;
524 };
525 
526 static constexpr bool kIntrinsified = true;
527 
528 /**
529  * The code generator computes LocationSummary for each instruction so that
530  * the instruction itself knows what code to generate: where to find the inputs
531  * and where to place the result.
532  *
533  * The intent is to have the code for generating the instruction independent of
534  * register allocation. A register allocator just has to provide a LocationSummary.
535  */
536 class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
537  public:
538   enum CallKind : uint8_t {
539     kNoCall,
540     kCallOnMainAndSlowPath,
541     kCallOnSlowPath,
542     kCallOnMainOnly
543   };
544 
545   explicit LocationSummary(HInstruction* instruction,
546                            CallKind call_kind = kNoCall,
547                            bool intrinsified = false);
548 
SetInAt(uint32_t at,Location location)549   void SetInAt(uint32_t at, Location location) {
550     inputs_[at] = location;
551   }
552 
InAt(uint32_t at)553   Location InAt(uint32_t at) const {
554     return inputs_[at];
555   }
556 
GetInputCount()557   size_t GetInputCount() const {
558     return inputs_.size();
559   }
560 
561   // Set the output location.  Argument `overlaps` tells whether the
562   // output overlaps any of the inputs (if so, it cannot share the
563   // same register as one of the inputs); it is set to
564   // `Location::kOutputOverlap` by default for safety.
565   void SetOut(Location location, Location::OutputOverlap overlaps = Location::kOutputOverlap) {
566     DCHECK(output_.IsInvalid());
567     output_overlaps_ = overlaps;
568     output_ = location;
569   }
570 
UpdateOut(Location location)571   void UpdateOut(Location location) {
572     // There are two reasons for updating an output:
573     // 1) Parameters, where we only know the exact stack slot after
574     //    doing full register allocation.
575     // 2) Unallocated location.
576     DCHECK(output_.IsStackSlot() || output_.IsDoubleStackSlot() || output_.IsUnallocated());
577     output_ = location;
578   }
579 
AddTemp(Location location)580   void AddTemp(Location location) {
581     temps_.push_back(location);
582   }
583 
AddRegisterTemps(size_t count)584   void AddRegisterTemps(size_t count) {
585     for (size_t i = 0; i < count; ++i) {
586       AddTemp(Location::RequiresRegister());
587     }
588   }
589 
GetTemp(uint32_t at)590   Location GetTemp(uint32_t at) const {
591     return temps_[at];
592   }
593 
SetTempAt(uint32_t at,Location location)594   void SetTempAt(uint32_t at, Location location) {
595     DCHECK(temps_[at].IsUnallocated() || temps_[at].IsInvalid());
596     temps_[at] = location;
597   }
598 
GetTempCount()599   size_t GetTempCount() const {
600     return temps_.size();
601   }
602 
HasTemps()603   bool HasTemps() const { return !temps_.empty(); }
604 
Out()605   Location Out() const { return output_; }
606 
CanCall()607   bool CanCall() const {
608     return call_kind_ != kNoCall;
609   }
610 
WillCall()611   bool WillCall() const {
612     return call_kind_ == kCallOnMainOnly || call_kind_ == kCallOnMainAndSlowPath;
613   }
614 
CallsOnSlowPath()615   bool CallsOnSlowPath() const {
616     return OnlyCallsOnSlowPath() || CallsOnMainAndSlowPath();
617   }
618 
OnlyCallsOnSlowPath()619   bool OnlyCallsOnSlowPath() const {
620     return call_kind_ == kCallOnSlowPath;
621   }
622 
NeedsSuspendCheckEntry()623   bool NeedsSuspendCheckEntry() const {
624     // Slow path calls do not need a SuspendCheck at method entry since they go into the runtime,
625     // which we expect to either do a suspend check or return quickly.
626     return WillCall();
627   }
628 
CallsOnMainAndSlowPath()629   bool CallsOnMainAndSlowPath() const {
630     return call_kind_ == kCallOnMainAndSlowPath;
631   }
632 
NeedsSafepoint()633   bool NeedsSafepoint() const {
634     return CanCall();
635   }
636 
SetCustomSlowPathCallerSaves(const RegisterSet & caller_saves)637   void SetCustomSlowPathCallerSaves(const RegisterSet& caller_saves) {
638     DCHECK(OnlyCallsOnSlowPath());
639     has_custom_slow_path_calling_convention_ = true;
640     custom_slow_path_caller_saves_ = caller_saves;
641   }
642 
HasCustomSlowPathCallingConvention()643   bool HasCustomSlowPathCallingConvention() const {
644     return has_custom_slow_path_calling_convention_;
645   }
646 
GetCustomSlowPathCallerSaves()647   const RegisterSet& GetCustomSlowPathCallerSaves() const {
648     DCHECK(HasCustomSlowPathCallingConvention());
649     return custom_slow_path_caller_saves_;
650   }
651 
SetStackBit(uint32_t index)652   void SetStackBit(uint32_t index) {
653     stack_mask_->SetBit(index);
654   }
655 
ClearStackBit(uint32_t index)656   void ClearStackBit(uint32_t index) {
657     stack_mask_->ClearBit(index);
658   }
659 
SetRegisterBit(uint32_t reg_id)660   void SetRegisterBit(uint32_t reg_id) {
661     register_mask_ |= (1 << reg_id);
662   }
663 
GetRegisterMask()664   uint32_t GetRegisterMask() const {
665     return register_mask_;
666   }
667 
RegisterContainsObject(uint32_t reg_id)668   bool RegisterContainsObject(uint32_t reg_id) {
669     return RegisterSet::Contains(register_mask_, reg_id);
670   }
671 
AddLiveRegister(Location location)672   void AddLiveRegister(Location location) {
673     live_registers_.Add(location);
674   }
675 
GetStackMask()676   BitVector* GetStackMask() const {
677     return stack_mask_;
678   }
679 
GetLiveRegisters()680   RegisterSet* GetLiveRegisters() {
681     return &live_registers_;
682   }
683 
GetNumberOfLiveRegisters()684   size_t GetNumberOfLiveRegisters() const {
685     return live_registers_.GetNumberOfRegisters();
686   }
687 
OutputUsesSameAs(uint32_t input_index)688   bool OutputUsesSameAs(uint32_t input_index) const {
689     return (input_index == 0)
690         && output_.IsUnallocated()
691         && (output_.GetPolicy() == Location::kSameAsFirstInput);
692   }
693 
IsFixedInput(uint32_t input_index)694   bool IsFixedInput(uint32_t input_index) const {
695     Location input = inputs_[input_index];
696     return input.IsRegister()
697         || input.IsFpuRegister()
698         || input.IsPair()
699         || input.IsStackSlot()
700         || input.IsDoubleStackSlot();
701   }
702 
OutputCanOverlapWithInputs()703   bool OutputCanOverlapWithInputs() const {
704     return output_overlaps_ == Location::kOutputOverlap;
705   }
706 
Intrinsified()707   bool Intrinsified() const {
708     return intrinsified_;
709   }
710 
711  private:
712   LocationSummary(HInstruction* instruction,
713                   CallKind call_kind,
714                   bool intrinsified,
715                   ArenaAllocator* allocator);
716 
717   ArrayRef<Location> inputs_;
718   ArenaVector<Location> temps_;
719   Location output_;
720 
721   // Mask of objects that live in the stack.
722   BitVector* stack_mask_;
723 
724   const CallKind call_kind_;
725   // Whether these are locations for an intrinsified call.
726   const bool intrinsified_;
727   // Whether the slow path has default or custom calling convention.
728   bool has_custom_slow_path_calling_convention_;
729   // Whether the output overlaps with any of the inputs. If it overlaps, then it cannot
730   // share the same register as the inputs.
731   Location::OutputOverlap output_overlaps_;
732 
733   // Mask of objects that live in register.
734   uint32_t register_mask_;
735 
736   // Registers that are in use at this position.
737   RegisterSet live_registers_;
738 
739   // Custom slow path caller saves. Valid only if indicated by
740   // `has_custom_slow_path_calling_convention_`.
741   RegisterSet custom_slow_path_caller_saves_;
742 
743   ART_FRIEND_TEST(RegisterAllocatorTest, ExpectedInRegisterHint);
744   ART_FRIEND_TEST(RegisterAllocatorTest, SameAsFirstInputHint);
745   DISALLOW_COPY_AND_ASSIGN(LocationSummary);
746 };

}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_LOCATIONS_H_