• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_COMPILER_OPTIMIZING_LOCATIONS_H_
18 #define ART_COMPILER_OPTIMIZING_LOCATIONS_H_
19 
#include <iosfwd>

#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/bit_field.h"
#include "base/bit_utils.h"
#include "base/bit_vector.h"
#include "base/value_object.h"
26 
27 namespace art {
28 
29 class HConstant;
30 class HInstruction;
31 class Location;
32 
33 std::ostream& operator<<(std::ostream& os, const Location& location);
34 
35 /**
36  * A Location is an abstraction over the potential location
37  * of an instruction. It could be in register or stack.
38  */
39 class Location : public ValueObject {
40  public:
41   enum OutputOverlap {
42     // The liveness of the output overlaps the liveness of one or
43     // several input(s); the register allocator cannot reuse an
44     // input's location for the output's location.
45     kOutputOverlap,
46     // The liveness of the output does not overlap the liveness of any
47     // input; the register allocator is allowed to reuse an input's
48     // location for the output's location.
49     kNoOutputOverlap
50   };
51 
52   enum Kind {
53     kInvalid = 0,
54     kConstant = 1,
55     kStackSlot = 2,  // 32bit stack slot.
56     kDoubleStackSlot = 3,  // 64bit stack slot.
57 
58     kRegister = 4,  // Core register.
59 
60     // We do not use the value 5 because it conflicts with kLocationConstantMask.
61     kDoNotUse5 = 5,
62 
63     kFpuRegister = 6,  // Float register.
64 
65     kRegisterPair = 7,  // Long register.
66 
67     kFpuRegisterPair = 8,  // Double register.
68 
69     // We do not use the value 9 because it conflicts with kLocationConstantMask.
70     kDoNotUse9 = 9,
71 
72     kSIMDStackSlot = 10,  // 128bit stack slot. TODO: generalize with encoded #bytes?
73 
74     // Unallocated location represents a location that is not fixed and can be
75     // allocated by a register allocator.  Each unallocated location has
76     // a policy that specifies what kind of location is suitable. Payload
77     // contains register allocation policy.
78     kUnallocated = 11,
79   };
80 
Location()81   Location() : ValueObject(), value_(kInvalid) {
82     // Verify that non-constant location kinds do not interfere with kConstant.
83     static_assert((kInvalid & kLocationConstantMask) != kConstant, "TagError");
84     static_assert((kUnallocated & kLocationConstantMask) != kConstant, "TagError");
85     static_assert((kStackSlot & kLocationConstantMask) != kConstant, "TagError");
86     static_assert((kDoubleStackSlot & kLocationConstantMask) != kConstant, "TagError");
87     static_assert((kSIMDStackSlot & kLocationConstantMask) != kConstant, "TagError");
88     static_assert((kRegister & kLocationConstantMask) != kConstant, "TagError");
89     static_assert((kFpuRegister & kLocationConstantMask) != kConstant, "TagError");
90     static_assert((kRegisterPair & kLocationConstantMask) != kConstant, "TagError");
91     static_assert((kFpuRegisterPair & kLocationConstantMask) != kConstant, "TagError");
92     static_assert((kConstant & kLocationConstantMask) == kConstant, "TagError");
93 
94     DCHECK(!IsValid());
95   }
96 
97   Location(const Location& other) = default;
98 
99   Location& operator=(const Location& other) = default;
100 
IsConstant()101   bool IsConstant() const {
102     return (value_ & kLocationConstantMask) == kConstant;
103   }
104 
ConstantLocation(HConstant * constant)105   static Location ConstantLocation(HConstant* constant) {
106     DCHECK(constant != nullptr);
107     return Location(kConstant | reinterpret_cast<uintptr_t>(constant));
108   }
109 
GetConstant()110   HConstant* GetConstant() const {
111     DCHECK(IsConstant());
112     return reinterpret_cast<HConstant*>(value_ & ~kLocationConstantMask);
113   }
114 
IsValid()115   bool IsValid() const {
116     return value_ != kInvalid;
117   }
118 
IsInvalid()119   bool IsInvalid() const {
120     return !IsValid();
121   }
122 
123   // Empty location. Used if there the location should be ignored.
NoLocation()124   static Location NoLocation() {
125     return Location();
126   }
127 
128   // Register locations.
RegisterLocation(int reg)129   static Location RegisterLocation(int reg) {
130     return Location(kRegister, reg);
131   }
132 
FpuRegisterLocation(int reg)133   static Location FpuRegisterLocation(int reg) {
134     return Location(kFpuRegister, reg);
135   }
136 
RegisterPairLocation(int low,int high)137   static Location RegisterPairLocation(int low, int high) {
138     return Location(kRegisterPair, low << 16 | high);
139   }
140 
FpuRegisterPairLocation(int low,int high)141   static Location FpuRegisterPairLocation(int low, int high) {
142     return Location(kFpuRegisterPair, low << 16 | high);
143   }
144 
IsRegister()145   bool IsRegister() const {
146     return GetKind() == kRegister;
147   }
148 
IsFpuRegister()149   bool IsFpuRegister() const {
150     return GetKind() == kFpuRegister;
151   }
152 
IsRegisterPair()153   bool IsRegisterPair() const {
154     return GetKind() == kRegisterPair;
155   }
156 
IsFpuRegisterPair()157   bool IsFpuRegisterPair() const {
158     return GetKind() == kFpuRegisterPair;
159   }
160 
IsRegisterKind()161   bool IsRegisterKind() const {
162     return IsRegister() || IsFpuRegister() || IsRegisterPair() || IsFpuRegisterPair();
163   }
164 
reg()165   int reg() const {
166     DCHECK(IsRegister() || IsFpuRegister());
167     return GetPayload();
168   }
169 
low()170   int low() const {
171     DCHECK(IsPair());
172     return GetPayload() >> 16;
173   }
174 
high()175   int high() const {
176     DCHECK(IsPair());
177     return GetPayload() & 0xFFFF;
178   }
179 
180   template <typename T>
AsRegister()181   T AsRegister() const {
182     DCHECK(IsRegister());
183     return static_cast<T>(reg());
184   }
185 
186   template <typename T>
AsFpuRegister()187   T AsFpuRegister() const {
188     DCHECK(IsFpuRegister());
189     return static_cast<T>(reg());
190   }
191 
192   template <typename T>
AsRegisterPairLow()193   T AsRegisterPairLow() const {
194     DCHECK(IsRegisterPair());
195     return static_cast<T>(low());
196   }
197 
198   template <typename T>
AsRegisterPairHigh()199   T AsRegisterPairHigh() const {
200     DCHECK(IsRegisterPair());
201     return static_cast<T>(high());
202   }
203 
204   template <typename T>
AsFpuRegisterPairLow()205   T AsFpuRegisterPairLow() const {
206     DCHECK(IsFpuRegisterPair());
207     return static_cast<T>(low());
208   }
209 
210   template <typename T>
AsFpuRegisterPairHigh()211   T AsFpuRegisterPairHigh() const {
212     DCHECK(IsFpuRegisterPair());
213     return static_cast<T>(high());
214   }
215 
IsPair()216   bool IsPair() const {
217     return IsRegisterPair() || IsFpuRegisterPair();
218   }
219 
ToLow()220   Location ToLow() const {
221     if (IsRegisterPair()) {
222       return Location::RegisterLocation(low());
223     } else if (IsFpuRegisterPair()) {
224       return Location::FpuRegisterLocation(low());
225     } else {
226       DCHECK(IsDoubleStackSlot());
227       return Location::StackSlot(GetStackIndex());
228     }
229   }
230 
ToHigh()231   Location ToHigh() const {
232     if (IsRegisterPair()) {
233       return Location::RegisterLocation(high());
234     } else if (IsFpuRegisterPair()) {
235       return Location::FpuRegisterLocation(high());
236     } else {
237       DCHECK(IsDoubleStackSlot());
238       return Location::StackSlot(GetHighStackIndex(4));
239     }
240   }
241 
EncodeStackIndex(intptr_t stack_index)242   static uintptr_t EncodeStackIndex(intptr_t stack_index) {
243     DCHECK(-kStackIndexBias <= stack_index);
244     DCHECK(stack_index < kStackIndexBias);
245     return static_cast<uintptr_t>(kStackIndexBias + stack_index);
246   }
247 
StackSlot(intptr_t stack_index)248   static Location StackSlot(intptr_t stack_index) {
249     uintptr_t payload = EncodeStackIndex(stack_index);
250     Location loc(kStackSlot, payload);
251     // Ensure that sign is preserved.
252     DCHECK_EQ(loc.GetStackIndex(), stack_index);
253     return loc;
254   }
255 
IsStackSlot()256   bool IsStackSlot() const {
257     return GetKind() == kStackSlot;
258   }
259 
DoubleStackSlot(intptr_t stack_index)260   static Location DoubleStackSlot(intptr_t stack_index) {
261     uintptr_t payload = EncodeStackIndex(stack_index);
262     Location loc(kDoubleStackSlot, payload);
263     // Ensure that sign is preserved.
264     DCHECK_EQ(loc.GetStackIndex(), stack_index);
265     return loc;
266   }
267 
IsDoubleStackSlot()268   bool IsDoubleStackSlot() const {
269     return GetKind() == kDoubleStackSlot;
270   }
271 
SIMDStackSlot(intptr_t stack_index)272   static Location SIMDStackSlot(intptr_t stack_index) {
273     uintptr_t payload = EncodeStackIndex(stack_index);
274     Location loc(kSIMDStackSlot, payload);
275     // Ensure that sign is preserved.
276     DCHECK_EQ(loc.GetStackIndex(), stack_index);
277     return loc;
278   }
279 
IsSIMDStackSlot()280   bool IsSIMDStackSlot() const {
281     return GetKind() == kSIMDStackSlot;
282   }
283 
StackSlotByNumOfSlots(size_t num_of_slots,int spill_slot)284   static Location StackSlotByNumOfSlots(size_t num_of_slots, int spill_slot) {
285     DCHECK_NE(num_of_slots, 0u);
286     switch (num_of_slots) {
287       case 1u:
288         return Location::StackSlot(spill_slot);
289       case 2u:
290         return Location::DoubleStackSlot(spill_slot);
291       default:
292         // Assume all other stack slot sizes correspond to SIMD slot size.
293         return Location::SIMDStackSlot(spill_slot);
294     }
295   }
296 
GetStackIndex()297   intptr_t GetStackIndex() const {
298     DCHECK(IsStackSlot() || IsDoubleStackSlot() || IsSIMDStackSlot());
299     // Decode stack index manually to preserve sign.
300     return GetPayload() - kStackIndexBias;
301   }
302 
GetHighStackIndex(uintptr_t word_size)303   intptr_t GetHighStackIndex(uintptr_t word_size) const {
304     DCHECK(IsDoubleStackSlot());
305     // Decode stack index manually to preserve sign.
306     return GetPayload() - kStackIndexBias + word_size;
307   }
308 
GetKind()309   Kind GetKind() const {
310     return IsConstant() ? kConstant : KindField::Decode(value_);
311   }
312 
Equals(Location other)313   bool Equals(Location other) const {
314     return value_ == other.value_;
315   }
316 
Contains(Location other)317   bool Contains(Location other) const {
318     if (Equals(other)) {
319       return true;
320     } else if (IsPair() || IsDoubleStackSlot()) {
321       return ToLow().Equals(other) || ToHigh().Equals(other);
322     }
323     return false;
324   }
325 
OverlapsWith(Location other)326   bool OverlapsWith(Location other) const {
327     // Only check the overlapping case that can happen with our register allocation algorithm.
328     bool overlap = Contains(other) || other.Contains(*this);
329     if (kIsDebugBuild && !overlap) {
330       // Note: These are also overlapping cases. But we are not able to handle them in
331       // ParallelMoveResolverWithSwap. Make sure that we do not meet such case with our compiler.
332       if ((IsPair() && other.IsPair()) || (IsDoubleStackSlot() && other.IsDoubleStackSlot())) {
333         DCHECK(!Contains(other.ToLow()));
334         DCHECK(!Contains(other.ToHigh()));
335       }
336     }
337     return overlap;
338   }
339 
DebugString()340   const char* DebugString() const {
341     switch (GetKind()) {
342       case kInvalid: return "I";
343       case kRegister: return "R";
344       case kStackSlot: return "S";
345       case kDoubleStackSlot: return "DS";
346       case kSIMDStackSlot: return "SIMD";
347       case kUnallocated: return "U";
348       case kConstant: return "C";
349       case kFpuRegister: return "F";
350       case kRegisterPair: return "RP";
351       case kFpuRegisterPair: return "FP";
352       case kDoNotUse5:  // fall-through
353       case kDoNotUse9:
354         LOG(FATAL) << "Should not use this location kind";
355     }
356     UNREACHABLE();
357   }
358 
359   // Unallocated locations.
360   enum Policy {
361     kAny,
362     kRequiresRegister,
363     kRequiresFpuRegister,
364     kSameAsFirstInput,
365   };
366 
IsUnallocated()367   bool IsUnallocated() const {
368     return GetKind() == kUnallocated;
369   }
370 
UnallocatedLocation(Policy policy)371   static Location UnallocatedLocation(Policy policy) {
372     return Location(kUnallocated, PolicyField::Encode(policy));
373   }
374 
375   // Any free register is suitable to replace this unallocated location.
Any()376   static Location Any() {
377     return UnallocatedLocation(kAny);
378   }
379 
RequiresRegister()380   static Location RequiresRegister() {
381     return UnallocatedLocation(kRequiresRegister);
382   }
383 
RequiresFpuRegister()384   static Location RequiresFpuRegister() {
385     return UnallocatedLocation(kRequiresFpuRegister);
386   }
387 
388   static Location RegisterOrConstant(HInstruction* instruction);
389   static Location RegisterOrInt32Constant(HInstruction* instruction);
390   static Location ByteRegisterOrConstant(int reg, HInstruction* instruction);
391   static Location FpuRegisterOrConstant(HInstruction* instruction);
392   static Location FpuRegisterOrInt32Constant(HInstruction* instruction);
393 
394   // The location of the first input to the instruction will be
395   // used to replace this unallocated location.
SameAsFirstInput()396   static Location SameAsFirstInput() {
397     return UnallocatedLocation(kSameAsFirstInput);
398   }
399 
GetPolicy()400   Policy GetPolicy() const {
401     DCHECK(IsUnallocated());
402     return PolicyField::Decode(GetPayload());
403   }
404 
RequiresRegisterKind()405   bool RequiresRegisterKind() const {
406     return GetPolicy() == kRequiresRegister || GetPolicy() == kRequiresFpuRegister;
407   }
408 
GetEncoding()409   uintptr_t GetEncoding() const {
410     return GetPayload();
411   }
412 
413  private:
414   // Number of bits required to encode Kind value.
415   static constexpr uint32_t kBitsForKind = 4;
416   static constexpr uint32_t kBitsForPayload = kBitsPerIntPtrT - kBitsForKind;
417   static constexpr uintptr_t kLocationConstantMask = 0x3;
418 
Location(uintptr_t value)419   explicit Location(uintptr_t value) : value_(value) {}
420 
Location(Kind kind,uintptr_t payload)421   Location(Kind kind, uintptr_t payload)
422       : value_(KindField::Encode(kind) | PayloadField::Encode(payload)) {}
423 
GetPayload()424   uintptr_t GetPayload() const {
425     return PayloadField::Decode(value_);
426   }
427 
428   typedef BitField<Kind, 0, kBitsForKind> KindField;
429   typedef BitField<uintptr_t, kBitsForKind, kBitsForPayload> PayloadField;
430 
431   // Layout for kUnallocated locations payload.
432   typedef BitField<Policy, 0, 3> PolicyField;
433 
434   // Layout for stack slots.
435   static const intptr_t kStackIndexBias =
436       static_cast<intptr_t>(1) << (kBitsForPayload - 1);
437 
438   // Location either contains kind and payload fields or a tagged handle for
439   // a constant locations. Values of enumeration Kind are selected in such a
440   // way that none of them can be interpreted as a kConstant tag.
441   uintptr_t value_;
442 };
443 std::ostream& operator<<(std::ostream& os, Location::Kind rhs);
444 std::ostream& operator<<(std::ostream& os, Location::Policy rhs);
445 
446 class RegisterSet : public ValueObject {
447  public:
Empty()448   static RegisterSet Empty() { return RegisterSet(); }
AllFpu()449   static RegisterSet AllFpu() { return RegisterSet(0, -1); }
450 
Add(Location loc)451   void Add(Location loc) {
452     if (loc.IsRegister()) {
453       core_registers_ |= (1 << loc.reg());
454     } else {
455       DCHECK(loc.IsFpuRegister());
456       floating_point_registers_ |= (1 << loc.reg());
457     }
458   }
459 
Remove(Location loc)460   void Remove(Location loc) {
461     if (loc.IsRegister()) {
462       core_registers_ &= ~(1 << loc.reg());
463     } else {
464       DCHECK(loc.IsFpuRegister()) << loc;
465       floating_point_registers_ &= ~(1 << loc.reg());
466     }
467   }
468 
ContainsCoreRegister(uint32_t id)469   bool ContainsCoreRegister(uint32_t id) const {
470     return Contains(core_registers_, id);
471   }
472 
ContainsFloatingPointRegister(uint32_t id)473   bool ContainsFloatingPointRegister(uint32_t id) const {
474     return Contains(floating_point_registers_, id);
475   }
476 
Contains(uint32_t register_set,uint32_t reg)477   static bool Contains(uint32_t register_set, uint32_t reg) {
478     return (register_set & (1 << reg)) != 0;
479   }
480 
OverlapsRegisters(Location out)481   bool OverlapsRegisters(Location out) {
482     DCHECK(out.IsRegisterKind());
483     switch (out.GetKind()) {
484       case Location::Kind::kRegister:
485         return ContainsCoreRegister(out.reg());
486       case Location::Kind::kFpuRegister:
487         return ContainsFloatingPointRegister(out.reg());
488       case Location::Kind::kRegisterPair:
489         return ContainsCoreRegister(out.low()) || ContainsCoreRegister(out.high());
490       case Location::Kind::kFpuRegisterPair:
491         return ContainsFloatingPointRegister(out.low()) ||
492                ContainsFloatingPointRegister(out.high());
493       default:
494         return false;
495     }
496   }
497 
GetNumberOfRegisters()498   size_t GetNumberOfRegisters() const {
499     return POPCOUNT(core_registers_) + POPCOUNT(floating_point_registers_);
500   }
501 
GetCoreRegisters()502   uint32_t GetCoreRegisters() const {
503     return core_registers_;
504   }
505 
GetFloatingPointRegisters()506   uint32_t GetFloatingPointRegisters() const {
507     return floating_point_registers_;
508   }
509 
510  private:
RegisterSet()511   RegisterSet() : core_registers_(0), floating_point_registers_(0) {}
RegisterSet(uint32_t core,uint32_t fp)512   RegisterSet(uint32_t core, uint32_t fp) : core_registers_(core), floating_point_registers_(fp) {}
513 
514   uint32_t core_registers_;
515   uint32_t floating_point_registers_;
516 };
517 
518 static constexpr bool kIntrinsified = true;
519 
520 /**
521  * The code generator computes LocationSummary for each instruction so that
522  * the instruction itself knows what code to generate: where to find the inputs
523  * and where to place the result.
524  *
525  * The intent is to have the code for generating the instruction independent of
526  * register allocation. A register allocator just has to provide a LocationSummary.
527  */
528 class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
529  public:
530   enum CallKind {
531     kNoCall,
532     kCallOnMainAndSlowPath,
533     kCallOnSlowPath,
534     kCallOnMainOnly
535   };
536 
537   explicit LocationSummary(HInstruction* instruction,
538                            CallKind call_kind = kNoCall,
539                            bool intrinsified = false);
540 
SetInAt(uint32_t at,Location location)541   void SetInAt(uint32_t at, Location location) {
542     inputs_[at] = location;
543   }
544 
InAt(uint32_t at)545   Location InAt(uint32_t at) const {
546     return inputs_[at];
547   }
548 
GetInputCount()549   size_t GetInputCount() const {
550     return inputs_.size();
551   }
552 
553   // Set the output location.  Argument `overlaps` tells whether the
554   // output overlaps any of the inputs (if so, it cannot share the
555   // same register as one of the inputs); it is set to
556   // `Location::kOutputOverlap` by default for safety.
557   void SetOut(Location location, Location::OutputOverlap overlaps = Location::kOutputOverlap) {
558     DCHECK(output_.IsInvalid());
559     output_overlaps_ = overlaps;
560     output_ = location;
561   }
562 
UpdateOut(Location location)563   void UpdateOut(Location location) {
564     // There are two reasons for updating an output:
565     // 1) Parameters, where we only know the exact stack slot after
566     //    doing full register allocation.
567     // 2) Unallocated location.
568     DCHECK(output_.IsStackSlot() || output_.IsDoubleStackSlot() || output_.IsUnallocated());
569     output_ = location;
570   }
571 
AddTemp(Location location)572   void AddTemp(Location location) {
573     temps_.push_back(location);
574   }
575 
AddRegisterTemps(size_t count)576   void AddRegisterTemps(size_t count) {
577     for (size_t i = 0; i < count; ++i) {
578       AddTemp(Location::RequiresRegister());
579     }
580   }
581 
GetTemp(uint32_t at)582   Location GetTemp(uint32_t at) const {
583     return temps_[at];
584   }
585 
SetTempAt(uint32_t at,Location location)586   void SetTempAt(uint32_t at, Location location) {
587     DCHECK(temps_[at].IsUnallocated() || temps_[at].IsInvalid());
588     temps_[at] = location;
589   }
590 
GetTempCount()591   size_t GetTempCount() const {
592     return temps_.size();
593   }
594 
HasTemps()595   bool HasTemps() const { return !temps_.empty(); }
596 
Out()597   Location Out() const { return output_; }
598 
CanCall()599   bool CanCall() const {
600     return call_kind_ != kNoCall;
601   }
602 
WillCall()603   bool WillCall() const {
604     return call_kind_ == kCallOnMainOnly || call_kind_ == kCallOnMainAndSlowPath;
605   }
606 
CallsOnSlowPath()607   bool CallsOnSlowPath() const {
608     return call_kind_ == kCallOnSlowPath || call_kind_ == kCallOnMainAndSlowPath;
609   }
610 
OnlyCallsOnSlowPath()611   bool OnlyCallsOnSlowPath() const {
612     return call_kind_ == kCallOnSlowPath;
613   }
614 
CallsOnMainAndSlowPath()615   bool CallsOnMainAndSlowPath() const {
616     return call_kind_ == kCallOnMainAndSlowPath;
617   }
618 
NeedsSafepoint()619   bool NeedsSafepoint() const {
620     return CanCall();
621   }
622 
SetCustomSlowPathCallerSaves(const RegisterSet & caller_saves)623   void SetCustomSlowPathCallerSaves(const RegisterSet& caller_saves) {
624     DCHECK(OnlyCallsOnSlowPath());
625     has_custom_slow_path_calling_convention_ = true;
626     custom_slow_path_caller_saves_ = caller_saves;
627   }
628 
HasCustomSlowPathCallingConvention()629   bool HasCustomSlowPathCallingConvention() const {
630     return has_custom_slow_path_calling_convention_;
631   }
632 
GetCustomSlowPathCallerSaves()633   const RegisterSet& GetCustomSlowPathCallerSaves() const {
634     DCHECK(HasCustomSlowPathCallingConvention());
635     return custom_slow_path_caller_saves_;
636   }
637 
SetStackBit(uint32_t index)638   void SetStackBit(uint32_t index) {
639     stack_mask_->SetBit(index);
640   }
641 
ClearStackBit(uint32_t index)642   void ClearStackBit(uint32_t index) {
643     stack_mask_->ClearBit(index);
644   }
645 
SetRegisterBit(uint32_t reg_id)646   void SetRegisterBit(uint32_t reg_id) {
647     register_mask_ |= (1 << reg_id);
648   }
649 
GetRegisterMask()650   uint32_t GetRegisterMask() const {
651     return register_mask_;
652   }
653 
RegisterContainsObject(uint32_t reg_id)654   bool RegisterContainsObject(uint32_t reg_id) {
655     return RegisterSet::Contains(register_mask_, reg_id);
656   }
657 
AddLiveRegister(Location location)658   void AddLiveRegister(Location location) {
659     live_registers_.Add(location);
660   }
661 
GetStackMask()662   BitVector* GetStackMask() const {
663     return stack_mask_;
664   }
665 
GetLiveRegisters()666   RegisterSet* GetLiveRegisters() {
667     return &live_registers_;
668   }
669 
GetNumberOfLiveRegisters()670   size_t GetNumberOfLiveRegisters() const {
671     return live_registers_.GetNumberOfRegisters();
672   }
673 
OutputUsesSameAs(uint32_t input_index)674   bool OutputUsesSameAs(uint32_t input_index) const {
675     return (input_index == 0)
676         && output_.IsUnallocated()
677         && (output_.GetPolicy() == Location::kSameAsFirstInput);
678   }
679 
IsFixedInput(uint32_t input_index)680   bool IsFixedInput(uint32_t input_index) const {
681     Location input = inputs_[input_index];
682     return input.IsRegister()
683         || input.IsFpuRegister()
684         || input.IsPair()
685         || input.IsStackSlot()
686         || input.IsDoubleStackSlot();
687   }
688 
OutputCanOverlapWithInputs()689   bool OutputCanOverlapWithInputs() const {
690     return output_overlaps_ == Location::kOutputOverlap;
691   }
692 
Intrinsified()693   bool Intrinsified() const {
694     return intrinsified_;
695   }
696 
697  private:
698   LocationSummary(HInstruction* instruction,
699                   CallKind call_kind,
700                   bool intrinsified,
701                   ArenaAllocator* allocator);
702 
703   ArenaVector<Location> inputs_;
704   ArenaVector<Location> temps_;
705   const CallKind call_kind_;
706   // Whether these are locations for an intrinsified call.
707   const bool intrinsified_;
708   // Whether the slow path has default or custom calling convention.
709   bool has_custom_slow_path_calling_convention_;
710   // Whether the output overlaps with any of the inputs. If it overlaps, then it cannot
711   // share the same register as the inputs.
712   Location::OutputOverlap output_overlaps_;
713   Location output_;
714 
715   // Mask of objects that live in the stack.
716   BitVector* stack_mask_;
717 
718   // Mask of objects that live in register.
719   uint32_t register_mask_;
720 
721   // Registers that are in use at this position.
722   RegisterSet live_registers_;
723 
724   // Custom slow path caller saves. Valid only if indicated by slow_path_calling_convention_.
725   RegisterSet custom_slow_path_caller_saves_;
726 
727   friend class RegisterAllocatorTest;
728   DISALLOW_COPY_AND_ASSIGN(LocationSummary);
729 };
730 
731 }  // namespace art
732 
733 #endif  // ART_COMPILER_OPTIMIZING_LOCATIONS_H_
734