/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_VMAP_TABLE_H_
#define ART_RUNTIME_VMAP_TABLE_H_

#include "base/logging.h"
#include "leb128.h"
#include "stack.h"

namespace art {

class VmapTable {
 public:
  // For efficient encoding of special values, entries are adjusted by 2.
  static constexpr uint16_t kEntryAdjustment = 2u;
  static constexpr uint16_t kAdjustedFpMarker = static_cast<uint16_t>(0xffffu + kEntryAdjustment);
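  // With uint16_t wrap-around, 0xffffu + kEntryAdjustment is 1u, so the adjusted FP marker
  // encodes as a single ULEB128 byte instead of three.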

  explicit VmapTable(const uint8_t* table) : table_(table) {
  }

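  // The encoded table is a ULEB128 stream: the entry count first, then that many adjusted
  // entries (each raw value is stored as value + kEntryAdjustment).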
  // Look up the nth entry; not called from performance-critical code.
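  // Returns the dex register number recorded at index n (0xffff marks the switch from core
  // to FP entries).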
  uint16_t operator[](size_t n) const {
    const uint8_t* table = table_;
    size_t size = DecodeUnsignedLeb128(&table);
    CHECK_LT(n, size);
    uint16_t adjusted_entry = DecodeUnsignedLeb128(&table);
    for (size_t i = 0; i < n; ++i) {
      adjusted_entry = DecodeUnsignedLeb128(&table);
    }
    return adjusted_entry - kEntryAdjustment;
  }

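  // Returns the number of entries in the table.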
  size_t Size() const {
    const uint8_t* table = table_;
    return DecodeUnsignedLeb128(&table);
  }

  // Is the dex register 'vreg' in the context or on the stack? Should not be called when the
  // 'kind' is unknown or constant.
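  // On success, *vmap_offset is set to the index of the matching entry in the table.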
  bool IsInContext(size_t vreg, VRegKind kind, uint32_t* vmap_offset) const {
    DCHECK(kind == kReferenceVReg || kind == kIntVReg || kind == kFloatVReg ||
           kind == kLongLoVReg || kind == kLongHiVReg || kind == kDoubleLoVReg ||
           kind == kDoubleHiVReg || kind == kImpreciseConstant);
    *vmap_offset = 0xEBAD0FF5;
    // TODO: take advantage of the registers being ordered
    // TODO: we treat kImpreciseConstant as an integer below, need to ensure that such values
    //       are never promoted to floating point registers.
    bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
    bool in_floats = false;
    const uint8_t* table = table_;
    uint16_t adjusted_vreg = vreg + kEntryAdjustment;
    size_t end = DecodeUnsignedLeb128(&table);
    bool high_reg = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
    bool target64 = (kRuntimeISA == kArm64) || (kRuntimeISA == kX86_64) || (kRuntimeISA == kMips64);
    if (target64 && high_reg) {
      // Wide promoted registers are associated with the sreg of the low portion.
      adjusted_vreg--;
    }
    for (size_t i = 0; i < end; ++i) {
      // Stop if we find what we are looking for.
      uint16_t adjusted_entry = DecodeUnsignedLeb128(&table);
      if ((adjusted_entry == adjusted_vreg) && (in_floats == is_float)) {
        *vmap_offset = i;
        return true;
      }
      // 0xffff is the marker for LR (return PC on x86); following it are spilled float registers.
      if (adjusted_entry == kAdjustedFpMarker) {
        in_floats = true;
      }
    }
    return false;
  }

  // Compute the register number that corresponds to the entry in the vmap (vmap_offset, computed
  // by IsInContext above). If the kind is floating point then the result will be a floating point
  // register number, otherwise it will be an integer register number.
  uint32_t ComputeRegister(uint32_t spill_mask, uint32_t vmap_offset, VRegKind kind) const {
    // Compute the register we need to load from the context.
    DCHECK(kind == kReferenceVReg || kind == kIntVReg || kind == kFloatVReg ||
           kind == kLongLoVReg || kind == kLongHiVReg || kind == kDoubleLoVReg ||
           kind == kDoubleHiVReg || kind == kImpreciseConstant);
    // TODO: we treat kImpreciseConstant as an integer below, need to ensure that such values
    //       are never promoted to floating point registers.
    bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
    uint32_t matches = 0;
    if (UNLIKELY(is_float)) {
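      // FP entries follow the core entries and the kAdjustedFpMarker, so count the table slots
      // that precede them.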
      const uint8_t* table = table_;
      DecodeUnsignedLeb128(&table);  // Skip size.
      while (DecodeUnsignedLeb128(&table) != kAdjustedFpMarker) {
        matches++;
      }
      matches++;
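      // 'matches' is now the table index of the first FP entry.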
    }
    CHECK_LT(vmap_offset - matches, static_cast<uint32_t>(POPCOUNT(spill_mask)));
    uint32_t spill_shifts = 0;
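    // Scan the spill mask from bit 0 upwards, counting set bits until 'matches' reaches
    // vmap_offset + 1; the bit position of the last match is the register number. For example,
    // with matches == 0, spill_mask == 0b10110 and vmap_offset == 1, the loop consumes bits 0..2
    // and the method returns register 2 (the second spilled register in the mask).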
    while (matches != (vmap_offset + 1)) {
      DCHECK_NE(spill_mask, 0u);
      matches += spill_mask & 1;  // Add 1 if the low bit is set.
      spill_mask >>= 1;
      spill_shifts++;
    }
    spill_shifts--;  // Wind back one, as we want the last match.
    return spill_shifts;
  }

 private:
  const uint8_t* const table_;
};
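
// Typical lookup sequence (illustrative sketch only; a 'frame_info' object with
// CoreSpillMask()/FpSpillMask() accessors is assumed here and is not part of this header):
//
//   uint32_t vmap_offset;
//   if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
//     bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
//     uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
//     uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
//     // 'reg' is the machine register (core or FP) that holds dex register 'vreg'.
//   }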

}  // namespace art

#endif  // ART_RUNTIME_VMAP_TABLE_H_