• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5 #ifndef V8_CODEGEN_CONSTANT_POOL_H_
6 #define V8_CODEGEN_CONSTANT_POOL_H_
7 
8 #include <map>
9 
10 #include "src/codegen/label.h"
11 #include "src/codegen/reloc-info.h"
12 #include "src/common/globals.h"
13 #include "src/numbers/double.h"
14 
15 namespace v8 {
16 namespace internal {
17 
18 class Instruction;
19 
20 // -----------------------------------------------------------------------------
21 // Constant pool support
22 
23 class ConstantPoolEntry {
24  public:
25   ConstantPoolEntry() = default;
26   ConstantPoolEntry(int position, intptr_t value, bool sharing_ok,
27                     RelocInfo::Mode rmode = RelocInfo::NONE)
position_(position)28       : position_(position),
29         merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
30         value_(value),
31         rmode_(rmode) {}
32   ConstantPoolEntry(int position, Double value,
33                     RelocInfo::Mode rmode = RelocInfo::NONE)
position_(position)34       : position_(position),
35         merged_index_(SHARING_ALLOWED),
36         value64_(value.AsUint64()),
37         rmode_(rmode) {}
38 
position()39   int position() const { return position_; }
sharing_ok()40   bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
is_merged()41   bool is_merged() const { return merged_index_ >= 0; }
merged_index()42   int merged_index() const {
43     DCHECK(is_merged());
44     return merged_index_;
45   }
set_merged_index(int index)46   void set_merged_index(int index) {
47     DCHECK(sharing_ok());
48     merged_index_ = index;
49     DCHECK(is_merged());
50   }
offset()51   int offset() const {
52     DCHECK_GE(merged_index_, 0);
53     return merged_index_;
54   }
set_offset(int offset)55   void set_offset(int offset) {
56     DCHECK_GE(offset, 0);
57     merged_index_ = offset;
58   }
value()59   intptr_t value() const { return value_; }
value64()60   uint64_t value64() const { return value64_; }
rmode()61   RelocInfo::Mode rmode() const { return rmode_; }
62 
63   enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };
64 
size(Type type)65   static int size(Type type) {
66     return (type == INTPTR) ? kSystemPointerSize : kDoubleSize;
67   }
68 
69   enum Access { REGULAR, OVERFLOWED };
70 
71  private:
72   int position_;
73   int merged_index_;
74   union {
75     intptr_t value_;
76     uint64_t value64_;
77   };
78   // TODO(leszeks): The way we use this, it could probably be packed into
79   // merged_index_ if size is a concern.
80   RelocInfo::Mode rmode_;
81   enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
82 };
83 
#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)

// -----------------------------------------------------------------------------
// Embedded constant pool support

// Collects pointer-sized and double constants during code generation and
// emits them as an embedded constant pool (PPC/PPC64 only).
class ConstantPoolBuilder {
 public:
  // {ptr_reach_bits}/{double_reach_bits} bound the reach of regular (i.e.
  // non-overflowed) accesses for each entry type.
  ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits);

#ifdef DEBUG
  ~ConstantPoolBuilder() {
    // Unused labels to prevent DCHECK failures.
    emitted_label_.Unuse();
    emitted_label_.UnuseNear();
  }
#endif

  // Add pointer-sized constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, intptr_t value,
                                     bool sharing_ok) {
    ConstantPoolEntry entry(position, value, sharing_ok);
    return AddEntry(&entry, ConstantPoolEntry::INTPTR);
  }

  // Add double constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, Double value) {
    ConstantPoolEntry entry(position, value);
    return AddEntry(&entry, ConstantPoolEntry::DOUBLE);
  }

  // Add double constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, double value) {
    return AddEntry(position, Double(value));
  }

  // Previews the access type required for the next new entry to be added.
  ConstantPoolEntry::Access NextAccess(ConstantPoolEntry::Type type) const;

  // True when no entry (shared or not, of either type) has been added yet.
  bool IsEmpty() {
    return info_[ConstantPoolEntry::INTPTR].entries.empty() &&
           info_[ConstantPoolEntry::INTPTR].shared_entries.empty() &&
           info_[ConstantPoolEntry::DOUBLE].entries.empty() &&
           info_[ConstantPoolEntry::DOUBLE].shared_entries.empty();
  }

  // Emit the constant pool.  Invoke only after all entries have been
  // added and all instructions have been emitted.
  // Returns position of the emitted pool (zero implies no constant pool).
  int Emit(Assembler* assm);

  // Returns the label associated with the start of the constant pool.
  // Linking to this label in the function prologue may provide an
  // efficient means of constant pool pointer register initialization
  // on some architectures.
  inline Label* EmittedPosition() { return &emitted_label_; }

 private:
  ConstantPoolEntry::Access AddEntry(ConstantPoolEntry* entry,
                                     ConstantPoolEntry::Type type);
  void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
  void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
                 ConstantPoolEntry::Type type);

  // Per-type (INTPTR vs. DOUBLE) bookkeeping for pending entries.
  struct PerTypeEntryInfo {
    PerTypeEntryInfo() : regular_count(0), overflow_start(-1) {}
    // True when entries beyond {overflow_start} exist, i.e. some entries
    // exceed the regular reach and need OVERFLOWED access.
    bool overflow() const {
      return (overflow_start >= 0 &&
              overflow_start < static_cast<int>(entries.size()));
    }
    int regular_reach_bits;
    int regular_count;
    int overflow_start;
    std::vector<ConstantPoolEntry> entries;
    std::vector<ConstantPoolEntry> shared_entries;
  };

  Label emitted_label_;  // Records pc_offset of emitted pool
  PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};

#endif  // defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)
165 
166 #if defined(V8_TARGET_ARCH_ARM64)
167 
168 class ConstantPoolKey {
169  public:
170   explicit ConstantPoolKey(uint64_t value,
171                            RelocInfo::Mode rmode = RelocInfo::NONE)
is_value32_(false)172       : is_value32_(false), value64_(value), rmode_(rmode) {}
173 
174   explicit ConstantPoolKey(uint32_t value,
175                            RelocInfo::Mode rmode = RelocInfo::NONE)
is_value32_(true)176       : is_value32_(true), value32_(value), rmode_(rmode) {}
177 
value64()178   uint64_t value64() const {
179     CHECK(!is_value32_);
180     return value64_;
181   }
value32()182   uint32_t value32() const {
183     CHECK(is_value32_);
184     return value32_;
185   }
186 
is_value32()187   bool is_value32() const { return is_value32_; }
rmode()188   RelocInfo::Mode rmode() const { return rmode_; }
189 
AllowsDeduplication()190   bool AllowsDeduplication() const {
191     DCHECK(rmode_ != RelocInfo::CONST_POOL &&
192            rmode_ != RelocInfo::VENEER_POOL &&
193            rmode_ != RelocInfo::DEOPT_SCRIPT_OFFSET &&
194            rmode_ != RelocInfo::DEOPT_INLINING_ID &&
195            rmode_ != RelocInfo::DEOPT_REASON && rmode_ != RelocInfo::DEOPT_ID);
196     // CODE_TARGETs can be shared because they aren't patched anymore,
197     // and we make sure we emit only one reloc info for them (thus delta
198     // patching) will apply the delta only once. At the moment, we do not dedup
199     // code targets if they are wrapped in a heap object request (value == 0).
200     bool is_sharable_code_target =
201         rmode_ == RelocInfo::CODE_TARGET &&
202         (is_value32() ? (value32() != 0) : (value64() != 0));
203     bool is_sharable_embedded_object = RelocInfo::IsEmbeddedObjectMode(rmode_);
204     return RelocInfo::IsShareableRelocMode(rmode_) || is_sharable_code_target ||
205            is_sharable_embedded_object;
206   }
207 
208  private:
209   bool is_value32_;
210   union {
211     uint64_t value64_;
212     uint32_t value32_;
213   };
214   RelocInfo::Mode rmode_;
215 };
216 
217 // Order for pool entries. 64bit entries go first.
218 inline bool operator<(const ConstantPoolKey& a, const ConstantPoolKey& b) {
219   if (a.is_value32() < b.is_value32()) return true;
220   if (a.is_value32() > b.is_value32()) return false;
221   if (a.rmode() < b.rmode()) return true;
222   if (a.rmode() > b.rmode()) return false;
223   if (a.is_value32()) return a.value32() < b.value32();
224   return a.value64() < b.value64();
225 }
226 
227 inline bool operator==(const ConstantPoolKey& a, const ConstantPoolKey& b) {
228   if (a.rmode() != b.rmode() || a.is_value32() != b.is_value32()) {
229     return false;
230   }
231   if (a.is_value32()) return a.value32() == b.value32();
232   return a.value64() == b.value64();
233 }
234 
// Constant pool generation
// Jump: whether a branch over the emitted pool is needed.
enum class Jump { kOmitted, kRequired };
// Emission: emit only when necessary, or unconditionally.
enum class Emission { kIfNeeded, kForced };
// Alignment: whether padding to align the pool is needed.
enum class Alignment { kOmitted, kRequired };
// RelocInfoStatus: whether reloc info must be written for an entry, or
// omitted because an identical entry was already recorded.
enum class RelocInfoStatus { kMustRecord, kMustOmitForDuplicate };
// PoolEmissionCheck: tag to skip the emission check in BlockScope.
enum class PoolEmissionCheck { kSkip };
241 
242 // Pools are emitted in the instruction stream, preferably after unconditional
243 // jumps or after returns from functions (in dead code locations).
244 // If a long code sequence does not contain unconditional jumps, it is
245 // necessary to emit the constant pool before the pool gets too far from the
246 // location it is accessed from. In this case, we emit a jump over the emitted
247 // constant pool.
248 // Constants in the pool may be addresses of functions that gets relocated;
249 // if so, a relocation info entry is associated to the constant pool entry.
250 class ConstantPool {
251  public:
252   explicit ConstantPool(Assembler* assm);
253   ~ConstantPool();
254 
255   // Returns true when we need to write RelocInfo and false when we do not.
256   RelocInfoStatus RecordEntry(uint32_t data, RelocInfo::Mode rmode);
257   RelocInfoStatus RecordEntry(uint64_t data, RelocInfo::Mode rmode);
258 
Entry32Count()259   size_t Entry32Count() const { return entry32_count_; }
Entry64Count()260   size_t Entry64Count() const { return entry64_count_; }
IsEmpty()261   bool IsEmpty() const { return entries_.empty(); }
262   // Check if pool will be out of range at {pc_offset}.
263   bool IsInImmRangeIfEmittedAt(int pc_offset);
264   // Size in bytes of the constant pool. Depending on parameters, the size will
265   // include the branch over the pool and alignment padding.
266   int ComputeSize(Jump require_jump, Alignment require_alignment) const;
267 
268   // Emit the pool at the current pc with a branch over the pool if requested.
269   void EmitAndClear(Jump require);
270   bool ShouldEmitNow(Jump require_jump, size_t margin = 0) const;
271   V8_EXPORT_PRIVATE void Check(Emission force_emission, Jump require_jump,
272                                size_t margin = 0);
273 
274   V8_EXPORT_PRIVATE void MaybeCheck();
275   void Clear();
276 
277   // Constant pool emisssion can be blocked temporarily.
278   bool IsBlocked() const;
279 
280   // Repeated checking whether the constant pool should be emitted is expensive;
281   // only check once a number of instructions have been generated.
282   void SetNextCheckIn(size_t instructions);
283 
284   // Class for scoping postponing the constant pool generation.
285   class V8_EXPORT_PRIVATE BlockScope {
286    public:
287     // BlockScope immediatelly emits the pool if necessary to ensure that
288     // during the block scope at least {margin} bytes can be emitted without
289     // pool emission becomming necessary.
290     explicit BlockScope(Assembler* pool, size_t margin = 0);
291     BlockScope(Assembler* pool, PoolEmissionCheck);
292     ~BlockScope();
293 
294    private:
295     ConstantPool* pool_;
296     DISALLOW_IMPLICIT_CONSTRUCTORS(BlockScope);
297   };
298 
299   // Hard limit to the const pool which must not be exceeded.
300   static const size_t kMaxDistToPool32;
301   static const size_t kMaxDistToPool64;
302   // Approximate distance where the pool should be emitted.
303   static const size_t kApproxDistToPool32;
304   V8_EXPORT_PRIVATE static const size_t kApproxDistToPool64;
305   // Approximate distance where the pool may be emitted if
306   // no jump is required (due to a recent unconditional jump).
307   static const size_t kOpportunityDistToPool32;
308   static const size_t kOpportunityDistToPool64;
309   // PC distance between constant pool checks.
310   V8_EXPORT_PRIVATE static const size_t kCheckInterval;
311   // Number of entries in the pool which trigger a check.
312   static const size_t kApproxMaxEntryCount;
313 
314  private:
315   void StartBlock();
316   void EndBlock();
317 
318   void EmitEntries();
319   void EmitPrologue(Alignment require_alignment);
320   int PrologueSize(Jump require_jump) const;
321   RelocInfoStatus RecordKey(ConstantPoolKey key, int offset);
322   RelocInfoStatus GetRelocInfoStatusFor(const ConstantPoolKey& key);
323   void Emit(const ConstantPoolKey& key);
324   void SetLoadOffsetToConstPoolEntry(int load_offset, Instruction* entry_offset,
325                                      const ConstantPoolKey& key);
326   Alignment IsAlignmentRequiredIfEmittedAt(Jump require_jump,
327                                            int pc_offset) const;
328 
329   Assembler* assm_;
330   // Keep track of the first instruction requiring a constant pool entry
331   // since the previous constant pool was emitted.
332   int first_use_32_ = -1;
333   int first_use_64_ = -1;
334   // We sort not according to insertion order, but since we do not insert
335   // addresses (for heap objects we insert an index which is created in
336   // increasing order), the order is deterministic. We map each entry to the
337   // pc offset of the load. We use a multimap because we need to record the
338   // pc offset of each load of the same constant so that the immediate of the
339   // loads can be back-patched when the pool is emitted.
340   std::multimap<ConstantPoolKey, int> entries_;
341   size_t entry32_count_ = 0;
342   size_t entry64_count_ = 0;
343   int next_check_ = 0;
344   int blocked_nesting_ = 0;
345 };
346 
347 #endif  // defined(V8_TARGET_ARCH_ARM64)
348 
349 }  // namespace internal
350 }  // namespace v8
351 
352 #endif  // V8_CODEGEN_CONSTANT_POOL_H_
353