// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/constant-pool.h"
#include "src/codegen/assembler-arch.h"
#include "src/codegen/assembler-inl.h"

namespace v8 {
namespace internal {

#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)

ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
                                         int double_reach_bits) {
  info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
  info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits;
  info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits;
}

ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
    ConstantPoolEntry::Type type) const {
  const PerTypeEntryInfo& info = info_[type];

  if (info.overflow()) return ConstantPoolEntry::OVERFLOWED;

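  // Layout note: the regular section emits all DOUBLE entries before all
  // INTPTR entries (see Emit() below), so the next pointer slot begins after
  // the current block of doubles.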
  int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count;
  int dbl_offset = dbl_count * kDoubleSize;
  int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count;
  int ptr_offset = ptr_count * kSystemPointerSize + dbl_offset;

  if (type == ConstantPoolEntry::DOUBLE) {
    // Double overflow detection must take into account the reach for both
    // types.
    int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits;
    if (!is_uintn(dbl_offset, info.regular_reach_bits) ||
        (ptr_count > 0 &&
         !is_uintn(ptr_offset + kDoubleSize - kSystemPointerSize,
                   ptr_reach_bits))) {
      return ConstantPoolEntry::OVERFLOWED;
    }
  } else {
    DCHECK(type == ConstantPoolEntry::INTPTR);
    if (!is_uintn(ptr_offset, info.regular_reach_bits)) {
      return ConstantPoolEntry::OVERFLOWED;
    }
  }

  return ConstantPoolEntry::REGULAR;
}

ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
    ConstantPoolEntry* entry, ConstantPoolEntry::Type type) {
  DCHECK(!emitted_label_.is_bound());
  PerTypeEntryInfo& info = info_[type];
  const int entry_size = ConstantPoolEntry::size(type);
  bool merged = false;

  if (entry->sharing_ok()) {
    // Try to merge entries.
    std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
    int end = static_cast<int>(info.shared_entries.size());
    for (int i = 0; i < end; i++, it++) {
      if ((entry_size == kSystemPointerSize)
              ? entry->value() == it->value()
              : entry->value64() == it->value64()) {
        // Merge with found entry.
        entry->set_merged_index(i);
        merged = true;
        break;
      }
    }
  }

  // By definition, merged entries have regular access.
  DCHECK(!merged || entry->merged_index() < info.regular_count);
  ConstantPoolEntry::Access access =
      (merged ? ConstantPoolEntry::REGULAR : NextAccess(type));

  // Enforce an upper bound on search time by limiting the search to
  // unique sharable entries which fit in the regular section.
  if (entry->sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
    info.shared_entries.push_back(*entry);
  } else {
    info.entries.push_back(*entry);
  }

  // We're done if we found a match or have already triggered the
  // overflow state.
  if (merged || info.overflow()) return access;

  if (access == ConstantPoolEntry::REGULAR) {
    info.regular_count++;
  } else {
    info.overflow_start = static_cast<int>(info.entries.size()) - 1;
  }

  return access;
}

void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
                                            ConstantPoolEntry::Type type) {
  PerTypeEntryInfo& info = info_[type];
  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
  const int entry_size = ConstantPoolEntry::size(type);
  int base = emitted_label_.pos();
  DCHECK_GT(base, 0);
  int shared_end = static_cast<int>(shared_entries.size());
  std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
  for (int i = 0; i < shared_end; i++, shared_it++) {
    int offset = assm->pc_offset() - base;
    shared_it->set_offset(offset);  // Save offset for merged entries.
    if (entry_size == kSystemPointerSize) {
      assm->dp(shared_it->value());
    } else {
      assm->dq(shared_it->value64());
    }
    DCHECK(is_uintn(offset, info.regular_reach_bits));

    // Patch load sequence with correct offset.
    assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset,
                                             ConstantPoolEntry::REGULAR, type);
  }
}

void ConstantPoolBuilder::EmitGroup(Assembler* assm,
                                    ConstantPoolEntry::Access access,
                                    ConstantPoolEntry::Type type) {
  PerTypeEntryInfo& info = info_[type];
  const bool overflow = info.overflow();
  std::vector<ConstantPoolEntry>& entries = info.entries;
  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
  const int entry_size = ConstantPoolEntry::size(type);
  int base = emitted_label_.pos();
  DCHECK_GT(base, 0);
  int begin;
  int end;

  if (access == ConstantPoolEntry::REGULAR) {
    // Emit any shared entries first.
    EmitSharedEntries(assm, type);
  }

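  // The regular pass covers entries below {overflow_start}; the overflow pass
  // covers the tail of {entries} that no longer fits within the regular reach.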
  if (access == ConstantPoolEntry::REGULAR) {
    begin = 0;
    end = overflow ? info.overflow_start : static_cast<int>(entries.size());
  } else {
    DCHECK(access == ConstantPoolEntry::OVERFLOWED);
    if (!overflow) return;
    begin = info.overflow_start;
    end = static_cast<int>(entries.size());
  }

  std::vector<ConstantPoolEntry>::iterator it = entries.begin();
  if (begin > 0) std::advance(it, begin);
  for (int i = begin; i < end; i++, it++) {
    // Update constant pool if necessary and get the entry's offset.
    int offset;
    ConstantPoolEntry::Access entry_access;
    if (!it->is_merged()) {
      // Emit new entry.
      offset = assm->pc_offset() - base;
      entry_access = access;
      if (entry_size == kSystemPointerSize) {
        assm->dp(it->value());
      } else {
        assm->dq(it->value64());
      }
    } else {
      // Retrieve offset from shared entry.
      offset = shared_entries[it->merged_index()].offset();
      entry_access = ConstantPoolEntry::REGULAR;
    }

    DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED ||
           is_uintn(offset, info.regular_reach_bits));

    // Patch load sequence with correct offset.
    assm->PatchConstantPoolAccessInstruction(it->position(), offset,
                                             entry_access, type);
  }
}

// Emit and return size of pool.
int ConstantPoolBuilder::Emit(Assembler* assm) {
  bool emitted = emitted_label_.is_bound();
  bool empty = IsEmpty();

  if (!emitted) {
    // Mark start of constant pool. Align if necessary.
    if (!empty) assm->DataAlign(kDoubleSize);
    assm->bind(&emitted_label_);
    if (!empty) {
      // Emit in groups based on access and type.
      // Emit doubles first for alignment purposes.
      EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE);
      EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR);
      if (info_[ConstantPoolEntry::DOUBLE].overflow()) {
        assm->DataAlign(kDoubleSize);
        EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
                  ConstantPoolEntry::DOUBLE);
      }
      if (info_[ConstantPoolEntry::INTPTR].overflow()) {
        EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
                  ConstantPoolEntry::INTPTR);
      }
    }
  }

  return !empty ? (assm->pc_offset() - emitted_label_.pos()) : 0;
}
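
// Usage sketch (illustrative only; the reach-bit values and the entry
// construction are schematic, not taken from this file):
//   ConstantPoolBuilder builder(/*ptr_reach_bits=*/16,
//                               /*double_reach_bits=*/16);
//   ConstantPoolEntry entry(assm->pc_offset(), value, /*sharing_ok=*/true);
//   ConstantPoolEntry::Access access =
//       builder.AddEntry(&entry, ConstantPoolEntry::INTPTR);
//   // ... later, at a safe point in the instruction stream:
//   int pool_size = builder.Emit(assm);  // Emits entries and patches loads.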

#endif  // defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)

#if defined(V8_TARGET_ARCH_ARM64)

// Constant Pool.

ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }

RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  CHECK(key.is_value32());
  return RecordKey(std::move(key), assm_->pc_offset());
}

RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  CHECK(!key.is_value32());
  return RecordKey(std::move(key), assm_->pc_offset());
}
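
// Usage sketch (illustrative): when the assembler materializes a constant via
// a literal-pool load, it records the value first, roughly:
//   RelocInfoStatus status = constpool_.RecordEntry(imm64, rmode);
//   // ... then emit the load; its offset is patched later by
//   // SetLoadOffsetToConstPoolEntry() during EmitEntries().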

RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
  RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
  if (write_reloc_info == RelocInfoStatus::kMustRecord) {
    if (key.is_value32()) {
      if (entry32_count_ == 0) first_use_32_ = offset;
      ++entry32_count_;
    } else {
      if (entry64_count_ == 0) first_use_64_ = offset;
      ++entry64_count_;
    }
  }
  entries_.insert(std::make_pair(key, offset));

  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
    // Request constant pool emission after the next instruction.
    SetNextCheckIn(1);
  }

  return write_reloc_info;
}

RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
    const ConstantPoolKey& key) {
  if (key.AllowsDeduplication()) {
    auto existing = entries_.find(key);
    if (existing != entries_.end()) {
      return RelocInfoStatus::kMustOmitForDuplicate;
    }
  }
  return RelocInfoStatus::kMustRecord;
}

void ConstantPool::EmitAndClear(Jump require_jump) {
  DCHECK(!IsBlocked());
  // Prevent recursive pool emission.
  Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
  int size = ComputeSize(require_jump, require_alignment);
  Label size_check;
  assm_->bind(&size_check);
  assm_->RecordConstPool(size);

  // Emit the constant pool. It is preceded by an optional branch if
  // {require_jump} and a header which will:
  //  1) Encode the size of the constant pool, for use by the disassembler.
  //  2) Terminate the program, to try to prevent execution from accidentally
  //     flowing into the constant pool.
  //  3) Align the 64-bit pool entries to a 64-bit boundary.
  // TODO(all): Make the alignment part less fragile. Currently code is
  // allocated as a byte array so there are no guarantees the alignment will
  // be preserved on compaction. Currently it works as allocation seems to be
  // 64-bit aligned.

  Label after_pool;
  if (require_jump == Jump::kRequired) assm_->b(&after_pool);

  assm_->RecordComment("[ Constant Pool");
  EmitPrologue(require_alignment);
  if (require_alignment == Alignment::kRequired) assm_->Align(kInt64Size);
  EmitEntries();
  assm_->RecordComment("]");

  if (after_pool.is_linked()) assm_->bind(&after_pool);

  DCHECK_EQ(assm_->SizeOfCodeGeneratedSince(&size_check), size);
  Clear();
}

void ConstantPool::Clear() {
  entries_.clear();
  first_use_32_ = -1;
  first_use_64_ = -1;
  entry32_count_ = 0;
  entry64_count_ = 0;
  next_check_ = 0;
}

void ConstantPool::StartBlock() {
  if (blocked_nesting_ == 0) {
    // Prevent constant pool checks from happening by setting the next check to
    // the biggest possible offset.
    next_check_ = kMaxInt;
  }
  ++blocked_nesting_;
}

void ConstantPool::EndBlock() {
  --blocked_nesting_;
  if (blocked_nesting_ == 0) {
    DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
    // Make sure a check happens quickly after getting unblocked.
    next_check_ = 0;
  }
}

bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }

void ConstantPool::SetNextCheckIn(size_t instructions) {
  next_check_ =
      assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
}

void ConstantPool::EmitEntries() {
  for (auto iter = entries_.begin(); iter != entries_.end();) {
    DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
    auto range = entries_.equal_range(iter->first);
    bool shared = iter->first.AllowsDeduplication();
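    // For a deduplicated key, all recorded uses are patched to point at the
    // single slot emitted once after the inner loop; otherwise each use gets
    // its own slot emitted inside the loop.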
    for (auto it = range.first; it != range.second; ++it) {
      SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
      if (!shared) Emit(it->first);
    }
    if (shared) Emit(iter->first);
    iter = range.second;
  }
}

void ConstantPool::Emit(const ConstantPoolKey& key) {
  if (key.is_value32()) {
    assm_->dd(key.value32());
  } else {
    assm_->dq(key.value64());
  }
}

bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
  if (IsEmpty()) return false;
  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
    return true;
  }
  // We compute {dist32/64}, i.e. the distance from the first instruction
  // accessing a 32bit/64bit entry in the constant pool to any of the
  // 32bit/64bit constant pool entries, respectively. This is required because
  // we do not guarantee that entries are emitted in order of reference, i.e.
  // it is possible that the entry with the earliest reference is emitted last.
  // The constant pool should be emitted if either of the following is true:
  // (A) {dist32/64} will be out of range at the next check in.
  // (B) Emission can be done behind an unconditional branch and {dist32/64}
  //     exceeds {kOpportunityDist*}.
  // (C) {dist32/64} exceeds the desired approximate distance to the pool.
  int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
  size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  if (Entry64Count() != 0) {
    // The 64-bit constants are always emitted before the 32-bit constants, so
    // we subtract the size of the 32-bit constants from {pool_end_32} to get
    // {pool_end_64} above.
    size_t dist64 = pool_end_64 - first_use_64_;
    bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
    bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
      return true;
    }
  }
  if (Entry32Count() != 0) {
    size_t dist32 = pool_end_32 - first_use_32_;
    bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
    bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
      return true;
    }
  }
  return false;
}

int ConstantPool::ComputeSize(Jump require_jump,
                              Alignment require_alignment) const {
  int size_up_to_marker = PrologueSize(require_jump);
  int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
  size_t size_after_marker =
      Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
  return size_up_to_marker + static_cast<int>(size_after_marker);
}

Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
                                                       int pc_offset) const {
  int size_up_to_marker = PrologueSize(require_jump);
  if (Entry64Count() != 0 &&
      !IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
    return Alignment::kRequired;
  }
  return Alignment::kOmitted;
}

bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
  // Check that all entries are in range if the pool is emitted at {pc_offset}.
  // This ignores kPcLoadDelta (conservatively, since all offsets are positive),
  // and over-estimates the last entry's address with the pool's end.
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
  size_t pool_end_32 =
      pc_offset + ComputeSize(Jump::kRequired, require_alignment);
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  bool entries_in_range_32 =
      Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
  bool entries_in_range_64 =
      Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
  return entries_in_range_32 && entries_in_range_64;
}

ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
    : pool_(&assm->constpool_) {
  pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
  pool_->StartBlock();
}

ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
    : pool_(&assm->constpool_) {
  DCHECK_EQ(check, PoolEmissionCheck::kSkip);
  pool_->StartBlock();
}

ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }
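
// Usage sketch (illustrative): emit a sequence that must not be split by a
// pool, assuming an Assembler* {assm} is in scope:
//   {
//     ConstantPool::BlockScope block_pools(assm, /*margin=*/64);
//     // ... emit instructions; pool emission is blocked until scope exit.
//   }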

void ConstantPool::MaybeCheck() {
  if (assm_->pc_offset() >= next_check_) {
    Check(Emission::kIfNeeded, Jump::kRequired);
  }
}

#endif  // defined(V8_TARGET_ARCH_ARM64)

}  // namespace internal
}  // namespace v8