// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/constant-pool.h"
#include "src/codegen/assembler-arch.h"
#include "src/codegen/assembler-inl.h"

namespace v8 {
namespace internal {

#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)

ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
                                         int double_reach_bits) {
  info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
  info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits;
  info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits;
}

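// Determine whether the next entry of {type} still fits in the regular
// (limited-reach) section of the pool, or whether it has to go into the
// overflow section.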
ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
    ConstantPoolEntry::Type type) const {
  const PerTypeEntryInfo& info = info_[type];

  if (info.overflow()) return ConstantPoolEntry::OVERFLOWED;

  int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count;
  int dbl_offset = dbl_count * kDoubleSize;
  int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count;
  int ptr_offset = ptr_count * kSystemPointerSize + dbl_offset;

  if (type == ConstantPoolEntry::DOUBLE) {
    // Double overflow detection must take into account the reach for both
    // types.
    int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits;
    if (!is_uintn(dbl_offset, info.regular_reach_bits) ||
        (ptr_count > 0 &&
         !is_uintn(ptr_offset + kDoubleSize - kSystemPointerSize,
                   ptr_reach_bits))) {
      return ConstantPoolEntry::OVERFLOWED;
    }
  } else {
    DCHECK(type == ConstantPoolEntry::INTPTR);
    if (!is_uintn(ptr_offset, info.regular_reach_bits)) {
      return ConstantPoolEntry::OVERFLOWED;
    }
  }

  return ConstantPoolEntry::REGULAR;
}

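// Add {entry} to the pool. Sharable entries are first checked against the
// already recorded shared entries and merged on a value match; otherwise the
// entry is appended and either counted as regular or marks the start of the
// overflow section.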
ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
    ConstantPoolEntry* entry, ConstantPoolEntry::Type type) {
  DCHECK(!emitted_label_.is_bound());
  PerTypeEntryInfo& info = info_[type];
  const int entry_size = ConstantPoolEntry::size(type);
  bool merged = false;

  if (entry->sharing_ok()) {
    // Try to merge entries.
    std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
    int end = static_cast<int>(info.shared_entries.size());
    for (int i = 0; i < end; i++, it++) {
      if ((entry_size == kSystemPointerSize)
              ? entry->value() == it->value()
              : entry->value64() == it->value64()) {
        // Merge with found entry.
        entry->set_merged_index(i);
        merged = true;
        break;
      }
    }
  }

  // By definition, merged entries have regular access.
  DCHECK(!merged || entry->merged_index() < info.regular_count);
  ConstantPoolEntry::Access access =
      (merged ? ConstantPoolEntry::REGULAR : NextAccess(type));

  // Enforce an upper bound on search time by limiting the search to
  // unique sharable entries which fit in the regular section.
  if (entry->sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
    info.shared_entries.push_back(*entry);
  } else {
    info.entries.push_back(*entry);
  }

  // We're done if we found a match or have already triggered the
  // overflow state.
  if (merged || info.overflow()) return access;

  if (access == ConstantPoolEntry::REGULAR) {
    info.regular_count++;
  } else {
    info.overflow_start = static_cast<int>(info.entries.size()) - 1;
  }

  return access;
}

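// Emit the deduplicated (shared) entries of {type} and patch each referencing
// load with the entry's offset from the pool start.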
void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
                                            ConstantPoolEntry::Type type) {
  PerTypeEntryInfo& info = info_[type];
  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
  const int entry_size = ConstantPoolEntry::size(type);
  int base = emitted_label_.pos();
  DCHECK_GT(base, 0);
  int shared_end = static_cast<int>(shared_entries.size());
  std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
  for (int i = 0; i < shared_end; i++, shared_it++) {
    int offset = assm->pc_offset() - base;
    shared_it->set_offset(offset);  // Save offset for merged entries.
    if (entry_size == kSystemPointerSize) {
      assm->dp(shared_it->value());
    } else {
      assm->dq(shared_it->value64());
    }
    DCHECK(is_uintn(offset, info.regular_reach_bits));

    // Patch load sequence with correct offset.
    assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset,
                                             ConstantPoolEntry::REGULAR, type);
  }
}

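// Emit the entries of {type} that belong to the given access class (regular
// or overflowed). Merged entries reuse the offset of the shared entry they
// were merged into instead of being emitted again.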
void ConstantPoolBuilder::EmitGroup(Assembler* assm,
                                    ConstantPoolEntry::Access access,
                                    ConstantPoolEntry::Type type) {
  PerTypeEntryInfo& info = info_[type];
  const bool overflow = info.overflow();
  std::vector<ConstantPoolEntry>& entries = info.entries;
  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
  const int entry_size = ConstantPoolEntry::size(type);
  int base = emitted_label_.pos();
  DCHECK_GT(base, 0);
  int begin;
  int end;

  if (access == ConstantPoolEntry::REGULAR) {
    // Emit any shared entries first.
    EmitSharedEntries(assm, type);
  }

  if (access == ConstantPoolEntry::REGULAR) {
    begin = 0;
    end = overflow ? info.overflow_start : static_cast<int>(entries.size());
  } else {
    DCHECK(access == ConstantPoolEntry::OVERFLOWED);
    if (!overflow) return;
    begin = info.overflow_start;
    end = static_cast<int>(entries.size());
  }

  std::vector<ConstantPoolEntry>::iterator it = entries.begin();
  if (begin > 0) std::advance(it, begin);
  for (int i = begin; i < end; i++, it++) {
    // Update constant pool if necessary and get the entry's offset.
    int offset;
    ConstantPoolEntry::Access entry_access;
    if (!it->is_merged()) {
      // Emit new entry.
      offset = assm->pc_offset() - base;
      entry_access = access;
      if (entry_size == kSystemPointerSize) {
        assm->dp(it->value());
      } else {
        assm->dq(it->value64());
      }
    } else {
      // Retrieve offset from shared entry.
      offset = shared_entries[it->merged_index()].offset();
      entry_access = ConstantPoolEntry::REGULAR;
    }

    DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED ||
           is_uintn(offset, info.regular_reach_bits));

    // Patch load sequence with correct offset.
    assm->PatchConstantPoolAccessInstruction(it->position(), offset,
                                             entry_access, type);
  }
}

// Emit and return size of pool.
int ConstantPoolBuilder::Emit(Assembler* assm) {
  bool emitted = emitted_label_.is_bound();
  bool empty = IsEmpty();

  if (!emitted) {
    // Mark start of constant pool. Align if necessary.
    if (!empty) assm->DataAlign(kDoubleSize);
    assm->bind(&emitted_label_);
    if (!empty) {
      // Emit in groups based on access and type.
      // Emit doubles first for alignment purposes.
      EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE);
      EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR);
      if (info_[ConstantPoolEntry::DOUBLE].overflow()) {
        assm->DataAlign(kDoubleSize);
        EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
                  ConstantPoolEntry::DOUBLE);
      }
      if (info_[ConstantPoolEntry::INTPTR].overflow()) {
        EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
                  ConstantPoolEntry::INTPTR);
      }
    }
  }

  return !empty ? (assm->pc_offset() - emitted_label_.pos()) : 0;
}

#endif  // defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)

#if defined(V8_TARGET_ARCH_ARM64)

// Constant Pool.

ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }

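// Record a constant pool entry for {data}. The 32-bit and 64-bit overloads
// return whether relocation info has to be written for this use, or can be
// omitted because an identical entry already exists.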
RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  CHECK(key.is_value32());
  return RecordKey(std::move(key), assm_->pc_offset());
}

RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  CHECK(!key.is_value32());
  return RecordKey(std::move(key), assm_->pc_offset());
}

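// Register the use of {key} at {offset}, updating the per-size entry counts
// and the first-use offsets that drive the emission heuristics.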
RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
  RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
  if (write_reloc_info == RelocInfoStatus::kMustRecord) {
    if (key.is_value32()) {
      if (entry32_count_ == 0) first_use_32_ = offset;
      ++entry32_count_;
    } else {
      if (entry64_count_ == 0) first_use_64_ = offset;
      ++entry64_count_;
    }
  }
  entries_.insert(std::make_pair(key, offset));

  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
    // Request constant pool emission after the next instruction.
    SetNextCheckIn(1);
  }

  return write_reloc_info;
}

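// A key that allows deduplication and is already present in the pool does not
// need its relocation info recorded again.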
RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
    const ConstantPoolKey& key) {
  if (key.AllowsDeduplication()) {
    auto existing = entries_.find(key);
    if (existing != entries_.end()) {
      return RelocInfoStatus::kMustOmitForDuplicate;
    }
  }
  return RelocInfoStatus::kMustRecord;
}

void ConstantPool::EmitAndClear(Jump require_jump) {
  DCHECK(!IsBlocked());
  // Prevent recursive pool emission.
  Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
  int size = ComputeSize(require_jump, require_alignment);
  Label size_check;
  assm_->bind(&size_check);
  assm_->RecordConstPool(size);

  // Emit the constant pool. It is preceded by an optional branch if
  // {require_jump} and a header which will:
  //  1) Encode the size of the constant pool, for use by the disassembler.
  //  2) Terminate the program, to try to prevent execution from accidentally
  //     flowing into the constant pool.
  //  3) Align the 64-bit pool entries to a 64-bit boundary.
  // TODO(all): Make the alignment part less fragile. Currently code is
  // allocated as a byte array so there are no guarantees the alignment will
  // be preserved on compaction. Currently it works as allocation seems to be
  // 64-bit aligned.

  Label after_pool;
  if (require_jump == Jump::kRequired) assm_->b(&after_pool);

  assm_->RecordComment("[ Constant Pool");
  EmitPrologue(require_alignment);
  if (require_alignment == Alignment::kRequired) assm_->Align(kInt64Size);
  EmitEntries();
  assm_->RecordComment("]");

  if (after_pool.is_linked()) assm_->bind(&after_pool);

  DCHECK_EQ(assm_->SizeOfCodeGeneratedSince(&size_check), size);
  Clear();
}

void ConstantPool::Clear() {
  entries_.clear();
  first_use_32_ = -1;
  first_use_64_ = -1;
  entry32_count_ = 0;
  entry64_count_ = 0;
  next_check_ = 0;
}

void ConstantPool::StartBlock() {
  if (blocked_nesting_ == 0) {
    // Prevent constant pool checks from happening by setting the next check to
    // the biggest possible offset.
    next_check_ = kMaxInt;
  }
  ++blocked_nesting_;
}

void ConstantPool::EndBlock() {
  --blocked_nesting_;
  if (blocked_nesting_ == 0) {
    DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
    // Make sure a check happens quickly after getting unblocked.
    next_check_ = 0;
  }
}

bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }

void ConstantPool::SetNextCheckIn(size_t instructions) {
  next_check_ =
      assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
}

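// Emit all recorded entries. Equal keys are grouped via equal_range; a
// deduplicated key is emitted once and all of its uses are patched to the
// same pool slot, while non-shared keys get one slot per use.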
void ConstantPool::EmitEntries() {
  for (auto iter = entries_.begin(); iter != entries_.end();) {
    DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
    auto range = entries_.equal_range(iter->first);
    bool shared = iter->first.AllowsDeduplication();
    for (auto it = range.first; it != range.second; ++it) {
      SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
      if (!shared) Emit(it->first);
    }
    if (shared) Emit(iter->first);
    iter = range.second;
  }
}

void ConstantPool::Emit(const ConstantPoolKey& key) {
  if (key.is_value32()) {
    assm_->dd(key.value32());
  } else {
    assm_->dq(key.value64());
  }
}

bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
  if (IsEmpty()) return false;
  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
    return true;
  }
  // We compute {dist32/64}, i.e. the distance from the first instruction
  // accessing a 32bit/64bit entry in the constant pool to any of the
  // 32bit/64bit constant pool entries, respectively. This is required because
  // we do not guarantee that entries are emitted in order of reference, i.e.
  // it is possible that the entry with the earliest reference is emitted last.
  // The constant pool should be emitted if any of the following is true:
  // (A) {dist32/64} will be out of range at the next check in.
  // (B) Emission can be done behind an unconditional branch and {dist32/64}
  //     exceeds {kOpportunityDist*}.
  // (C) {dist32/64} exceeds the desired approximate distance to the pool.
  int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
  size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  if (Entry64Count() != 0) {
    // The 64-bit constants are always emitted before the 32-bit constants, so
    // we subtract the size of the 32-bit constants from {size}.
    size_t dist64 = pool_end_64 - first_use_64_;
    bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
    bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
      return true;
    }
  }
  if (Entry32Count() != 0) {
    size_t dist32 = pool_end_32 - first_use_32_;
    bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
    bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
      return true;
    }
  }
  return false;
}

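// Size of the pool if emitted with the given jump/alignment requirements:
// prologue plus all 32-bit and 64-bit entries plus optional alignment padding.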
int ConstantPool::ComputeSize(Jump require_jump,
                              Alignment require_alignment) const {
  int size_up_to_marker = PrologueSize(require_jump);
  int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
  size_t size_after_marker =
      Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
  return size_up_to_marker + static_cast<int>(size_after_marker);
}

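// Alignment padding is only needed if there are 64-bit entries and the pool
// data (after the prologue) would not start on a 64-bit boundary at
// {pc_offset}.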
Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
                                                       int pc_offset) const {
  int size_up_to_marker = PrologueSize(require_jump);
  if (Entry64Count() != 0 &&
      !IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
    return Alignment::kRequired;
  }
  return Alignment::kOmitted;
}

bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
  // Check that all entries are in range if the pool is emitted at {pc_offset}.
  // This ignores kPcLoadDelta (conservatively, since all offsets are positive),
  // and over-estimates the last entry's address with the pool's end.
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
  size_t pool_end_32 =
      pc_offset + ComputeSize(Jump::kRequired, require_alignment);
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  bool entries_in_range_32 =
      Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
  bool entries_in_range_64 =
      Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
  return entries_in_range_32 && entries_in_range_64;
}

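// RAII scope that blocks pool emission. The margin-taking constructor first
// emits the pool (with a jump) if it would otherwise become due while
// blocked; the PoolEmissionCheck overload skips that up-front check.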
ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
    : pool_(&assm->constpool_) {
  pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
  pool_->StartBlock();
}

ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
    : pool_(&assm->constpool_) {
  DCHECK_EQ(check, PoolEmissionCheck::kSkip);
  pool_->StartBlock();
}

ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }

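// Emit the pool if needed once the pc has reached the next scheduled check.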
void ConstantPool::MaybeCheck() {
  if (assm_->pc_offset() >= next_check_) {
    Check(Emission::kIfNeeded, Jump::kRequired);
  }
}

#endif  // defined(V8_TARGET_ARCH_ARM64)

#if defined(V8_TARGET_ARCH_RISCV64)

// Constant Pool.

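// The RISCV64 constant pool below mirrors the ARM64 implementation above,
// differing mainly in the alignment primitive used (DataAlign), the extra
// DEBUG_PRINTF tracing, and the relaxed size check at the end of
// EmitAndClear.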
ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }

RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  CHECK(key.is_value32());
  return RecordKey(std::move(key), assm_->pc_offset());
}

RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
                                          RelocInfo::Mode rmode) {
  ConstantPoolKey key(data, rmode);
  CHECK(!key.is_value32());
  return RecordKey(std::move(key), assm_->pc_offset());
}

RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
  RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
  if (write_reloc_info == RelocInfoStatus::kMustRecord) {
    if (key.is_value32()) {
      if (entry32_count_ == 0) first_use_32_ = offset;
      ++entry32_count_;
    } else {
      if (entry64_count_ == 0) first_use_64_ = offset;
      ++entry64_count_;
    }
  }
  entries_.insert(std::make_pair(key, offset));

  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
    // Request constant pool emission after the next instruction.
    SetNextCheckIn(1);
  }

  return write_reloc_info;
}

RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
    const ConstantPoolKey& key) {
  if (key.AllowsDeduplication()) {
    auto existing = entries_.find(key);
    if (existing != entries_.end()) {
      return RelocInfoStatus::kMustOmitForDuplicate;
    }
  }
  return RelocInfoStatus::kMustRecord;
}

void ConstantPool::EmitAndClear(Jump require_jump) {
  DCHECK(!IsBlocked());
  // Prevent recursive pool emission.
  Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
  int size = ComputeSize(require_jump, require_alignment);
  Label size_check;
  assm_->bind(&size_check);
  assm_->RecordConstPool(size);

  // Emit the constant pool. It is preceded by an optional branch if
  // {require_jump} and a header which will:
  //  1) Encode the size of the constant pool, for use by the disassembler.
  //  2) Terminate the program, to try to prevent execution from accidentally
  //     flowing into the constant pool.
  //  3) Align the 64-bit pool entries to a 64-bit boundary.
  // TODO(all): Make the alignment part less fragile. Currently code is
  // allocated as a byte array so there are no guarantees the alignment will
  // be preserved on compaction. Currently it works as allocation seems to be
  // 64-bit aligned.
  DEBUG_PRINTF("\tConstant Pool start\n")
  Label after_pool;
  if (require_jump == Jump::kRequired) assm_->b(&after_pool);

  assm_->RecordComment("[ Constant Pool");

  EmitPrologue(require_alignment);
  if (require_alignment == Alignment::kRequired) assm_->DataAlign(kInt64Size);
  EmitEntries();
  assm_->RecordComment("]");
  assm_->bind(&after_pool);
  DEBUG_PRINTF("\tConstant Pool end\n")

  DCHECK_LE(assm_->SizeOfCodeGeneratedSince(&size_check) - size, 3);
  Clear();
}

void ConstantPool::Clear() {
  entries_.clear();
  first_use_32_ = -1;
  first_use_64_ = -1;
  entry32_count_ = 0;
  entry64_count_ = 0;
  next_check_ = 0;
}

void ConstantPool::StartBlock() {
  if (blocked_nesting_ == 0) {
    // Prevent constant pool checks from happening by setting the next check to
    // the biggest possible offset.
    next_check_ = kMaxInt;
  }
  ++blocked_nesting_;
}

void ConstantPool::EndBlock() {
  --blocked_nesting_;
  if (blocked_nesting_ == 0) {
    DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
    // Make sure a check happens quickly after getting unblocked.
    next_check_ = 0;
  }
}

bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }

void ConstantPool::SetNextCheckIn(size_t instructions) {
  next_check_ =
      assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
}

void ConstantPool::EmitEntries() {
  for (auto iter = entries_.begin(); iter != entries_.end();) {
    DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
    auto range = entries_.equal_range(iter->first);
    bool shared = iter->first.AllowsDeduplication();
    for (auto it = range.first; it != range.second; ++it) {
      SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
      if (!shared) Emit(it->first);
    }
    if (shared) Emit(iter->first);
    iter = range.second;
  }
}

void ConstantPool::Emit(const ConstantPoolKey& key) {
  if (key.is_value32()) {
    assm_->dd(key.value32());
  } else {
    assm_->dq(key.value64());
  }
}

bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
  if (IsEmpty()) return false;
  if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
    return true;
  }
  // We compute {dist32/64}, i.e. the distance from the first instruction
  // accessing a 32bit/64bit entry in the constant pool to any of the
  // 32bit/64bit constant pool entries, respectively. This is required because
  // we do not guarantee that entries are emitted in order of reference, i.e.
  // it is possible that the entry with the earliest reference is emitted last.
  // The constant pool should be emitted if any of the following is true:
  // (A) {dist32/64} will be out of range at the next check in.
  // (B) Emission can be done behind an unconditional branch and {dist32/64}
  //     exceeds {kOpportunityDist*}.
  // (C) {dist32/64} exceeds the desired approximate distance to the pool.
  int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
  size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  if (Entry64Count() != 0) {
    // The 64-bit constants are always emitted before the 32-bit constants, so
    // we subtract the size of the 32-bit constants from {size}.
    size_t dist64 = pool_end_64 - first_use_64_;
    bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
    bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
      return true;
    }
  }
  if (Entry32Count() != 0) {
    size_t dist32 = pool_end_32 - first_use_32_;
    bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
    bool opportune_emission_without_jump =
        require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
    bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
    if (next_check_too_late || opportune_emission_without_jump ||
        approximate_distance_exceeded) {
      return true;
    }
  }
  return false;
}

int ConstantPool::ComputeSize(Jump require_jump,
                              Alignment require_alignment) const {
  int size_up_to_marker = PrologueSize(require_jump);
  int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
  size_t size_after_marker =
      Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
  return size_up_to_marker + static_cast<int>(size_after_marker);
}

Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
                                                       int pc_offset) const {
  int size_up_to_marker = PrologueSize(require_jump);
  if (Entry64Count() != 0 &&
      !IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
    return Alignment::kRequired;
  }
  return Alignment::kOmitted;
}

bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
  // Check that all entries are in range if the pool is emitted at {pc_offset}.
  // This ignores kPcLoadDelta (conservatively, since all offsets are positive),
  // and over-estimates the last entry's address with the pool's end.
  Alignment require_alignment =
      IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
  size_t pool_end_32 =
      pc_offset + ComputeSize(Jump::kRequired, require_alignment);
  size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
  bool entries_in_range_32 =
      Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
  bool entries_in_range_64 =
      Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
  return entries_in_range_32 && entries_in_range_64;
}

ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
    : pool_(&assm->constpool_) {
  pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
  pool_->StartBlock();
}

ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
    : pool_(&assm->constpool_) {
  DCHECK_EQ(check, PoolEmissionCheck::kSkip);
  pool_->StartBlock();
}

ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }

void ConstantPool::MaybeCheck() {
  if (assm_->pc_offset() >= next_check_) {
    Check(Emission::kIfNeeded, Jump::kRequired);
  }
}

#endif  // defined(V8_TARGET_ARCH_RISCV64)

}  // namespace internal
}  // namespace v8