// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/read-only-spaces.h"

#include <memory>

#include "include/v8-internal.h"
#include "include/v8-platform.h"
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
#include "src/execution/isolate.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/combined-heap.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/objects-inl.h"
#include "src/objects/property-details.h"
#include "src/objects/string.h"
#include "src/snapshot/read-only-deserializer.h"

namespace v8 {
namespace internal {

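// Copies the read-only root entries from |src| to |dst|, rebasing each
// pointer so it is relative to |new_base| instead of the original isolate
// root derived from src[0].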
void CopyAndRebaseRoots(Address* src, Address* dst, Address new_base) {
  Address src_base = GetIsolateRootAddress(src[0]);
  for (size_t i = 0; i < ReadOnlyHeap::kEntriesCount; ++i) {
    dst[i] = src[i] - src_base + new_base;
  }
}

void ReadOnlyArtifacts::set_read_only_heap(
    std::unique_ptr<ReadOnlyHeap> read_only_heap) {
  read_only_heap_ = std::move(read_only_heap);
}

void ReadOnlyArtifacts::InitializeChecksum(
    SnapshotData* read_only_snapshot_data) {
#ifdef DEBUG
  read_only_blob_checksum_ = Checksum(read_only_snapshot_data->Payload());
#endif  // DEBUG
}

void ReadOnlyArtifacts::VerifyChecksum(SnapshotData* read_only_snapshot_data,
                                       bool read_only_heap_created) {
#ifdef DEBUG
  if (read_only_blob_checksum_) {
    // The read-only heap was set up from a snapshot. Make sure it's always
    // the same snapshot.
    uint32_t snapshot_checksum = Checksum(read_only_snapshot_data->Payload());
    CHECK_WITH_MSG(snapshot_checksum,
                   "Attempt to create the read-only heap after already "
                   "creating from a snapshot.");
    CHECK_EQ(read_only_blob_checksum_, snapshot_checksum);
  } else {
    // If there's no checksum, then that means the read-only heap objects are
    // being created.
    CHECK(read_only_heap_created);
  }
#endif  // DEBUG
}

SingleCopyReadOnlyArtifacts::~SingleCopyReadOnlyArtifacts() {
  // This particular SharedReadOnlySpace should not destroy its own pages as
  // TearDown requires MemoryAllocator which itself is tied to an Isolate.
  shared_read_only_space_->pages_.resize(0);

  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  for (ReadOnlyPage* chunk : pages_) {
    void* chunk_address = reinterpret_cast<void*>(chunk->address());
    size_t size = RoundUp(chunk->size(), page_allocator->AllocatePageSize());
    CHECK(page_allocator->FreePages(chunk_address, size));
  }
}

ReadOnlyHeap* SingleCopyReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
    Isolate* isolate) {
  return read_only_heap();
}

void SingleCopyReadOnlyArtifacts::Initialize(Isolate* isolate,
                                             std::vector<ReadOnlyPage*>&& pages,
                                             const AllocationStats& stats) {
  pages_ = std::move(pages);
  set_accounting_stats(stats);
  set_shared_read_only_space(
      std::make_unique<SharedReadOnlySpace>(isolate->heap(), this));
}

void SingleCopyReadOnlyArtifacts::ReinstallReadOnlySpace(Isolate* isolate) {
  isolate->heap()->ReplaceReadOnlySpace(shared_read_only_space());
}

void SingleCopyReadOnlyArtifacts::VerifyHeapAndSpaceRelationships(
    Isolate* isolate) {
  DCHECK_EQ(read_only_heap()->read_only_space(), shared_read_only_space());

  // Confirm the Isolate is using the shared ReadOnlyHeap and ReadOnlySpace.
  DCHECK_EQ(read_only_heap(), isolate->read_only_heap());
  DCHECK_EQ(shared_read_only_space(), isolate->heap()->read_only_space());
}

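// Copies the isolate's read-only roots into this artifacts object, storing
// them as offsets from the isolate root so they can later be rebased onto any
// other isolate's root.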
void PointerCompressedReadOnlyArtifacts::InitializeRootsFrom(Isolate* isolate) {
  auto isolate_ro_roots =
      isolate->roots_table().read_only_roots_begin().location();
  CopyAndRebaseRoots(isolate_ro_roots, read_only_roots_, 0);
}

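// Copies the stored read-only root offsets into the given isolate's roots
// table, rebased onto that isolate's root address.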
void PointerCompressedReadOnlyArtifacts::InitializeRootsIn(Isolate* isolate) {
  auto isolate_ro_roots =
      isolate->roots_table().read_only_roots_begin().location();
  CopyAndRebaseRoots(read_only_roots_, isolate_ro_roots,
                     isolate->isolate_root());
}

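// Builds a SharedReadOnlySpace for |isolate| by remapping each shared
// read-only page into the isolate's pointer-compression cage at the same
// compressed offset it had in the original isolate.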
SharedReadOnlySpace* PointerCompressedReadOnlyArtifacts::CreateReadOnlySpace(
    Isolate* isolate) {
  AllocationStats new_stats;
  new_stats.IncreaseCapacity(accounting_stats().Capacity());

  std::vector<std::unique_ptr<v8::PageAllocator::SharedMemoryMapping>> mappings;
  std::vector<ReadOnlyPage*> pages;
  Address isolate_root = isolate->isolate_root();
  for (size_t i = 0; i < pages_.size(); ++i) {
    const ReadOnlyPage* page = pages_[i];
    const Tagged_t offset = OffsetForPage(i);
    Address new_address = isolate_root + offset;
    ReadOnlyPage* new_page = nullptr;
    bool success = isolate->heap()
                       ->memory_allocator()
                       ->data_page_allocator()
                       ->ReserveForSharedMemoryMapping(
                           reinterpret_cast<void*>(new_address), page->size());
    CHECK(success);
    auto shared_memory = RemapPageTo(i, new_address, new_page);
    // Remapping might fail in principle, but currently it cannot fail on
    // Linux. On Windows it is not possible to reserve memory and then map
    // into the middle of it, so we would have to reserve the memory, free it
    // and then attempt to remap to it, which could fail. At that point this
    // will need to change.
    CHECK(shared_memory);
    CHECK_NOT_NULL(new_page);

    new_stats.IncreaseAllocatedBytes(page->allocated_bytes(), new_page);
    mappings.push_back(std::move(shared_memory));
    pages.push_back(new_page);
  }

  auto* shared_read_only_space =
      new SharedReadOnlySpace(isolate->heap(), std::move(pages),
                              std::move(mappings), std::move(new_stats));
  return shared_read_only_space;
}

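// Creates a ReadOnlyHeap for |isolate|: installs the read-only roots into the
// isolate's roots table, builds a remapped SharedReadOnlySpace, and rebuilds
// the read-only object cache with addresses recompressed against the
// isolate's own root.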
ReadOnlyHeap* PointerCompressedReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
    Isolate* isolate) {
  DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
  InitializeRootsIn(isolate);

  SharedReadOnlySpace* shared_read_only_space = CreateReadOnlySpace(isolate);
  ReadOnlyHeap* read_only_heap = new ReadOnlyHeap(shared_read_only_space);

  // TODO(v8:10699): The cache should just live uncompressed in
  // ReadOnlyArtifacts and be decompressed on the fly.
  auto original_cache = read_only_heap_->read_only_object_cache_;
  auto& cache = read_only_heap->read_only_object_cache_;
  Address isolate_root = isolate->isolate_root();
  for (Object original_object : original_cache) {
    Address original_address = original_object.ptr();
    Address new_address = isolate_root + CompressTagged(original_address);
    Object new_object = Object(new_address);
    cache.push_back(new_object);
  }

  return read_only_heap;
}

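// Remaps the shared memory backing page |i| to |new_address|. On success,
// |new_page| is set to the page header at the new address and the mapping is
// returned; on failure an empty unique_ptr is returned.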
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>
PointerCompressedReadOnlyArtifacts::RemapPageTo(size_t i, Address new_address,
                                                ReadOnlyPage*& new_page) {
  std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> mapping =
      shared_memory_[i]->RemapTo(reinterpret_cast<void*>(new_address));
  if (mapping) {
    new_page = static_cast<ReadOnlyPage*>(reinterpret_cast<void*>(new_address));
    return mapping;
  } else {
    return {};
  }
}

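// Takes ownership of the detached read-only pages and creates a shared copy
// of each one, recording every page's compressed offset so the copies can
// later be remapped into other isolates' cages.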
void PointerCompressedReadOnlyArtifacts::Initialize(
    Isolate* isolate, std::vector<ReadOnlyPage*>&& pages,
    const AllocationStats& stats) {
  DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
  DCHECK(pages_.empty());
  DCHECK(!pages.empty());

  // It's not possible to copy the AllocationStats directly as the new pages
  // will be mapped to different addresses.
  stats_.IncreaseCapacity(stats.Capacity());

  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  DCHECK(page_allocator->CanAllocateSharedPages());

  for (const ReadOnlyPage* page : pages) {
    size_t size = RoundUp(page->size(), page_allocator->AllocatePageSize());
    // 1. Allocate some new memory for a shared copy of the page and copy the
    // original contents into it. Doesn't need to be V8 page aligned, since
    // we'll never use it directly.
    auto shared_memory = page_allocator->AllocateSharedPages(size, page);
    void* ptr = shared_memory->GetMemory();
    CHECK_NOT_NULL(ptr);

    // 2. Copy the contents of the original page into the shared page.
    ReadOnlyPage* new_page = reinterpret_cast<ReadOnlyPage*>(ptr);

    pages_.push_back(new_page);
    shared_memory_.push_back(std::move(shared_memory));
    // This is just CompressTagged but inlined so it will always compile.
    Tagged_t compressed_address = CompressTagged(page->address());
    page_offsets_.push_back(compressed_address);

    // 3. Update the accounting stats so the allocated bytes are for the new
    // shared page rather than the original.
    stats_.IncreaseAllocatedBytes(page->allocated_bytes(), new_page);
  }

  InitializeRootsFrom(isolate);
  set_shared_read_only_space(
      std::make_unique<SharedReadOnlySpace>(isolate->heap(), this));
}

void PointerCompressedReadOnlyArtifacts::ReinstallReadOnlySpace(
    Isolate* isolate) {
  // We need to build a new SharedReadOnlySpace that occupies the same memory
  // as the original one, so first the original space's pages must be freed.
  Heap* heap = isolate->heap();
  heap->read_only_space()->TearDown(heap->memory_allocator());

  heap->ReplaceReadOnlySpace(CreateReadOnlySpace(heap->isolate()));

  DCHECK_NE(heap->read_only_space(), shared_read_only_space());

  // Also recreate the ReadOnlyHeap using this space.
  auto* ro_heap = new ReadOnlyHeap(isolate->read_only_heap(),
                                   isolate->heap()->read_only_space());
  isolate->set_read_only_heap(ro_heap);

  DCHECK_NE(*isolate->roots_table().read_only_roots_begin().location(), 0);
}

void PointerCompressedReadOnlyArtifacts::VerifyHeapAndSpaceRelationships(
    Isolate* isolate) {
  // Confirm the canonical versions of the ReadOnlySpace/ReadOnlyHeap from the
  // ReadOnlyArtifacts are not accidentally present in a real Isolate (which
  // might destroy them) and the ReadOnlyHeaps and Spaces are correctly
  // associated with each other.
  DCHECK_NE(shared_read_only_space(), isolate->heap()->read_only_space());
  DCHECK_NE(read_only_heap(), isolate->read_only_heap());
  DCHECK_EQ(read_only_heap()->read_only_space(), shared_read_only_space());
  DCHECK_EQ(isolate->read_only_heap()->read_only_space(),
            isolate->heap()->read_only_space());
}

// -----------------------------------------------------------------------------
// ReadOnlySpace implementation

ReadOnlySpace::ReadOnlySpace(Heap* heap)
    : BaseSpace(heap, RO_SPACE),
      top_(kNullAddress),
      limit_(kNullAddress),
      is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()),
      capacity_(0),
      area_size_(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE)) {}

// Needs to be defined in the cc file to force the vtable to be emitted in
// component builds.
ReadOnlySpace::~ReadOnlySpace() = default;

void SharedReadOnlySpace::TearDown(MemoryAllocator* memory_allocator) {
  // SharedReadOnlySpaces do not tear down their own pages since they are
  // either freed by the ReadOnlyArtifacts that contain them or, in the case
  // of pointer compression, they are freed when the SharedMemoryMappings are
  // freed.
  pages_.resize(0);
  accounting_stats_.Clear();
}

void ReadOnlySpace::TearDown(MemoryAllocator* memory_allocator) {
  for (ReadOnlyPage* chunk : pages_) {
    memory_allocator->FreeReadOnlyPage(chunk);
  }
  pages_.resize(0);
  accounting_stats_.Clear();
}

void ReadOnlySpace::DetachPagesAndAddToArtifacts(
    std::shared_ptr<ReadOnlyArtifacts> artifacts) {
  DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());

  Heap* heap = ReadOnlySpace::heap();
  // Without pointer compression, ReadOnlySpace pages are directly shared
  // between all heaps and so must be unregistered from their originating
  // allocator.
  Seal(COMPRESS_POINTERS_BOOL ? SealMode::kDetachFromHeap
                              : SealMode::kDetachFromHeapAndUnregisterMemory);
  artifacts->Initialize(heap->isolate(), std::move(pages_), accounting_stats_);
}

void ReadOnlyPage::MakeHeaderRelocatable() {
  heap_ = nullptr;
  owner_ = nullptr;
  reservation_.Reset();
}

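// Applies the given page permission to every page in the space.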
void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
                                           PageAllocator::Permission access) {
  for (BasicMemoryChunk* chunk : pages_) {
    // Read-only pages don't have a valid reservation object, so we get the
    // proper page allocator manually.
    v8::PageAllocator* page_allocator =
        memory_allocator->page_allocator(NOT_EXECUTABLE);
    CHECK(SetPermissions(page_allocator, chunk->address(), chunk->size(),
                         access));
  }
}

// After we have booted, we have created a map which represents free space
// on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
// fix them.
void ReadOnlySpace::RepairFreeSpacesAfterDeserialization() {
  BasicMemoryChunk::UpdateHighWaterMark(top_);
  // Each page may have a small free space that is not tracked by a free list.
  // Those free spaces still contain null as their map pointer.
  // Overwrite them with new fillers.
  for (BasicMemoryChunk* chunk : pages_) {
    Address start = chunk->HighWaterMark();
    Address end = chunk->area_end();
    // Put a filler object in the gap between the end of the allocated objects
    // and the end of the allocatable area.
    if (start < end) {
      heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
                                   ClearRecordedSlots::kNo);
    }
  }
}

void ReadOnlySpace::ClearStringPaddingIfNeeded() {
  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
    // TODO(ulan): Revisit this once third-party heap supports iteration.
    return;
  }
  if (is_string_padding_cleared_) return;

  ReadOnlyHeapObjectIterator iterator(this);
  for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
    if (o.IsSeqOneByteString()) {
      SeqOneByteString::cast(o).clear_padding();
    } else if (o.IsSeqTwoByteString()) {
      SeqTwoByteString::cast(o).clear_padding();
    }
  }
  is_string_padding_cleared_ = true;
}

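// Seals the space: closes the linear allocation area, optionally detaches the
// pages from the owning heap (unregistering them from the memory allocator
// and making their headers relocatable where required) and finally makes
// every page read-only.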
void ReadOnlySpace::Seal(SealMode ro_mode) {
  DCHECK(!is_marked_read_only_);

  FreeLinearAllocationArea();
  is_marked_read_only_ = true;
  auto* memory_allocator = heap()->memory_allocator();

  if (ro_mode != SealMode::kDoNotDetachFromHeap) {
    DetachFromHeap();
    for (ReadOnlyPage* p : pages_) {
      if (ro_mode == SealMode::kDetachFromHeapAndUnregisterMemory) {
        memory_allocator->UnregisterMemory(p);
      }
      if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
        p->MakeHeaderRelocatable();
      }
    }
  }

  SetPermissionsForPages(memory_allocator, PageAllocator::kRead);
}

void ReadOnlySpace::Unseal() {
  DCHECK(is_marked_read_only_);
  if (!pages_.empty()) {
    SetPermissionsForPages(heap()->memory_allocator(),
                           PageAllocator::kReadWrite);
  }
  is_marked_read_only_ = false;
}

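// Returns true if |addr| lies on one of this space's pages. Linear in the
// number of pages, hence "slow".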
bool ReadOnlySpace::ContainsSlow(Address addr) {
  BasicMemoryChunk* c = BasicMemoryChunk::FromAddress(addr);
  for (BasicMemoryChunk* chunk : pages_) {
    if (chunk == c) return true;
  }
  return false;
}

namespace {
// Only iterates over a single chunk as the chunk iteration is done externally.
class ReadOnlySpaceObjectIterator : public ObjectIterator {
 public:
  ReadOnlySpaceObjectIterator(Heap* heap, ReadOnlySpace* space,
                              BasicMemoryChunk* chunk)
      : cur_addr_(chunk->area_start()),
        cur_end_(chunk->area_end()),
        space_(space) {}

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns a null HeapObject when the iteration has ended.
  HeapObject Next() override {
    HeapObject next_obj = FromCurrentPage();
    if (!next_obj.is_null()) return next_obj;
    return HeapObject();
  }

 private:
  HeapObject FromCurrentPage() {
    while (cur_addr_ != cur_end_) {
      if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
        cur_addr_ = space_->limit();
        continue;
      }
      HeapObject obj = HeapObject::FromAddress(cur_addr_);
      const int obj_size = obj.Size();
      cur_addr_ += obj_size;
      DCHECK_LE(cur_addr_, cur_end_);
      if (!obj.IsFreeSpaceOrFiller()) {
        if (obj.IsCode()) {
          DCHECK(Code::cast(obj).is_builtin());
          DCHECK_CODEOBJECT_SIZE(obj_size, space_);
        } else {
          DCHECK_OBJECT_SIZE(obj_size);
        }
        return obj;
      }
    }
    return HeapObject();
  }

  Address cur_addr_;  // Current iteration point.
  Address cur_end_;   // End iteration point.
  ReadOnlySpace* space_;
};
}  // namespace

#ifdef VERIFY_HEAP
namespace {
class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
 public:
  explicit VerifyReadOnlyPointersVisitor(Heap* heap)
      : VerifyPointersVisitor(heap) {}

 protected:
  void VerifyPointers(HeapObject host, MaybeObjectSlot start,
                      MaybeObjectSlot end) override {
    if (!host.is_null()) {
      CHECK(ReadOnlyHeap::Contains(host.map()));
    }
    VerifyPointersVisitor::VerifyPointers(host, start, end);

    for (MaybeObjectSlot current = start; current < end; ++current) {
      HeapObject heap_object;
      if ((*current)->GetHeapObject(&heap_object)) {
        CHECK(ReadOnlyHeap::Contains(heap_object));
      }
    }
  }
};
}  // namespace

void ReadOnlySpace::Verify(Isolate* isolate) {
  bool allocation_pointer_found_in_space = top_ == limit_;
  VerifyReadOnlyPointersVisitor visitor(isolate->heap());

  for (BasicMemoryChunk* page : pages_) {
    if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
      CHECK_NULL(page->owner());
    } else {
      CHECK_EQ(page->owner(), this);
    }

    if (page == Page::FromAllocationAreaAddress(top_)) {
      allocation_pointer_found_in_space = true;
    }
    ReadOnlySpaceObjectIterator it(isolate->heap(), this, page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();

    for (HeapObject object = it.Next(); !object.is_null();
         object = it.Next()) {
      CHECK(end_of_previous_object <= object.address());

      Map map = object.map();
      CHECK(map.IsMap());

      // The object itself should look OK.
      object.ObjectVerify(isolate);

      // All the interior pointers should be contained in the heap.
      int size = object.Size();
      object.IterateBody(map, size, &visitor);
      CHECK(object.address() + size <= top);
      end_of_previous_object = object.address() + size;

      CHECK(!object.IsExternalString());
      CHECK(!object.IsJSArrayBuffer());
    }
  }
  CHECK(allocation_pointer_found_in_space);

#ifdef DEBUG
  VerifyCounters(isolate->heap());
#endif
}

#ifdef DEBUG
void ReadOnlySpace::VerifyCounters(Heap* heap) {
  size_t total_capacity = 0;
  size_t total_allocated = 0;
  for (BasicMemoryChunk* page : pages_) {
    total_capacity += page->area_size();
    ReadOnlySpaceObjectIterator it(heap, this, page);
    size_t real_allocated = 0;
    for (HeapObject object = it.Next(); !object.is_null();
         object = it.Next()) {
      if (!object.IsFreeSpaceOrFiller()) {
        real_allocated += object.Size();
      }
    }
    total_allocated += page->allocated_bytes();
    // The real size can be smaller than the accounted size if array trimming
    // or object slack tracking happened after sweeping.
    DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
    DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
  }
  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
  DCHECK_EQ(total_allocated, accounting_stats_.Size());
}
#endif  // DEBUG
#endif  // VERIFY_HEAP

size_t ReadOnlySpace::CommittedPhysicalMemory() {
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  BasicMemoryChunk::UpdateHighWaterMark(top_);
  size_t size = 0;
  for (auto* chunk : pages_) {
    size += chunk->size();
  }

  return size;
}

void ReadOnlySpace::FreeLinearAllocationArea() {
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.
  if (top_ == kNullAddress) {
    DCHECK_EQ(kNullAddress, limit_);
    return;
  }

  // Clear the bits in the unused black area.
  ReadOnlyPage* page = pages_.back();
  heap()->incremental_marking()->marking_state()->bitmap(page)->ClearRange(
      page->AddressToMarkbitIndex(top_), page->AddressToMarkbitIndex(limit_));

  heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_),
                               ClearRecordedSlots::kNo);

  BasicMemoryChunk::UpdateHighWaterMark(top_);

  top_ = kNullAddress;
  limit_ = kNullAddress;
}

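// Ensures the linear allocation area has room for |size_in_bytes|. If it does
// not, the current area is closed and a newly allocated read-only page
// becomes the active allocation area.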
void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) {
  if (top_ + size_in_bytes <= limit_) {
    return;
  }

  DCHECK_GE(size_in_bytes, 0);

  FreeLinearAllocationArea();

  BasicMemoryChunk* chunk =
      heap()->memory_allocator()->AllocateReadOnlyPage(AreaSize(), this);
  CHECK_NOT_NULL(chunk);
  capacity_ += AreaSize();

  accounting_stats_.IncreaseCapacity(chunk->area_size());
  AccountCommitted(chunk->size());
  pages_.push_back(static_cast<ReadOnlyPage*>(chunk));

  heap()->CreateFillerObjectAt(chunk->area_start(),
                               static_cast<int>(chunk->area_size()),
                               ClearRecordedSlots::kNo);

  top_ = chunk->area_start();
  limit_ = chunk->area_end();
}

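// Attempts a bump-pointer allocation of |size_in_bytes| with the requested
// alignment in the current linear allocation area. Returns a null HeapObject
// if the area is too small; otherwise returns the allocated object, preceded
// by a filler if alignment padding was needed.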
HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  Address current_top = top_;
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + size_in_bytes;
  if (new_top > limit_) return HeapObject();

  // Allocation always occurs in the last chunk for RO_SPACE.
  BasicMemoryChunk* chunk = pages_.back();
  int allocated_size = filler_size + size_in_bytes;
  accounting_stats_.IncreaseAllocatedBytes(allocated_size, chunk);
  chunk->IncreaseAllocatedBytes(allocated_size);

  top_ = new_top;
  if (filler_size > 0) {
    return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
                                   HeapObject::FromAddress(current_top),
                                   filler_size);
  }

  return HeapObject::FromAddress(current_top);
}

AllocationResult ReadOnlySpace::AllocateRawAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  DCHECK(!IsDetached());
  int allocation_size = size_in_bytes;

  HeapObject object = TryAllocateLinearlyAligned(allocation_size, alignment);
  if (object.is_null()) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    EnsureSpaceForAllocation(allocation_size +
                             Heap::GetMaximumFillToAlign(alignment));
    allocation_size = size_in_bytes;
    object = TryAllocateLinearlyAligned(size_in_bytes, alignment);
    CHECK(!object.is_null());
  }
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);

  return object;
}

AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
  DCHECK(!IsDetached());
  EnsureSpaceForAllocation(size_in_bytes);
  Address current_top = top_;
  Address new_top = current_top + size_in_bytes;
  DCHECK_LE(new_top, limit_);
  top_ = new_top;
  HeapObject object = HeapObject::FromAddress(current_top);

  DCHECK(!object.is_null());
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);

  // Allocation always occurs in the last chunk for RO_SPACE.
  BasicMemoryChunk* chunk = pages_.back();
  accounting_stats_.IncreaseAllocatedBytes(size_in_bytes, chunk);
  chunk->IncreaseAllocatedBytes(size_in_bytes);

  return object;
}

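// Allocates |size_in_bytes| in the read-only space. On 32-bit hosts the
// aligned allocation path is used when a non-word alignment is requested;
// otherwise the allocation is unaligned.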
AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
                                            AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  AllocationResult result = alignment != kWordAligned
                                ? AllocateRawAligned(size_in_bytes, alignment)
                                : AllocateRawUnaligned(size_in_bytes);
#else
  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
  HeapObject heap_obj;
  if (!result.IsRetry() && result.To(&heap_obj)) {
    DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
  }
  return result;
}

size_t ReadOnlyPage::ShrinkToHighWaterMark() {
  // Shrink pages to high water mark. The water mark points either to a filler
  // or the area_end.
  HeapObject filler = HeapObject::FromAddress(HighWaterMark());
  if (filler.address() == area_end()) return 0;
  CHECK(filler.IsFreeSpaceOrFiller());
  DCHECK_EQ(filler.address() + filler.Size(), area_end());

  size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
                            MemoryAllocator::GetCommitPageSize());
  if (unused > 0) {
    DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
                   reinterpret_cast<void*>(this),
                   reinterpret_cast<void*>(area_end()),
                   reinterpret_cast<void*>(area_end() - unused));
    }
    heap()->CreateFillerObjectAt(
        filler.address(),
        static_cast<int>(area_end() - filler.address() - unused),
        ClearRecordedSlots::kNo);
    heap()->memory_allocator()->PartialFreeMemory(
        this, address() + size() - unused, unused, area_end() - unused);
    if (filler.address() != area_end()) {
      CHECK(filler.IsFreeSpaceOrFiller());
      CHECK_EQ(filler.address() + filler.Size(), area_end());
    }
  }
  return unused;
}

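// Releases committed memory beyond each page's high water mark and adjusts
// capacity and accounting stats accordingly.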
void ReadOnlySpace::ShrinkPages() {
  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return;
  BasicMemoryChunk::UpdateHighWaterMark(top_);
  heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_),
                               ClearRecordedSlots::kNo);

  for (ReadOnlyPage* chunk : pages_) {
    DCHECK(chunk->IsFlagSet(Page::NEVER_EVACUATE));
    size_t unused = chunk->ShrinkToHighWaterMark();
    capacity_ -= unused;
    accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
    AccountUncommitted(unused);
  }
  limit_ = pages_.back()->area_end();
}

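// Turns a freshly allocated BasicMemoryChunk into a ReadOnlyPage: resets the
// allocation counter, marks the page as never-evacuate and read-only, and
// sets all mark bits so read-only objects are always considered marked.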
ReadOnlyPage* ReadOnlySpace::InitializePage(BasicMemoryChunk* chunk) {
  ReadOnlyPage* page = reinterpret_cast<ReadOnlyPage*>(chunk);
  page->allocated_bytes_ = 0;
  page->SetFlag(BasicMemoryChunk::Flag::NEVER_EVACUATE);
  heap()
      ->incremental_marking()
      ->non_atomic_marking_state()
      ->bitmap(chunk)
      ->MarkAllBits();
  chunk->SetFlag(BasicMemoryChunk::READ_ONLY_HEAP);

  return page;
}

SharedReadOnlySpace::SharedReadOnlySpace(
    Heap* heap, PointerCompressedReadOnlyArtifacts* artifacts)
    : SharedReadOnlySpace(heap) {
  // This constructor should only be used when RO_SPACE is shared with pointer
  // compression.
  DCHECK(V8_SHARED_RO_HEAP_BOOL);
  DCHECK(COMPRESS_POINTERS_BOOL);
  DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
  DCHECK(!artifacts->pages().empty());

  accounting_stats_.IncreaseCapacity(artifacts->accounting_stats().Capacity());
  for (ReadOnlyPage* page : artifacts->pages()) {
    pages_.push_back(page);
    accounting_stats_.IncreaseAllocatedBytes(page->allocated_bytes(), page);
  }
}

SharedReadOnlySpace::SharedReadOnlySpace(
    Heap* heap, std::vector<ReadOnlyPage*>&& new_pages,
    std::vector<std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>>&&
        mappings,
    AllocationStats&& new_stats)
    : SharedReadOnlySpace(heap) {
  DCHECK(V8_SHARED_RO_HEAP_BOOL);
  DCHECK(COMPRESS_POINTERS_BOOL);
  DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());

  accounting_stats_ = std::move(new_stats);
  pages_ = std::move(new_pages);
  shared_memory_mappings_ = std::move(mappings);
}

SharedReadOnlySpace::SharedReadOnlySpace(Heap* heap,
                                         SingleCopyReadOnlyArtifacts* artifacts)
    : SharedReadOnlySpace(heap) {
  // This constructor should only be used when RO_SPACE is shared without
  // pointer compression.
  DCHECK(V8_SHARED_RO_HEAP_BOOL);
  DCHECK(!COMPRESS_POINTERS_BOOL);
  accounting_stats_ = artifacts->accounting_stats();
  pages_ = artifacts->pages();
}

}  // namespace internal
}  // namespace v8