1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/heap/spaces.h"
6
7 #include <utility>
8
9 #include "src/base/bits.h"
10 #include "src/base/platform/platform.h"
11 #include "src/base/platform/semaphore.h"
12 #include "src/counters.h"
13 #include "src/full-codegen/full-codegen.h"
14 #include "src/heap/array-buffer-tracker.h"
15 #include "src/heap/incremental-marking.h"
16 #include "src/heap/mark-compact.h"
17 #include "src/heap/slot-set.h"
18 #include "src/macro-assembler.h"
19 #include "src/msan.h"
20 #include "src/objects-inl.h"
21 #include "src/snapshot/snapshot.h"
22 #include "src/v8.h"
23
24 namespace v8 {
25 namespace internal {
26
27
28 // ----------------------------------------------------------------------------
29 // HeapObjectIterator
30
31 HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
32 : cur_addr_(nullptr),
33 cur_end_(nullptr),
34 space_(space),
35 page_range_(space->anchor()->next_page(), space->anchor()),
36 current_page_(page_range_.begin()) {}
37
38 HeapObjectIterator::HeapObjectIterator(Page* page)
39 : cur_addr_(nullptr),
40 cur_end_(nullptr),
41 space_(reinterpret_cast<PagedSpace*>(page->owner())),
42 page_range_(page),
43 current_page_(page_range_.begin()) {
44 #ifdef DEBUG
45 Space* owner = page->owner();
46 DCHECK(owner == page->heap()->old_space() ||
47 owner == page->heap()->map_space() ||
48 owner == page->heap()->code_space());
49 #endif // DEBUG
50 }
51
52 // We have hit the end of the current page's object area and should advance
53 // to the next page of objects.
54 bool HeapObjectIterator::AdvanceToNextPage() {
55 DCHECK_EQ(cur_addr_, cur_end_);
56 if (current_page_ == page_range_.end()) return false;
57 Page* cur_page = *(current_page_++);
58 space_->heap()
59 ->mark_compact_collector()
60 ->sweeper()
61 .SweepOrWaitUntilSweepingCompleted(cur_page);
62 cur_addr_ = cur_page->area_start();
63 cur_end_ = cur_page->area_end();
64 DCHECK(cur_page->SweepingDone());
65 return true;
66 }
67
68 PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
69 : heap_(heap) {
70 AllSpaces spaces(heap_);
71 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
72 space->PauseAllocationObservers();
73 }
74 }
75
76 PauseAllocationObserversScope::~PauseAllocationObserversScope() {
77 AllSpaces spaces(heap_);
78 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
79 space->ResumeAllocationObservers();
80 }
81 }
82
83 // -----------------------------------------------------------------------------
84 // CodeRange
85
86
87 CodeRange::CodeRange(Isolate* isolate)
88 : isolate_(isolate),
89 code_range_(NULL),
90 free_list_(0),
91 allocation_list_(0),
92 current_allocation_block_index_(0) {}
93
94
95 bool CodeRange::SetUp(size_t requested) {
96 DCHECK(code_range_ == NULL);
97
98 if (requested == 0) {
99 // When a target requires the code range feature, we put all code objects
100 // in a kMaximalCodeRangeSize range of virtual address space, so that
101 // they can call each other with near calls.
102 if (kRequiresCodeRange) {
103 requested = kMaximalCodeRangeSize;
104 } else {
105 return true;
106 }
107 }
108
109 if (requested <= kMinimumCodeRangeSize) {
110 requested = kMinimumCodeRangeSize;
111 }
112
113 const size_t reserved_area =
114 kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
115 if (requested < (kMaximalCodeRangeSize - reserved_area))
116 requested += reserved_area;
117
118 DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
119
120 code_range_ = new base::VirtualMemory(
121 requested, Max(kCodeRangeAreaAlignment,
122 static_cast<size_t>(base::OS::AllocateAlignment())));
123 CHECK(code_range_ != NULL);
124 if (!code_range_->IsReserved()) {
125 delete code_range_;
126 code_range_ = NULL;
127 return false;
128 }
129
130 // We are sure that we have mapped a block of the requested size.
131 DCHECK(code_range_->size() == requested);
132 Address base = reinterpret_cast<Address>(code_range_->address());
133
134 // On some platforms, specifically Win64, we need to reserve some pages at
135 // the beginning of an executable space.
136 if (reserved_area > 0) {
137 if (!code_range_->Commit(base, reserved_area, true)) {
138 delete code_range_;
139 code_range_ = NULL;
140 return false;
141 }
142 base += reserved_area;
143 }
144 Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
145 size_t size = code_range_->size() - (aligned_base - base) - reserved_area;
146 allocation_list_.Add(FreeBlock(aligned_base, size));
147 current_allocation_block_index_ = 0;
148
149 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
150 return true;
151 }
152
153
154 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
155 const FreeBlock* right) {
156 // The entire point of CodeRange is that the difference between two
157 // addresses in the range can be represented as a signed 32-bit int,
158 // so the cast is semantically correct.
159 return static_cast<int>(left->start - right->start);
160 }
161
162
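// Advances current_allocation_block_index_ to a block with at least
// |requested| bytes. If none is left, the free list is merged back into the
// allocation list (sorted by address, with adjacent blocks coalesced) and the
// search restarts. Returns false if the code range is full or too fragmented.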
163 bool CodeRange::GetNextAllocationBlock(size_t requested) {
164 for (current_allocation_block_index_++;
165 current_allocation_block_index_ < allocation_list_.length();
166 current_allocation_block_index_++) {
167 if (requested <= allocation_list_[current_allocation_block_index_].size) {
168 return true; // Found a large enough allocation block.
169 }
170 }
171
172 // Sort and merge the free blocks on the free list and the allocation list.
173 free_list_.AddAll(allocation_list_);
174 allocation_list_.Clear();
175 free_list_.Sort(&CompareFreeBlockAddress);
176 for (int i = 0; i < free_list_.length();) {
177 FreeBlock merged = free_list_[i];
178 i++;
179 // Add adjacent free blocks to the current merged block.
180 while (i < free_list_.length() &&
181 free_list_[i].start == merged.start + merged.size) {
182 merged.size += free_list_[i].size;
183 i++;
184 }
185 if (merged.size > 0) {
186 allocation_list_.Add(merged);
187 }
188 }
189 free_list_.Clear();
190
191 for (current_allocation_block_index_ = 0;
192 current_allocation_block_index_ < allocation_list_.length();
193 current_allocation_block_index_++) {
194 if (requested <= allocation_list_[current_allocation_block_index_].size) {
195 return true; // Found a large enough allocation block.
196 }
197 }
198 current_allocation_block_index_ = 0;
199 // Code range is full or too fragmented.
200 return false;
201 }
202
203
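// Reserves a block of |requested_size| bytes from the code range and commits
// |commit_size| bytes of it as executable. Returns the start address and sets
// *allocated to the reserved size, or returns NULL with *allocated == 0 on
// failure.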
204 Address CodeRange::AllocateRawMemory(const size_t requested_size,
205 const size_t commit_size,
206 size_t* allocated) {
207 // requested_size includes the guard pages while commit_size does not. Make
208 // sure callers know about this invariant.
209 CHECK_LE(commit_size,
210 requested_size - 2 * MemoryAllocator::CodePageGuardSize());
211 FreeBlock current;
212 if (!ReserveBlock(requested_size, &current)) {
213 *allocated = 0;
214 return NULL;
215 }
216 *allocated = current.size;
217 DCHECK(*allocated <= current.size);
218 DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
219 if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
220 code_range_, current.start, commit_size, *allocated)) {
221 *allocated = 0;
222 ReleaseBlock(&current);
223 return NULL;
224 }
225 return current.start;
226 }
227
228
229 bool CodeRange::CommitRawMemory(Address start, size_t length) {
230 return isolate_->heap()->memory_allocator()->CommitMemory(start, length,
231 EXECUTABLE);
232 }
233
234
235 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
236 return code_range_->Uncommit(start, length);
237 }
238
239
240 void CodeRange::FreeRawMemory(Address address, size_t length) {
241 DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
242 base::LockGuard<base::Mutex> guard(&code_range_mutex_);
243 free_list_.Add(FreeBlock(address, length));
244 code_range_->Uncommit(address, length);
245 }
246
247
248 void CodeRange::TearDown() {
249 delete code_range_; // Frees all memory in the virtual memory range.
250 code_range_ = NULL;
251 base::LockGuard<base::Mutex> guard(&code_range_mutex_);
252 free_list_.Free();
253 allocation_list_.Free();
254 }
255
256
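// Carves a block of at least |requested_size| bytes (rounded up to
// MemoryChunk::kAlignment) off the front of the current allocation block.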
257 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
258 base::LockGuard<base::Mutex> guard(&code_range_mutex_);
259 DCHECK(allocation_list_.length() == 0 ||
260 current_allocation_block_index_ < allocation_list_.length());
261 if (allocation_list_.length() == 0 ||
262 requested_size > allocation_list_[current_allocation_block_index_].size) {
263 // Find an allocation block large enough.
264 if (!GetNextAllocationBlock(requested_size)) return false;
265 }
266 // Commit the requested memory at the start of the current allocation block.
267 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
268 *block = allocation_list_[current_allocation_block_index_];
269 // Don't leave a small free block; it would be useless for a large object or chunk.
270 if (aligned_requested < (block->size - Page::kPageSize)) {
271 block->size = aligned_requested;
272 }
273 DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
274 allocation_list_[current_allocation_block_index_].start += block->size;
275 allocation_list_[current_allocation_block_index_].size -= block->size;
276 return true;
277 }
278
279
280 void CodeRange::ReleaseBlock(const FreeBlock* block) {
281 base::LockGuard<base::Mutex> guard(&code_range_mutex_);
282 free_list_.Add(*block);
283 }
284
285
286 // -----------------------------------------------------------------------------
287 // MemoryAllocator
288 //
289
290 MemoryAllocator::MemoryAllocator(Isolate* isolate)
291 : isolate_(isolate),
292 code_range_(nullptr),
293 capacity_(0),
294 capacity_executable_(0),
295 size_(0),
296 size_executable_(0),
297 lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
298 highest_ever_allocated_(reinterpret_cast<void*>(0)),
299 unmapper_(this) {}
300
301 bool MemoryAllocator::SetUp(size_t capacity, size_t capacity_executable,
302 size_t code_range_size) {
303 capacity_ = RoundUp(capacity, Page::kPageSize);
304 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
305 DCHECK_GE(capacity_, capacity_executable_);
306
307 size_ = 0;
308 size_executable_ = 0;
309
310 code_range_ = new CodeRange(isolate_);
311 if (!code_range_->SetUp(code_range_size)) return false;
312
313 return true;
314 }
315
316
317 void MemoryAllocator::TearDown() {
318 unmapper()->TearDown();
319
320 // Check that spaces were torn down before MemoryAllocator.
321 DCHECK_EQ(size_.Value(), 0u);
322 // TODO(gc) this will be true again when we fix FreeMemory.
323 // DCHECK(size_executable_ == 0);
324 capacity_ = 0;
325 capacity_executable_ = 0;
326
327 if (last_chunk_.IsReserved()) {
328 last_chunk_.Release();
329 }
330
331 delete code_range_;
332 code_range_ = nullptr;
333 }
334
335 class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
336 public:
337 explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}
338
339 private:
340 // v8::Task overrides.
341 void Run() override {
342 unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
343 unmapper_->pending_unmapping_tasks_semaphore_.Signal();
344 }
345
346 Unmapper* unmapper_;
347 DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
348 };
349
350 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
351 ReconsiderDelayedChunks();
352 if (FLAG_concurrent_sweeping) {
353 V8::GetCurrentPlatform()->CallOnBackgroundThread(
354 new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
355 concurrent_unmapping_tasks_active_++;
356 } else {
357 PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
358 }
359 }
360
361 bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
362 bool waited = false;
363 while (concurrent_unmapping_tasks_active_ > 0) {
364 pending_unmapping_tasks_semaphore_.Wait();
365 concurrent_unmapping_tasks_active_--;
366 waited = true;
367 }
368 return waited;
369 }
370
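// Drains the chunk queues: regular chunks are freed and, if flagged POOLED,
// moved to the pooled queue; in kReleasePooled mode the pooled chunks are then
// released entirely; non-regular chunks are always freed.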
371 template <MemoryAllocator::Unmapper::FreeMode mode>
372 void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
373 MemoryChunk* chunk = nullptr;
374 // Regular chunks.
375 while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
376 bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
377 allocator_->PerformFreeMemory(chunk);
378 if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
379 }
380 if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
381 // The previous loop uncommitted any pages marked as pooled and added them
382 // to the pooled list. In the kReleasePooled case we need to free them as
383 // well.
384 while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
385 allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
386 }
387 }
388 // Non-regular chunks.
389 while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
390 allocator_->PerformFreeMemory(chunk);
391 }
392 }
393
394 void MemoryAllocator::Unmapper::TearDown() {
395 WaitUntilCompleted();
396 ReconsiderDelayedChunks();
397 CHECK(delayed_regular_chunks_.empty());
398 PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
399 for (int i = 0; i < kNumberOfChunkQueues; i++) {
400 DCHECK(chunks_[i].empty());
401 }
402 }
403
404 void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
405 std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
406 // Move constructed, so the permanent list should be empty.
407 DCHECK(delayed_regular_chunks_.empty());
408 for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
409 AddMemoryChunkSafe<kRegular>(*it);
410 }
411 }
412
413 bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
414 MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
415 // We cannot free a memory chunk in new space while the sweeper is running
416 // because the memory chunk can be in the queue of a sweeper task.
417 // Chunks in old generation are unmapped if they are empty.
418 DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
419 return !chunk->InNewSpace() || mc == nullptr || !FLAG_concurrent_sweeping ||
420 mc->sweeper().IsSweepingCompleted(NEW_SPACE);
421 }
422
423 bool MemoryAllocator::CommitMemory(Address base, size_t size,
424 Executability executable) {
425 if (!base::VirtualMemory::CommitRegion(base, size,
426 executable == EXECUTABLE)) {
427 return false;
428 }
429 UpdateAllocatedSpaceLimits(base, base + size);
430 return true;
431 }
432
433
434 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
435 Executability executable) {
436 // TODO(gc) make code_range part of memory allocator?
437 // Code which is part of the code-range does not have its own VirtualMemory.
438 DCHECK(code_range() == NULL ||
439 !code_range()->contains(static_cast<Address>(reservation->address())));
440 DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
441 reservation->size() <= Page::kPageSize);
442
443 reservation->Release();
444 }
445
446
447 void MemoryAllocator::FreeMemory(Address base, size_t size,
448 Executability executable) {
449 // TODO(gc) make code_range part of memory allocator?
450 if (code_range() != NULL &&
451 code_range()->contains(static_cast<Address>(base))) {
452 DCHECK(executable == EXECUTABLE);
453 code_range()->FreeRawMemory(base, size);
454 } else {
455 DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
456 bool result = base::VirtualMemory::ReleaseRegion(base, size);
457 USE(result);
458 DCHECK(result);
459 }
460 }
461
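// Reserves |size| bytes of virtual memory aligned to |alignment| and hands the
// reservation over to |controller|. Returns NULL if the reservation fails.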
462 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
463 base::VirtualMemory* controller) {
464 base::VirtualMemory reservation(size, alignment);
465
466 if (!reservation.IsReserved()) return NULL;
467 size_.Increment(reservation.size());
468 Address base =
469 RoundUp(static_cast<Address>(reservation.address()), alignment);
470 controller->TakeControl(&reservation);
471 return base;
472 }
473
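// Reserves |reserve_size| bytes and commits the leading |commit_size| bytes.
// Executable memory is committed with guard pages via CommitExecutableMemory.
// On failure the whole reservation is released and NULL is returned.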
474 Address MemoryAllocator::AllocateAlignedMemory(
475 size_t reserve_size, size_t commit_size, size_t alignment,
476 Executability executable, base::VirtualMemory* controller) {
477 DCHECK(commit_size <= reserve_size);
478 base::VirtualMemory reservation;
479 Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
480 if (base == NULL) return NULL;
481
482 if (executable == EXECUTABLE) {
483 if (!CommitExecutableMemory(&reservation, base, commit_size,
484 reserve_size)) {
485 base = NULL;
486 }
487 } else {
488 if (reservation.Commit(base, commit_size, false)) {
489 UpdateAllocatedSpaceLimits(base, base + commit_size);
490 } else {
491 base = NULL;
492 }
493 }
494
495 if (base == NULL) {
496 // Failed to commit the body. Release the mapping and any partially
497 // committed regions inside it.
498 reservation.Release();
499 size_.Decrement(reserve_size);
500 return NULL;
501 }
502
503 controller->TakeControl(&reservation);
504 return base;
505 }
506
507 void Page::InitializeAsAnchor(Space* space) {
508 set_owner(space);
509 set_next_chunk(this);
510 set_prev_chunk(this);
511 SetFlags(0, ~0);
512 SetFlag(ANCHOR);
513 }
514
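// Initializes the chunk header at |base|: owner, area bounds, slot sets,
// sweeping state, high water mark, and (optionally) the backing reservation.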
515 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
516 Address area_start, Address area_end,
517 Executability executable, Space* owner,
518 base::VirtualMemory* reservation) {
519 MemoryChunk* chunk = FromAddress(base);
520
521 DCHECK(base == chunk->address());
522
523 chunk->heap_ = heap;
524 chunk->size_ = size;
525 chunk->area_start_ = area_start;
526 chunk->area_end_ = area_end;
527 chunk->flags_ = Flags(NO_FLAGS);
528 chunk->set_owner(owner);
529 chunk->InitializeReservedMemory();
530 chunk->old_to_new_slots_.SetValue(nullptr);
531 chunk->old_to_old_slots_ = nullptr;
532 chunk->typed_old_to_new_slots_.SetValue(nullptr);
533 chunk->typed_old_to_old_slots_ = nullptr;
534 chunk->skip_list_ = nullptr;
535 chunk->progress_bar_ = 0;
536 chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
537 chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
538 chunk->mutex_ = new base::Mutex();
539 chunk->available_in_free_list_ = 0;
540 chunk->wasted_memory_ = 0;
541 chunk->ResetLiveBytes();
542 chunk->ClearLiveness();
543 chunk->set_next_chunk(nullptr);
544 chunk->set_prev_chunk(nullptr);
545 chunk->local_tracker_ = nullptr;
546
547 DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
548
549 if (executable == EXECUTABLE) {
550 chunk->SetFlag(IS_EXECUTABLE);
551 }
552
553 if (reservation != nullptr) {
554 chunk->reservation_.TakeControl(reservation);
555 }
556
557 return chunk;
558 }
559
560
561 // Commit MemoryChunk area to the requested size.
562 bool MemoryChunk::CommitArea(size_t requested) {
563 size_t guard_size =
564 IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
565 size_t header_size = area_start() - address() - guard_size;
566 size_t commit_size =
567 RoundUp(header_size + requested, MemoryAllocator::GetCommitPageSize());
568 size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
569 MemoryAllocator::GetCommitPageSize());
570
571 if (commit_size > committed_size) {
572 // Commit size should be less than or equal to the reserved size.
573 DCHECK(commit_size <= size() - 2 * guard_size);
574 // Append the committed area.
575 Address start = address() + committed_size + guard_size;
576 size_t length = commit_size - committed_size;
577 if (reservation_.IsReserved()) {
578 Executability executable =
579 IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
580 if (!heap()->memory_allocator()->CommitMemory(start, length,
581 executable)) {
582 return false;
583 }
584 } else {
585 CodeRange* code_range = heap_->memory_allocator()->code_range();
586 DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
587 if (!code_range->CommitRawMemory(start, length)) return false;
588 }
589
590 if (Heap::ShouldZapGarbage()) {
591 heap_->memory_allocator()->ZapBlock(start, length);
592 }
593 } else if (commit_size < committed_size) {
594 DCHECK(commit_size > 0);
595 // Shrink the committed area.
596 size_t length = committed_size - commit_size;
597 Address start = address() + committed_size + guard_size - length;
598 if (reservation_.IsReserved()) {
599 if (!reservation_.Uncommit(start, length)) return false;
600 } else {
601 CodeRange* code_range = heap_->memory_allocator()->code_range();
602 DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
603 if (!code_range->UncommitRawMemory(start, length)) return false;
604 }
605 }
606
607 area_end_ = area_start_ + requested;
608 return true;
609 }
610
611 size_t MemoryChunk::CommittedPhysicalMemory() {
612 if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE)
613 return size();
614 return high_water_mark_.Value();
615 }
616
617 void MemoryChunk::InsertAfter(MemoryChunk* other) {
618 MemoryChunk* other_next = other->next_chunk();
619
620 set_next_chunk(other_next);
621 set_prev_chunk(other);
622 other_next->set_prev_chunk(this);
623 other->set_next_chunk(this);
624 }
625
626
627 void MemoryChunk::Unlink() {
628 MemoryChunk* next_element = next_chunk();
629 MemoryChunk* prev_element = prev_chunk();
630 next_element->set_prev_chunk(prev_element);
631 prev_element->set_next_chunk(next_element);
632 set_prev_chunk(NULL);
633 set_next_chunk(NULL);
634 }
635
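// Uncommits |bytes_to_shrink| bytes from the end of the chunk's area and, for
// executable chunks, re-installs the trailing guard page.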
636 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
637 DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize()));
638 DCHECK_EQ(0u, bytes_to_shrink % GetCommitPageSize());
639 Address free_start = chunk->area_end_ - bytes_to_shrink;
640 // Don't adjust the size of the page. The area is just uncommitted but not
641 // released.
642 chunk->area_end_ -= bytes_to_shrink;
643 UncommitBlock(free_start, bytes_to_shrink);
644 if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
645 if (chunk->reservation_.IsReserved())
646 chunk->reservation_.Guard(chunk->area_end_);
647 else
648 base::OS::Guard(chunk->area_end_, GetCommitPageSize());
649 }
650 }
651
652 MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
653 size_t commit_area_size,
654 Executability executable,
655 Space* owner) {
656 DCHECK_LE(commit_area_size, reserve_area_size);
657
658 size_t chunk_size;
659 Heap* heap = isolate_->heap();
660 Address base = nullptr;
661 base::VirtualMemory reservation;
662 Address area_start = nullptr;
663 Address area_end = nullptr;
664
665 //
666 // MemoryChunk layout:
667 //
668 // Executable
669 // +----------------------------+<- base aligned with MemoryChunk::kAlignment
670 // | Header |
671 // +----------------------------+<- base + CodePageGuardStartOffset
672 // | Guard |
673 // +----------------------------+<- area_start_
674 // | Area |
675 // +----------------------------+<- area_end_ (area_start + commit_area_size)
676 // | Committed but not used |
677 // +----------------------------+<- aligned at OS page boundary
678 // | Reserved but not committed |
679 // +----------------------------+<- aligned at OS page boundary
680 // | Guard |
681 // +----------------------------+<- base + chunk_size
682 //
683 // Non-executable
684 // +----------------------------+<- base aligned with MemoryChunk::kAlignment
685 // | Header |
686 // +----------------------------+<- area_start_ (base + kObjectStartOffset)
687 // | Area |
688 // +----------------------------+<- area_end_ (area_start + commit_area_size)
689 // | Committed but not used |
690 // +----------------------------+<- aligned at OS page boundary
691 // | Reserved but not committed |
692 // +----------------------------+<- base + chunk_size
693 //
694
695 if (executable == EXECUTABLE) {
696 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
697 GetCommitPageSize()) +
698 CodePageGuardSize();
699
700 // Check executable memory limit.
701 if ((size_executable_.Value() + chunk_size) > capacity_executable_) {
702 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
703 "V8 Executable Allocation capacity exceeded"));
704 return NULL;
705 }
706
707 // Size of header (not executable) plus area (executable).
708 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
709 GetCommitPageSize());
710 // Allocate executable memory either from code range or from the
711 // OS.
712 #ifdef V8_TARGET_ARCH_MIPS64
713 // Use code range only for large object space on mips64 to keep address
714 // range within 256-MB memory region.
715 if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
716 #else
717 if (code_range()->valid()) {
718 #endif
719 base =
720 code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
721 DCHECK(
722 IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
723 if (base == NULL) return NULL;
724 size_.Increment(chunk_size);
725 // Update executable memory size.
726 size_executable_.Increment(chunk_size);
727 } else {
728 base = AllocateAlignedMemory(chunk_size, commit_size,
729 MemoryChunk::kAlignment, executable,
730 &reservation);
731 if (base == NULL) return NULL;
732 // Update executable memory size.
733 size_executable_.Increment(reservation.size());
734 }
735
736 if (Heap::ShouldZapGarbage()) {
737 ZapBlock(base, CodePageGuardStartOffset());
738 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
739 }
740
741 area_start = base + CodePageAreaStartOffset();
742 area_end = area_start + commit_area_size;
743 } else {
744 chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
745 GetCommitPageSize());
746 size_t commit_size =
747 RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
748 GetCommitPageSize());
749 base =
750 AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
751 executable, &reservation);
752
753 if (base == NULL) return NULL;
754
755 if (Heap::ShouldZapGarbage()) {
756 ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
757 }
758
759 area_start = base + Page::kObjectStartOffset;
760 area_end = area_start + commit_area_size;
761 }
762
763 // Use chunk_size for statistics and callbacks because we assume that they
764 // treat reserved but not-yet committed memory regions of chunks as allocated.
765 isolate_->counters()->memory_allocated()->Increment(
766 static_cast<int>(chunk_size));
767
768 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
769
770 // We cannot use the last chunk in the address space because we would
771 // overflow when comparing top and limit if this chunk is used for a
772 // linear allocation area.
773 if ((reinterpret_cast<uintptr_t>(base) + chunk_size) == 0u) {
774 CHECK(!last_chunk_.IsReserved());
775 last_chunk_.TakeControl(&reservation);
776 UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()),
777 last_chunk_.size());
778 size_.Decrement(chunk_size);
779 if (executable == EXECUTABLE) {
780 size_executable_.Decrement(chunk_size);
781 }
782 CHECK(last_chunk_.IsReserved());
783 return AllocateChunk(reserve_area_size, commit_area_size, executable,
784 owner);
785 }
786
787 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
788 executable, owner, &reservation);
789 }
790
791
792 void Page::ResetFreeListStatistics() {
793 wasted_memory_ = 0;
794 available_in_free_list_ = 0;
795 }
796
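// Sums the available bytes over all free list categories of this page.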
797 size_t Page::AvailableInFreeList() {
798 size_t sum = 0;
799 ForAllFreeListCategories([&sum](FreeListCategory* category) {
800 sum += category->available();
801 });
802 return sum;
803 }
804
805 size_t Page::ShrinkToHighWaterMark() {
806 // Shrink pages to high water mark. The water mark points either to a filler
807 // or the area_end.
808 HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
809 if (filler->address() == area_end()) return 0;
810 CHECK(filler->IsFiller());
811 if (!filler->IsFreeSpace()) return 0;
812
813 #ifdef DEBUG
814 // Check that the filler is indeed the last filler on the page.
815 HeapObjectIterator it(this);
816 HeapObject* filler2 = nullptr;
817 for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
818 filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
819 }
820 if (filler2 == nullptr || filler2->address() == area_end()) return 0;
821 DCHECK(filler2->IsFiller());
822 // The deserializer might leave behind fillers. In this case we need to
823 // iterate even further.
824 while ((filler2->address() + filler2->Size()) != area_end()) {
825 filler2 = HeapObject::FromAddress(filler2->address() + filler2->Size());
826 DCHECK(filler2->IsFiller());
827 }
828 DCHECK_EQ(filler->address(), filler2->address());
829 #endif // DEBUG
830
831 size_t unused = RoundDown(
832 static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
833 MemoryAllocator::GetCommitPageSize());
834 if (unused > 0) {
835 if (FLAG_trace_gc_verbose) {
836 PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
837 reinterpret_cast<void*>(this),
838 reinterpret_cast<void*>(area_end()),
839 reinterpret_cast<void*>(area_end() - unused));
840 }
841 heap()->CreateFillerObjectAt(
842 filler->address(),
843 static_cast<int>(area_end() - filler->address() - unused),
844 ClearRecordedSlots::kNo);
845 heap()->memory_allocator()->ShrinkChunk(this, unused);
846 CHECK(filler->IsFiller());
847 CHECK_EQ(filler->address() + filler->Size(), area_end());
848 }
849 return unused;
850 }
851
852 void Page::CreateBlackArea(Address start, Address end) {
853 DCHECK(heap()->incremental_marking()->black_allocation());
854 DCHECK_EQ(Page::FromAddress(start), this);
855 DCHECK_NE(start, end);
856 DCHECK_EQ(Page::FromAddress(end - 1), this);
857 markbits()->SetRange(AddressToMarkbitIndex(start),
858 AddressToMarkbitIndex(end));
859 IncrementLiveBytes(static_cast<int>(end - start));
860 }
861
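// Releases the tail of |chunk| starting at |start_free| back to the OS and
// updates the allocator's size accounting. Only non-executable chunks support
// partial freeing.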
862 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
863 Address start_free) {
864 // We do not allow partial shrink for code.
865 DCHECK(chunk->executable() == NOT_EXECUTABLE);
866
867 intptr_t size;
868 base::VirtualMemory* reservation = chunk->reserved_memory();
869 DCHECK(reservation->IsReserved());
870 size = static_cast<intptr_t>(reservation->size());
871
872 size_t to_free_size = size - (start_free - chunk->address());
873
874 DCHECK(size_.Value() >= to_free_size);
875 size_.Decrement(to_free_size);
876 isolate_->counters()->memory_allocated()->Decrement(
877 static_cast<int>(to_free_size));
878 chunk->set_size(size - to_free_size);
879
880 reservation->ReleasePartial(start_free);
881 }
882
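// First phase of freeing a chunk: log the deletion, update the size counters,
// and mark the chunk as PRE_FREED. The memory itself is released later in
// PerformFreeMemory.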
883 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
884 DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
885 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
886
887 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
888 chunk->IsEvacuationCandidate());
889
890 base::VirtualMemory* reservation = chunk->reserved_memory();
891 const size_t size =
892 reservation->IsReserved() ? reservation->size() : chunk->size();
893 DCHECK_GE(size_.Value(), static_cast<size_t>(size));
894 size_.Decrement(size);
895 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
896 if (chunk->executable() == EXECUTABLE) {
897 DCHECK_GE(size_executable_.Value(), size);
898 size_executable_.Decrement(size);
899 }
900
901 chunk->SetFlag(MemoryChunk::PRE_FREED);
902 }
903
904
905 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
906 DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
907 chunk->ReleaseAllocatedMemory();
908
909 base::VirtualMemory* reservation = chunk->reserved_memory();
910 if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
911 UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
912 } else {
913 if (reservation->IsReserved()) {
914 FreeMemory(reservation, chunk->executable());
915 } else {
916 FreeMemory(chunk->address(), chunk->size(), chunk->executable());
917 }
918 }
919 }
920
921 template <MemoryAllocator::FreeMode mode>
922 void MemoryAllocator::Free(MemoryChunk* chunk) {
923 switch (mode) {
924 case kFull:
925 PreFreeMemory(chunk);
926 PerformFreeMemory(chunk);
927 break;
928 case kAlreadyPooled:
929 // Pooled pages cannot be touched anymore as their memory is uncommitted.
930 FreeMemory(chunk->address(), static_cast<size_t>(MemoryChunk::kPageSize),
931 Executability::NOT_EXECUTABLE);
932 break;
933 case kPooledAndQueue:
934 DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
935 DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
936 chunk->SetFlag(MemoryChunk::POOLED);
937 // Fall through to kPreFreeAndQueue.
938 case kPreFreeAndQueue:
939 PreFreeMemory(chunk);
940 // The chunks added to this queue will be freed by a concurrent thread.
941 unmapper()->AddMemoryChunkSafe(chunk);
942 break;
943 }
944 }
945
946 template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
947
948 template void MemoryAllocator::Free<MemoryAllocator::kAlreadyPooled>(
949 MemoryChunk* chunk);
950
951 template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
952 MemoryChunk* chunk);
953
954 template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
955 MemoryChunk* chunk);
956
957 template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
958 Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
959 Executability executable) {
960 MemoryChunk* chunk = nullptr;
961 if (alloc_mode == kPooled) {
962 DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
963 DCHECK_EQ(executable, NOT_EXECUTABLE);
964 chunk = AllocatePagePooled(owner);
965 }
966 if (chunk == nullptr) {
967 chunk = AllocateChunk(size, size, executable, owner);
968 }
969 if (chunk == nullptr) return nullptr;
970 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
971 }
972
973 template Page*
974 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
975 size_t size, PagedSpace* owner, Executability executable);
976 template Page*
977 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
978 size_t size, SemiSpace* owner, Executability executable);
979 template Page*
980 MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
981 size_t size, SemiSpace* owner, Executability executable);
982
983 LargePage* MemoryAllocator::AllocateLargePage(size_t size,
984 LargeObjectSpace* owner,
985 Executability executable) {
986 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
987 if (chunk == nullptr) return nullptr;
988 return LargePage::Initialize(isolate_->heap(), chunk, executable, owner);
989 }
990
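// Tries to reuse a pooled chunk from the unmapper. The chunk's memory is
// recommitted and its header reinitialized; returns nullptr if the pool is
// empty or committing fails.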
991 template <typename SpaceType>
992 MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
993 MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
994 if (chunk == nullptr) return nullptr;
995 const int size = MemoryChunk::kPageSize;
996 const Address start = reinterpret_cast<Address>(chunk);
997 const Address area_start = start + MemoryChunk::kObjectStartOffset;
998 const Address area_end = start + size;
999 if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
1000 return nullptr;
1001 }
1002 base::VirtualMemory reservation(start, size);
1003 MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
1004 NOT_EXECUTABLE, owner, &reservation);
1005 size_.Increment(size);
1006 return chunk;
1007 }
1008
1009 bool MemoryAllocator::CommitBlock(Address start, size_t size,
1010 Executability executable) {
1011 if (!CommitMemory(start, size, executable)) return false;
1012
1013 if (Heap::ShouldZapGarbage()) {
1014 ZapBlock(start, size);
1015 }
1016
1017 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
1018 return true;
1019 }
1020
1021
1022 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
1023 if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
1024 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
1025 return true;
1026 }
1027
1028
1029 void MemoryAllocator::ZapBlock(Address start, size_t size) {
1030 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
1031 Memory::Address_at(start + s) = kZapValue;
1032 }
1033 }
1034
1035 #ifdef DEBUG
1036 void MemoryAllocator::ReportStatistics() {
1037 size_t size = Size();
1038 float pct = static_cast<float>(capacity_ - size) / capacity_;
1039 PrintF(" capacity: %zu , used: %" V8PRIdPTR ", available: %%%d\n\n",
1040 capacity_, size, static_cast<int>(pct * 100));
1041 }
1042 #endif
1043
1044 size_t MemoryAllocator::CodePageGuardStartOffset() {
1045 // We are guarding code pages: the first OS page after the header
1046 // will be protected as non-writable.
1047 return RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
1048 }
1049
1050 size_t MemoryAllocator::CodePageGuardSize() {
1051 return static_cast<int>(GetCommitPageSize());
1052 }
1053
1054 size_t MemoryAllocator::CodePageAreaStartOffset() {
1055 // We are guarding code pages: the first OS page after the header
1056 // will be protected as non-writable.
1057 return CodePageGuardStartOffset() + CodePageGuardSize();
1058 }
1059
1060 size_t MemoryAllocator::CodePageAreaEndOffset() {
1061 // We are guarding code pages: the last OS page will be protected as
1062 // non-writable.
1063 return Page::kPageSize - static_cast<int>(GetCommitPageSize());
1064 }
1065
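// Returns the commit page size: FLAG_v8_os_page_size (given in KB) if set,
// otherwise the OS commit page size.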
1066 intptr_t MemoryAllocator::GetCommitPageSize() {
1067 if (FLAG_v8_os_page_size != 0) {
1068 DCHECK(base::bits::IsPowerOfTwo32(FLAG_v8_os_page_size));
1069 return FLAG_v8_os_page_size * KB;
1070 } else {
1071 return base::OS::CommitPageSize();
1072 }
1073 }
1074
1075
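// Commits an executable page: the non-executable header, a guard page after
// the header, the executable body, and a guard page at the end of the
// reservation. On failure any partially committed memory is uncommitted.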
1076 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
1077 Address start, size_t commit_size,
1078 size_t reserved_size) {
1079 // Commit page header (not executable).
1080 Address header = start;
1081 size_t header_size = CodePageGuardStartOffset();
1082 if (vm->Commit(header, header_size, false)) {
1083 // Create guard page after the header.
1084 if (vm->Guard(start + CodePageGuardStartOffset())) {
1085 // Commit page body (executable).
1086 Address body = start + CodePageAreaStartOffset();
1087 size_t body_size = commit_size - CodePageGuardStartOffset();
1088 if (vm->Commit(body, body_size, true)) {
1089 // Create guard page before the end.
1090 if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
1091 UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
1092 commit_size -
1093 CodePageGuardStartOffset());
1094 return true;
1095 }
1096 vm->Uncommit(body, body_size);
1097 }
1098 }
1099 vm->Uncommit(header, header_size);
1100 }
1101 return false;
1102 }
1103
1104
1105 // -----------------------------------------------------------------------------
1106 // MemoryChunk implementation
1107
1108 void MemoryChunk::ReleaseAllocatedMemory() {
1109 if (skip_list_ != nullptr) {
1110 delete skip_list_;
1111 skip_list_ = nullptr;
1112 }
1113 if (mutex_ != nullptr) {
1114 delete mutex_;
1115 mutex_ = nullptr;
1116 }
1117 if (old_to_new_slots_.Value() != nullptr) ReleaseOldToNewSlots();
1118 if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
1119 if (typed_old_to_new_slots_.Value() != nullptr) ReleaseTypedOldToNewSlots();
1120 if (typed_old_to_old_slots_ != nullptr) ReleaseTypedOldToOldSlots();
1121 if (local_tracker_ != nullptr) ReleaseLocalTracker();
1122 }
1123
1124 static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
1125 size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
1126 DCHECK(pages > 0);
1127 SlotSet* slot_set = new SlotSet[pages];
1128 for (size_t i = 0; i < pages; i++) {
1129 slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
1130 }
1131 return slot_set;
1132 }
1133
1134 void MemoryChunk::AllocateOldToNewSlots() {
1135 DCHECK(nullptr == old_to_new_slots_.Value());
1136 old_to_new_slots_.SetValue(AllocateSlotSet(size_, address()));
1137 }
1138
1139 void MemoryChunk::ReleaseOldToNewSlots() {
1140 SlotSet* old_to_new_slots = old_to_new_slots_.Value();
1141 delete[] old_to_new_slots;
1142 old_to_new_slots_.SetValue(nullptr);
1143 }
1144
1145 void MemoryChunk::AllocateOldToOldSlots() {
1146 DCHECK(nullptr == old_to_old_slots_);
1147 old_to_old_slots_ = AllocateSlotSet(size_, address());
1148 }
1149
1150 void MemoryChunk::ReleaseOldToOldSlots() {
1151 delete[] old_to_old_slots_;
1152 old_to_old_slots_ = nullptr;
1153 }
1154
1155 void MemoryChunk::AllocateTypedOldToNewSlots() {
1156 DCHECK(nullptr == typed_old_to_new_slots_.Value());
1157 typed_old_to_new_slots_.SetValue(new TypedSlotSet(address()));
1158 }
1159
1160 void MemoryChunk::ReleaseTypedOldToNewSlots() {
1161 TypedSlotSet* typed_old_to_new_slots = typed_old_to_new_slots_.Value();
1162 delete typed_old_to_new_slots;
1163 typed_old_to_new_slots_.SetValue(nullptr);
1164 }
1165
1166 void MemoryChunk::AllocateTypedOldToOldSlots() {
1167 DCHECK(nullptr == typed_old_to_old_slots_);
1168 typed_old_to_old_slots_ = new TypedSlotSet(address());
1169 }
1170
1171 void MemoryChunk::ReleaseTypedOldToOldSlots() {
1172 delete typed_old_to_old_slots_;
1173 typed_old_to_old_slots_ = nullptr;
1174 }
1175
1176 void MemoryChunk::AllocateLocalTracker() {
1177 DCHECK_NULL(local_tracker_);
1178 local_tracker_ = new LocalArrayBufferTracker(heap());
1179 }
1180
1181 void MemoryChunk::ReleaseLocalTracker() {
1182 DCHECK_NOT_NULL(local_tracker_);
1183 delete local_tracker_;
1184 local_tracker_ = nullptr;
1185 }
1186
1187 void MemoryChunk::ClearLiveness() {
1188 markbits()->Clear();
1189 ResetLiveBytes();
1190 }
1191
1192 // -----------------------------------------------------------------------------
1193 // PagedSpace implementation
1194
1195 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
1196 ObjectSpace::kObjectSpaceNewSpace);
1197 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
1198 ObjectSpace::kObjectSpaceOldSpace);
1199 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
1200 ObjectSpace::kObjectSpaceCodeSpace);
1201 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
1202 ObjectSpace::kObjectSpaceMapSpace);
1203
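// Notifies all registered allocation observers about an allocation of |size|
// bytes at |soon_object|, unless observers are currently paused.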
1204 void Space::AllocationStep(Address soon_object, int size) {
1205 if (!allocation_observers_paused_) {
1206 for (int i = 0; i < allocation_observers_->length(); ++i) {
1207 AllocationObserver* o = (*allocation_observers_)[i];
1208 o->AllocationStep(size, soon_object, size);
1209 }
1210 }
1211 }
1212
1213 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
1214 Executability executable)
1215 : Space(heap, space, executable), anchor_(this), free_list_(this) {
1216 area_size_ = MemoryAllocator::PageAreaSize(space);
1217 accounting_stats_.Clear();
1218
1219 allocation_info_.Reset(nullptr, nullptr);
1220 }
1221
1222
1223 bool PagedSpace::SetUp() { return true; }
1224
1225
1226 bool PagedSpace::HasBeenSetUp() { return true; }
1227
1228
1229 void PagedSpace::TearDown() {
1230 for (auto it = begin(); it != end();) {
1231 Page* page = *(it++); // Will be erased.
1232 ArrayBufferTracker::FreeAll(page);
1233 heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
1234 }
1235 anchor_.set_next_page(&anchor_);
1236 anchor_.set_prev_page(&anchor_);
1237 accounting_stats_.Clear();
1238 }
1239
1240 void PagedSpace::RefillFreeList() {
1241 // Any PagedSpace might invoke RefillFreeList; we filter out all spaces
1242 // other than the old-generation spaces (old, code, and map space).
1243 if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
1244 identity() != MAP_SPACE) {
1245 return;
1246 }
1247 MarkCompactCollector* collector = heap()->mark_compact_collector();
1248 intptr_t added = 0;
1249 {
1250 Page* p = nullptr;
1251 while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
1252 // Pages can change ownership only during compaction. This is safe because
1253 // no other competing action on the page links takes place while compaction
1254 // is in progress.
1255 if (is_local() && (p->owner() != this)) {
1256 base::LockGuard<base::Mutex> guard(
1257 reinterpret_cast<PagedSpace*>(p->owner())->mutex());
1258 p->Unlink();
1259 p->set_owner(this);
1260 p->InsertAfter(anchor_.prev_page());
1261 }
1262 added += RelinkFreeListCategories(p);
1263 added += p->wasted_memory();
1264 if (is_local() && (added > kCompactionMemoryWanted)) break;
1265 }
1266 }
1267 accounting_stats_.IncreaseCapacity(added);
1268 }
1269
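// Moves all pages of |other| into this space, relinking their free list
// categories, and merges the accounting statistics. The linear allocation
// area of |other| must already be empty.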
1270 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
1271 DCHECK(identity() == other->identity());
1272 // Unmerged fields:
1273 // area_size_
1274 // anchor_
1275
1276 other->EmptyAllocationInfo();
1277
1278 // Update and clear accounting statistics.
1279 accounting_stats_.Merge(other->accounting_stats_);
1280 other->accounting_stats_.Clear();
1281
1282 // The linear allocation area of {other} should be destroyed now.
1283 DCHECK(other->top() == nullptr);
1284 DCHECK(other->limit() == nullptr);
1285
1286 AccountCommitted(other->CommittedMemory());
1287
1288 // Move over pages.
1289 for (auto it = other->begin(); it != other->end();) {
1290 Page* p = *(it++);
1291
1292 // Relinking requires the category to be unlinked.
1293 other->UnlinkFreeListCategories(p);
1294
1295 p->Unlink();
1296 p->set_owner(this);
1297 p->InsertAfter(anchor_.prev_page());
1298 RelinkFreeListCategories(p);
1299 DCHECK_EQ(p->AvailableInFreeList(), p->available_in_free_list());
1300 }
1301 }
1302
1303
1304 size_t PagedSpace::CommittedPhysicalMemory() {
1305 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
1306 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1307 size_t size = 0;
1308 for (Page* page : *this) {
1309 size += page->CommittedPhysicalMemory();
1310 }
1311 return size;
1312 }
1313
1314 bool PagedSpace::ContainsSlow(Address addr) {
1315 Page* p = Page::FromAddress(addr);
1316 for (Page* page : *this) {
1317 if (page == p) return true;
1318 }
1319 return false;
1320 }
1321
1322 void PagedSpace::ShrinkImmortalImmovablePages() {
1323 DCHECK(!heap()->deserialization_complete());
1324 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1325 EmptyAllocationInfo();
1326 ResetFreeList();
1327
1328 for (Page* page : *this) {
1329 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
1330 size_t unused = page->ShrinkToHighWaterMark();
1331 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
1332 AccountUncommitted(unused);
1333 }
1334 }
1335
1336 bool PagedSpace::Expand() {
1337 const int size = AreaSize();
1338
1339 if (!heap()->CanExpandOldGeneration(size)) return false;
1340
1341 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
1342 if (p == nullptr) return false;
1343
1344 AccountCommitted(p->size());
1345
1346 // Pages created during bootstrapping may contain immortal immovable objects.
1347 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
1348
1349 DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
1350
1351 p->InsertAfter(anchor_.prev_page());
1352
1353 return true;
1354 }
1355
1356
1357 int PagedSpace::CountTotalPages() {
1358 int count = 0;
1359 for (Page* page : *this) {
1360 count++;
1361 USE(page);
1362 }
1363 return count;
1364 }
1365
1366
1367 void PagedSpace::ResetFreeListStatistics() {
1368 for (Page* page : *this) {
1369 page->ResetFreeListStatistics();
1370 }
1371 }
1372
1373 void PagedSpace::SetAllocationInfo(Address top, Address limit) {
1374 SetTopAndLimit(top, limit);
1375 if (top != nullptr && top != limit &&
1376 heap()->incremental_marking()->black_allocation()) {
1377 Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
1378 }
1379 }
1380
1381 void PagedSpace::MarkAllocationInfoBlack() {
1382 DCHECK(heap()->incremental_marking()->black_allocation());
1383 Address current_top = top();
1384 Address current_limit = limit();
1385 if (current_top != nullptr && current_top != current_limit) {
1386 Page::FromAllocationAreaAddress(current_top)
1387 ->CreateBlackArea(current_top, current_limit);
1388 }
1389 }
1390
1391 // Empty space allocation info, returning unused area to free list.
1392 void PagedSpace::EmptyAllocationInfo() {
1393 // Mark the old linear allocation area with a free space map so it can be
1394 // skipped when scanning the heap.
1395 Address current_top = top();
1396 Address current_limit = limit();
1397 if (current_top == nullptr) {
1398 DCHECK(current_limit == nullptr);
1399 return;
1400 }
1401
1402 if (heap()->incremental_marking()->black_allocation()) {
1403 Page* page = Page::FromAllocationAreaAddress(current_top);
1404
1405 // Clear the bits in the unused black area.
1406 if (current_top != current_limit) {
1407 page->markbits()->ClearRange(page->AddressToMarkbitIndex(current_top),
1408 page->AddressToMarkbitIndex(current_limit));
1409 page->IncrementLiveBytes(-static_cast<int>(current_limit - current_top));
1410 }
1411 }
1412
1413 SetTopAndLimit(NULL, NULL);
1414 DCHECK_GE(current_limit, current_top);
1415 Free(current_top, current_limit - current_top);
1416 }
1417
1418 void PagedSpace::IncreaseCapacity(size_t bytes) {
1419 accounting_stats_.ExpandSpace(bytes);
1420 }
1421
1422 void PagedSpace::ReleasePage(Page* page) {
1423 DCHECK_EQ(page->LiveBytes(), 0);
1424 DCHECK_EQ(page->owner(), this);
1425
1426 free_list_.EvictFreeListItems(page);
1427 DCHECK(!free_list_.ContainsPageFreeListItems(page));
1428
1429 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
1430 allocation_info_.Reset(nullptr, nullptr);
1431 }
1432
1433 // If page is still in a list, unlink it from that list.
1434 if (page->next_chunk() != NULL) {
1435 DCHECK(page->prev_chunk() != NULL);
1436 page->Unlink();
1437 }
1438
1439 AccountUncommitted(page->size());
1440 accounting_stats_.ShrinkSpace(page->area_size());
1441 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
1442 }
1443
1444 std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
1445 return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
1446 }
1447
1448 #ifdef DEBUG
1449 void PagedSpace::Print() {}
1450 #endif
1451
1452 #ifdef VERIFY_HEAP
1453 void PagedSpace::Verify(ObjectVisitor* visitor) {
1454 bool allocation_pointer_found_in_space =
1455 (allocation_info_.top() == allocation_info_.limit());
1456 for (Page* page : *this) {
1457 CHECK(page->owner() == this);
1458 if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
1459 allocation_pointer_found_in_space = true;
1460 }
1461 CHECK(page->SweepingDone());
1462 HeapObjectIterator it(page);
1463 Address end_of_previous_object = page->area_start();
1464 Address top = page->area_end();
1465 int black_size = 0;
1466 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1467 CHECK(end_of_previous_object <= object->address());
1468
1469 // The first word should be a map, and we expect all map pointers to
1470 // be in map space.
1471 Map* map = object->map();
1472 CHECK(map->IsMap());
1473 CHECK(heap()->map_space()->Contains(map));
1474
1475 // Perform space-specific object verification.
1476 VerifyObject(object);
1477
1478 // The object itself should look OK.
1479 object->ObjectVerify();
1480
1481 // All the interior pointers should be contained in the heap.
1482 int size = object->Size();
1483 object->IterateBody(map->instance_type(), size, visitor);
1484 if (ObjectMarking::IsBlack(object)) {
1485 black_size += size;
1486 }
1487
1488 CHECK(object->address() + size <= top);
1489 end_of_previous_object = object->address() + size;
1490 }
1491 CHECK_LE(black_size, page->LiveBytes());
1492 }
1493 CHECK(allocation_pointer_found_in_space);
1494 }
1495 #endif // VERIFY_HEAP
1496
1497 // -----------------------------------------------------------------------------
1498 // NewSpace implementation
1499
1500 bool NewSpace::SetUp(size_t initial_semispace_capacity,
1501 size_t maximum_semispace_capacity) {
1502 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
1503 DCHECK(base::bits::IsPowerOfTwo32(
1504 static_cast<uint32_t>(maximum_semispace_capacity)));
1505
1506 to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
1507 from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
1508 if (!to_space_.Commit()) {
1509 return false;
1510 }
1511 DCHECK(!from_space_.is_committed()); // No need to use memory yet.
1512 ResetAllocationInfo();
1513
1514 // Allocate and set up the histogram arrays if necessary.
1515 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1516 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1517 #define SET_NAME(name) \
1518 allocated_histogram_[name].set_name(#name); \
1519 promoted_histogram_[name].set_name(#name);
1520 INSTANCE_TYPE_LIST(SET_NAME)
1521 #undef SET_NAME
1522
1523 return true;
1524 }
1525
1526
1527 void NewSpace::TearDown() {
1528 if (allocated_histogram_) {
1529 DeleteArray(allocated_histogram_);
1530 allocated_histogram_ = NULL;
1531 }
1532 if (promoted_histogram_) {
1533 DeleteArray(promoted_histogram_);
1534 promoted_histogram_ = NULL;
1535 }
1536
1537 allocation_info_.Reset(nullptr, nullptr);
1538
1539 to_space_.TearDown();
1540 from_space_.TearDown();
1541 }
1542
1543 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
1544
1545
1546 void NewSpace::Grow() {
1547 // Double the semispace size but only up to maximum capacity.
1548 DCHECK(TotalCapacity() < MaximumCapacity());
1549 size_t new_capacity =
1550 Min(MaximumCapacity(),
1551 static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
1552 if (to_space_.GrowTo(new_capacity)) {
1553 // Only grow from space if we managed to grow to-space.
1554 if (!from_space_.GrowTo(new_capacity)) {
1555 // If we managed to grow to-space but couldn't grow from-space,
1556 // attempt to shrink to-space.
1557 if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
1558 // We are in an inconsistent state because we could not
1559 // commit/uncommit memory from new space.
1560 CHECK(false);
1561 }
1562 }
1563 }
1564 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1565 }
1566
1567
1568 void NewSpace::Shrink() {
1569 size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
1570 size_t rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1571 if (rounded_new_capacity < TotalCapacity() &&
1572 to_space_.ShrinkTo(rounded_new_capacity)) {
1573 // Only shrink from-space if we managed to shrink to-space.
1574 from_space_.Reset();
1575 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1576 // If we managed to shrink to-space but couldn't shrink from
1577 // space, attempt to grow to-space again.
1578 if (!to_space_.GrowTo(from_space_.current_capacity())) {
1579 // We are in an inconsistent state because we could not
1580 // commit/uncommit memory from new space.
1581 CHECK(false);
1582 }
1583 }
1584 }
1585 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1586 }
1587
1588 bool NewSpace::Rebalance() {
1589 CHECK(heap()->promotion_queue()->is_empty());
1590 // Order here is important to make use of the page pool.
1591 return to_space_.EnsureCurrentCapacity() &&
1592 from_space_.EnsureCurrentCapacity();
1593 }
1594
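// Adjusts the number of pages backing a committed semispace to match
// current_capacity_: surplus pages are unlinked and freed via the page pool,
// and missing pages are allocated (pooled) and initialized with a filler.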
1595 bool SemiSpace::EnsureCurrentCapacity() {
1596 if (is_committed()) {
1597 const int expected_pages =
1598 static_cast<int>(current_capacity_ / Page::kPageSize);
1599 int actual_pages = 0;
1600 Page* current_page = anchor()->next_page();
1601 while (current_page != anchor()) {
1602 actual_pages++;
1603 current_page = current_page->next_page();
1604 if (actual_pages > expected_pages) {
1605 Page* to_remove = current_page->prev_page();
1606 // Make sure we don't overtake the actual top pointer.
1607 CHECK_NE(to_remove, current_page_);
1608 to_remove->Unlink();
1609 // Clear new space flags to avoid this page being treated as a new
1610 // space page that is potentially being swept.
1611 to_remove->SetFlags(0, Page::kIsInNewSpaceMask);
1612 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
1613 to_remove);
1614 }
1615 }
1616 while (actual_pages < expected_pages) {
1617 actual_pages++;
1618 current_page =
1619 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1620 Page::kAllocatableMemory, this, executable());
1621 if (current_page == nullptr) return false;
1622 DCHECK_NOT_NULL(current_page);
1623 current_page->InsertAfter(anchor());
1624 current_page->ClearLiveness();
1625 current_page->SetFlags(anchor()->prev_page()->GetFlags(),
1626 Page::kCopyAllFlags);
1627 heap()->CreateFillerObjectAt(current_page->area_start(),
1628 static_cast<int>(current_page->area_size()),
1629 ClearRecordedSlots::kNo);
1630 }
1631 }
1632 return true;
1633 }
1634
1635 void LocalAllocationBuffer::Close() {
1636 if (IsValid()) {
1637 heap_->CreateFillerObjectAt(
1638 allocation_info_.top(),
1639 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
1640 ClearRecordedSlots::kNo);
1641 }
1642 }
1643
1644
1645 LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
1646 AllocationInfo allocation_info)
1647 : heap_(heap), allocation_info_(allocation_info) {
1648 if (IsValid()) {
1649 heap_->CreateFillerObjectAt(
1650 allocation_info_.top(),
1651 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
1652 ClearRecordedSlots::kNo);
1653 }
1654 }
1655
1656
1657 LocalAllocationBuffer::LocalAllocationBuffer(
1658 const LocalAllocationBuffer& other) {
1659 *this = other;
1660 }
1661
1662
1663 LocalAllocationBuffer& LocalAllocationBuffer::operator=(
1664 const LocalAllocationBuffer& other) {
1665 Close();
1666 heap_ = other.heap_;
1667 allocation_info_ = other.allocation_info_;
1668
1669 // This is needed since we (a) cannot yet use move-semantics, (b) want to
1670 // make the class easy to use by passing it around by value, and (c) want
1671 // {Close} to be called implicitly upon copy.
1672 const_cast<LocalAllocationBuffer&>(other)
1673 .allocation_info_.Reset(nullptr, nullptr);
1674 return *this;
1675 }
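// Note on the copy semantics implemented above: assigning one
// LocalAllocationBuffer to another first Close()s the destination (filling
// its unused tail with a filler object) and then takes over the source's
// allocation area, resetting the source to an empty area. Only one buffer
// therefore ever closes a given memory region, which is what makes passing
// buffers around by value safe.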
1676
1677
1678 void NewSpace::UpdateAllocationInfo() {
1679 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1680 allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
1681 UpdateInlineAllocationLimit(0);
1682 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1683 }
1684
1685
1686 void NewSpace::ResetAllocationInfo() {
1687 Address old_top = allocation_info_.top();
1688 to_space_.Reset();
1689 UpdateAllocationInfo();
1690 // Clear all mark-bits in the to-space.
1691 for (Page* p : to_space_) {
1692 p->ClearLiveness();
1693 }
1694 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
1695 }
1696
1697
1698 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1699 if (heap()->inline_allocation_disabled()) {
1700 // Lowest limit when linear allocation is disabled.
1701 Address high = to_space_.page_high();
1702 Address new_top = allocation_info_.top() + size_in_bytes;
1703 allocation_info_.set_limit(Min(new_top, high));
1704 } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
1705 // Normal limit is the end of the current page.
1706 allocation_info_.set_limit(to_space_.page_high());
1707 } else {
1708 // Lower limit during incremental marking.
1709 Address high = to_space_.page_high();
1710 Address new_top = allocation_info_.top() + size_in_bytes;
1711 Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
1712 allocation_info_.set_limit(Min(new_limit, high));
1713 }
1714 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1715 }
1716
1717
1718 bool NewSpace::AddFreshPage() {
1719 Address top = allocation_info_.top();
1720 DCHECK(!Page::IsAtObjectStart(top));
1721 if (!to_space_.AdvancePage()) {
1722 // No more pages left to advance.
1723 return false;
1724 }
1725
1726 // Clear remainder of current page.
1727 Address limit = Page::FromAllocationAreaAddress(top)->area_end();
1728 if (heap()->gc_state() == Heap::SCAVENGE) {
1729 heap()->promotion_queue()->SetNewLimit(limit);
1730 }
1731
1732 int remaining_in_page = static_cast<int>(limit - top);
1733 heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
1734 UpdateAllocationInfo();
1735
1736 return true;
1737 }
1738
1739
1740 bool NewSpace::AddFreshPageSynchronized() {
1741 base::LockGuard<base::Mutex> guard(&mutex_);
1742 return AddFreshPage();
1743 }
1744
1745
1746 bool NewSpace::EnsureAllocation(int size_in_bytes,
1747 AllocationAlignment alignment) {
1748 Address old_top = allocation_info_.top();
1749 Address high = to_space_.page_high();
1750 int filler_size = Heap::GetFillToAlign(old_top, alignment);
1751 int aligned_size_in_bytes = size_in_bytes + filler_size;
1752
1753 if (old_top + aligned_size_in_bytes > high) {
1754 // Not enough room in the page, try to allocate a new one.
1755 if (!AddFreshPage()) {
1756 return false;
1757 }
1758
1759 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
1760
1761 old_top = allocation_info_.top();
1762 high = to_space_.page_high();
1763 filler_size = Heap::GetFillToAlign(old_top, alignment);
1764 }
1765
1766 DCHECK(old_top + aligned_size_in_bytes <= high);
1767
1768 if (allocation_info_.limit() < high) {
1769 // Either the limit has been lowered because linear allocation was
1770 // disabled, because incremental marking wants a chance to do a step, or
1771 // because the idle scavenge job wants a chance to post a task.
1772 // Set the new limit accordingly.
1773 Address new_top = old_top + aligned_size_in_bytes;
1774 Address soon_object = old_top + filler_size;
1775 InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
1776 UpdateInlineAllocationLimit(aligned_size_in_bytes);
1777 }
1778 return true;
1779 }
1780
1781
1782 void NewSpace::StartNextInlineAllocationStep() {
1783 if (!allocation_observers_paused_) {
1784 top_on_previous_step_ =
1785 allocation_observers_->length() ? allocation_info_.top() : 0;
1786 UpdateInlineAllocationLimit(0);
1787 }
1788 }
1789
1790
1791 intptr_t NewSpace::GetNextInlineAllocationStepSize() {
1792 intptr_t next_step = 0;
1793 for (int i = 0; i < allocation_observers_->length(); ++i) {
1794 AllocationObserver* o = (*allocation_observers_)[i];
1795 next_step = next_step ? Min(next_step, o->bytes_to_next_step())
1796 : o->bytes_to_next_step();
1797 }
1798 DCHECK(allocation_observers_->length() == 0 || next_step != 0);
1799 return next_step;
1800 }
1801
1802 void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
1803 Space::AddAllocationObserver(observer);
1804 StartNextInlineAllocationStep();
1805 }
1806
1807 void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
1808 Space::RemoveAllocationObserver(observer);
1809 StartNextInlineAllocationStep();
1810 }
1811
1812 void NewSpace::PauseAllocationObservers() {
1813 // Do a step to account for memory allocated so far.
1814 InlineAllocationStep(top(), top(), nullptr, 0);
1815 Space::PauseAllocationObservers();
1816 top_on_previous_step_ = 0;
1817 UpdateInlineAllocationLimit(0);
1818 }
1819
1820 void NewSpace::ResumeAllocationObservers() {
1821 DCHECK(top_on_previous_step_ == 0);
1822 Space::ResumeAllocationObservers();
1823 StartNextInlineAllocationStep();
1824 }
1825
1826
1827 void NewSpace::InlineAllocationStep(Address top, Address new_top,
1828 Address soon_object, size_t size) {
1829 if (top_on_previous_step_) {
1830 int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
1831 for (int i = 0; i < allocation_observers_->length(); ++i) {
1832 (*allocation_observers_)[i]->AllocationStep(bytes_allocated, soon_object,
1833 size);
1834 }
1835 top_on_previous_step_ = new_top;
1836 }
1837 }
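// Summary of the allocation-observer protocol implemented above:
// top_on_previous_step_ remembers the top pointer at the time of the last
// step (it is 0 while observers are paused or absent). When allocation
// reaches the inline allocation limit, InlineAllocationStep() reports the
// bytes allocated since that point to every registered AllocationObserver,
// and UpdateInlineAllocationLimit() recomputes the limit from the smallest
// bytes_to_next_step() among the observers, so the bump-pointer fast path
// is interrupted roughly at every observer's step boundary.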
1838
1839 std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
1840 return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
1841 }
1842
1843 #ifdef VERIFY_HEAP
1844 // We do not use the SemiSpaceIterator because verification doesn't assume
1845 // that it works (it depends on the invariants we are checking).
1846 void NewSpace::Verify() {
1847 // The allocation pointer should be in the space or at the very end.
1848 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1849
1850 // There should be objects packed in from the low address up to the
1851 // allocation pointer.
1852 Address current = to_space_.first_page()->area_start();
1853 CHECK_EQ(current, to_space_.space_start());
1854
1855 while (current != top()) {
1856 if (!Page::IsAlignedToPageSize(current)) {
1857 // The allocation pointer should not be in the middle of an object.
1858 CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
1859 current < top());
1860
1861 HeapObject* object = HeapObject::FromAddress(current);
1862
1863 // The first word should be a map, and we expect all map pointers to
1864 // be in map space.
1865 Map* map = object->map();
1866 CHECK(map->IsMap());
1867 CHECK(heap()->map_space()->Contains(map));
1868
1869 // The object should not be code or a map.
1870 CHECK(!object->IsMap());
1871 CHECK(!object->IsAbstractCode());
1872
1873 // The object itself should look OK.
1874 object->ObjectVerify();
1875
1876 // All the interior pointers should be contained in the heap.
1877 VerifyPointersVisitor visitor;
1878 int size = object->Size();
1879 object->IterateBody(map->instance_type(), size, &visitor);
1880
1881 current += size;
1882 } else {
1883 // At end of page, switch to next page.
1884 Page* page = Page::FromAllocationAreaAddress(current)->next_page();
1885 // Next page should be valid.
1886 CHECK(!page->is_anchor());
1887 current = page->area_start();
1888 }
1889 }
1890
1891 // Check semi-spaces.
1892 CHECK_EQ(from_space_.id(), kFromSpace);
1893 CHECK_EQ(to_space_.id(), kToSpace);
1894 from_space_.Verify();
1895 to_space_.Verify();
1896 }
1897 #endif
1898
1899 // -----------------------------------------------------------------------------
1900 // SemiSpace implementation
1901
1902 void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
1903 DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
1904 minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1905 current_capacity_ = minimum_capacity_;
1906 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1907 committed_ = false;
1908 }
1909
1910
1911 void SemiSpace::TearDown() {
1912 // Properly uncommit memory to keep the allocator counters in sync.
1913 if (is_committed()) {
1914 for (Page* p : *this) {
1915 ArrayBufferTracker::FreeAll(p);
1916 }
1917 Uncommit();
1918 }
1919 current_capacity_ = maximum_capacity_ = 0;
1920 }
1921
1922
1923 bool SemiSpace::Commit() {
1924 DCHECK(!is_committed());
1925 Page* current = anchor();
1926 const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
1927 for (int pages_added = 0; pages_added < num_pages; pages_added++) {
1928 Page* new_page =
1929 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1930 Page::kAllocatableMemory, this, executable());
1931 if (new_page == nullptr) {
1932 RewindPages(current, pages_added);
1933 return false;
1934 }
1935 new_page->InsertAfter(current);
1936 current = new_page;
1937 }
1938 Reset();
1939 AccountCommitted(current_capacity_);
1940 if (age_mark_ == nullptr) {
1941 age_mark_ = first_page()->area_start();
1942 }
1943 committed_ = true;
1944 return true;
1945 }
1946
1947
1948 bool SemiSpace::Uncommit() {
1949 DCHECK(is_committed());
1950 for (auto it = begin(); it != end();) {
1951 Page* p = *(it++);
1952 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(p);
1953 }
1954 anchor()->set_next_page(anchor());
1955 anchor()->set_prev_page(anchor());
1956 AccountUncommitted(current_capacity_);
1957 committed_ = false;
1958 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
1959 return true;
1960 }
1961
1962
1963 size_t SemiSpace::CommittedPhysicalMemory() {
1964 if (!is_committed()) return 0;
1965 size_t size = 0;
1966 for (Page* p : *this) {
1967 size += p->CommittedPhysicalMemory();
1968 }
1969 return size;
1970 }
1971
1972 bool SemiSpace::GrowTo(size_t new_capacity) {
1973 if (!is_committed()) {
1974 if (!Commit()) return false;
1975 }
1976 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
1977 DCHECK_LE(new_capacity, maximum_capacity_);
1978 DCHECK_GT(new_capacity, current_capacity_);
1979 const size_t delta = new_capacity - current_capacity_;
1980 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1981 const int delta_pages = static_cast<int>(delta / Page::kPageSize);
1982 Page* last_page = anchor()->prev_page();
1983 DCHECK_NE(last_page, anchor());
1984 for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
1985 Page* new_page =
1986 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1987 Page::kAllocatableMemory, this, executable());
1988 if (new_page == nullptr) {
1989 RewindPages(last_page, pages_added);
1990 return false;
1991 }
1992 new_page->InsertAfter(last_page);
1993 new_page->ClearLiveness();
1994 // Duplicate the flags that were set on the old page.
1995 new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
1996 last_page = new_page;
1997 }
1998 AccountCommitted(delta);
1999 current_capacity_ = new_capacity;
2000 return true;
2001 }
2002
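// Undoes a partially completed Commit() or GrowTo(): starting at {start} and
// walking backwards, {num_pages} pages are unlinked from the semispace's page
// list. The pages are only unlinked here, not returned to the memory
// allocator.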
2003 void SemiSpace::RewindPages(Page* start, int num_pages) {
2004 Page* new_last_page = nullptr;
2005 Page* last_page = start;
2006 while (num_pages > 0) {
2007 DCHECK_NE(last_page, anchor());
2008 new_last_page = last_page->prev_page();
2009 last_page->prev_page()->set_next_page(last_page->next_page());
2010 last_page->next_page()->set_prev_page(last_page->prev_page());
2011 last_page = new_last_page;
2012 num_pages--;
2013 }
2014 }
2015
2016 bool SemiSpace::ShrinkTo(size_t new_capacity) {
2017 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
2018 DCHECK_GE(new_capacity, minimum_capacity_);
2019 DCHECK_LT(new_capacity, current_capacity_);
2020 if (is_committed()) {
2021 const size_t delta = current_capacity_ - new_capacity;
2022 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
2023 int delta_pages = static_cast<int>(delta / Page::kPageSize);
2024 Page* new_last_page;
2025 Page* last_page;
2026 while (delta_pages > 0) {
2027 last_page = anchor()->prev_page();
2028 new_last_page = last_page->prev_page();
2029 new_last_page->set_next_page(anchor());
2030 anchor()->set_prev_page(new_last_page);
2031 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
2032 last_page);
2033 delta_pages--;
2034 }
2035 AccountUncommitted(delta);
2036 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2037 }
2038 current_capacity_ = new_capacity;
2039 return true;
2040 }
2041
2042 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
2043 anchor_.set_owner(this);
2044 anchor_.prev_page()->set_next_page(&anchor_);
2045 anchor_.next_page()->set_prev_page(&anchor_);
2046
2047 for (Page* page : *this) {
2048 page->set_owner(this);
2049 page->SetFlags(flags, mask);
2050 if (id_ == kToSpace) {
2051 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
2052 page->SetFlag(MemoryChunk::IN_TO_SPACE);
2053 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
2054 page->ResetLiveBytes();
2055 } else {
2056 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
2057 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
2058 }
2059 DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
2060 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
2061 }
2062 }
2063
2064
2065 void SemiSpace::Reset() {
2066 DCHECK_NE(anchor_.next_page(), &anchor_);
2067 current_page_ = anchor_.next_page();
2068 pages_used_ = 0;
2069 }
2070
2071 void SemiSpace::RemovePage(Page* page) {
2072 if (current_page_ == page) {
2073 current_page_ = page->prev_page();
2074 }
2075 page->Unlink();
2076 }
2077
2078 void SemiSpace::PrependPage(Page* page) {
2079 page->SetFlags(current_page()->GetFlags(), Page::kCopyAllFlags);
2080 page->set_owner(this);
2081 page->InsertAfter(anchor());
2082 pages_used_++;
2083 }
2084
2085 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
2086 // We won't be swapping semispaces without data in them.
2087 DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
2088 DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
2089
2090 intptr_t saved_to_space_flags = to->current_page()->GetFlags();
2091
2092 // We swap all properties but id_.
2093 std::swap(from->current_capacity_, to->current_capacity_);
2094 std::swap(from->maximum_capacity_, to->maximum_capacity_);
2095 std::swap(from->minimum_capacity_, to->minimum_capacity_);
2096 std::swap(from->age_mark_, to->age_mark_);
2097 std::swap(from->committed_, to->committed_);
2098 std::swap(from->anchor_, to->anchor_);
2099 std::swap(from->current_page_, to->current_page_);
2100
2101 to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
2102 from->FixPagesFlags(0, 0);
2103 }
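// Note on the flag handling in Swap(): the flags of the old to-space's
// current page are saved before the swap and then propagated, masked by
// Page::kCopyOnFlipFlagsMask, onto the pages of the new to-space, so that
// the incremental-marking related flags survive the flip. The new from-space
// pages only get their owner and IN_FROM_SPACE/IN_TO_SPACE bits fixed up.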
2104
2105 void SemiSpace::set_age_mark(Address mark) {
2106 DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
2107 age_mark_ = mark;
2108 // Mark all pages up to the one containing mark.
2109 for (Page* p : PageRange(space_start(), mark)) {
2110 p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
2111 }
2112 }
2113
2114 std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
2115 // To-space is iterated via NewSpace::GetObjectIterator() (a SemiSpaceIterator).
2116 UNREACHABLE();
2117 return std::unique_ptr<ObjectIterator>();
2118 }
2119
2120 #ifdef DEBUG
2121 void SemiSpace::Print() {}
2122 #endif
2123
2124 #ifdef VERIFY_HEAP
2125 void SemiSpace::Verify() {
2126 bool is_from_space = (id_ == kFromSpace);
2127 Page* page = anchor_.next_page();
2128 CHECK(anchor_.owner() == this);
2129 while (page != &anchor_) {
2130 CHECK_EQ(page->owner(), this);
2131 CHECK(page->InNewSpace());
2132 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
2133 : MemoryChunk::IN_TO_SPACE));
2134 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
2135 : MemoryChunk::IN_FROM_SPACE));
2136 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
2137 if (!is_from_space) {
2138 // The pointers-from-here-are-interesting flag isn't updated dynamically
2139 // on from-space pages, so it might be out of sync with the marking state.
2140 if (page->heap()->incremental_marking()->IsMarking()) {
2141 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
2142 } else {
2143 CHECK(
2144 !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
2145 }
2146 // TODO(gc): Check that the live_bytes_count_ field matches the
2147 // black marking on the page (if we make it match in new-space).
2148 }
2149 CHECK_EQ(page->prev_page()->next_page(), page);
2150 page = page->next_page();
2151 }
2152 }
2153 #endif
2154
2155 #ifdef DEBUG
2156 void SemiSpace::AssertValidRange(Address start, Address end) {
2157 // Both addresses must belong to the same semi-space.
2158 Page* page = Page::FromAllocationAreaAddress(start);
2159 Page* end_page = Page::FromAllocationAreaAddress(end);
2160 SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
2161 CHECK_EQ(space, end_page->owner());
2162 // The start address must not come after the end address: either both lie
2163 // on the same page, or the end address is on a later page in the linked
2164 // list of semi-space pages.
2165 if (page == end_page) {
2166 CHECK_LE(start, end);
2167 } else {
2168 while (page != end_page) {
2169 page = page->next_page();
2170 CHECK_NE(page, space->anchor());
2171 }
2172 }
2173 }
2174 #endif
2175
2176
2177 // -----------------------------------------------------------------------------
2178 // SemiSpaceIterator implementation.
2179
2180 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
2181 Initialize(space->bottom(), space->top());
2182 }
2183
2184
2185 void SemiSpaceIterator::Initialize(Address start, Address end) {
2186 SemiSpace::AssertValidRange(start, end);
2187 current_ = start;
2188 limit_ = end;
2189 }
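// Usage sketch (mirrors NewSpace::CollectStatistics() below; `new_space` is a
// placeholder for a NewSpace*):
//
//   SemiSpaceIterator it(new_space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // Visit obj; only objects between bottom() and top() are returned.
//   }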
2190
2191 #ifdef DEBUG
2192 // heap_histograms is shared, always clear it before using it.
2193 static void ClearHistograms(Isolate* isolate) {
2194 // We reset the name each time, though it hasn't changed.
2195 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
2196 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
2197 #undef DEF_TYPE_NAME
2198
2199 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
2200 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
2201 #undef CLEAR_HISTOGRAM
2202
2203 isolate->js_spill_information()->Clear();
2204 }
2205
2206 static int CollectHistogramInfo(HeapObject* obj) {
2207 Isolate* isolate = obj->GetIsolate();
2208 InstanceType type = obj->map()->instance_type();
2209 DCHECK(0 <= type && type <= LAST_TYPE);
2210 DCHECK(isolate->heap_histograms()[type].name() != NULL);
2211 isolate->heap_histograms()[type].increment_number(1);
2212 isolate->heap_histograms()[type].increment_bytes(obj->Size());
2213
2214 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
2215 JSObject::cast(obj)
2216 ->IncrementSpillStatistics(isolate->js_spill_information());
2217 }
2218
2219 return obj->Size();
2220 }
2221
2222
2223 static void ReportHistogram(Isolate* isolate, bool print_spill) {
2224 PrintF("\n Object Histogram:\n");
2225 for (int i = 0; i <= LAST_TYPE; i++) {
2226 if (isolate->heap_histograms()[i].number() > 0) {
2227 PrintF(" %-34s%10d (%10d bytes)\n",
2228 isolate->heap_histograms()[i].name(),
2229 isolate->heap_histograms()[i].number(),
2230 isolate->heap_histograms()[i].bytes());
2231 }
2232 }
2233 PrintF("\n");
2234
2235 // Summarize string types.
2236 int string_number = 0;
2237 int string_bytes = 0;
2238 #define INCREMENT(type, size, name, camel_name) \
2239 string_number += isolate->heap_histograms()[type].number(); \
2240 string_bytes += isolate->heap_histograms()[type].bytes();
2241 STRING_TYPE_LIST(INCREMENT)
2242 #undef INCREMENT
2243 if (string_number > 0) {
2244 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
2245 string_bytes);
2246 }
2247
2248 if (FLAG_collect_heap_spill_statistics && print_spill) {
2249 isolate->js_spill_information()->Print();
2250 }
2251 }
2252 #endif // DEBUG
2253
2254
2255 // Support for statistics gathering for --heap-stats and --log-gc.
2256 void NewSpace::ClearHistograms() {
2257 for (int i = 0; i <= LAST_TYPE; i++) {
2258 allocated_histogram_[i].clear();
2259 promoted_histogram_[i].clear();
2260 }
2261 }
2262
2263
2264 // Because the copying collector does not touch garbage objects, we iterate
2265 // the new space before a collection to get a histogram of allocated objects.
2266 // This only happens when the --log-gc flag is set.
2267 void NewSpace::CollectStatistics() {
2268 ClearHistograms();
2269 SemiSpaceIterator it(this);
2270 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
2271 RecordAllocation(obj);
2272 }
2273
2274
2275 static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
2276 const char* description) {
2277 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
2278 // Lump all the string types together.
2279 int string_number = 0;
2280 int string_bytes = 0;
2281 #define INCREMENT(type, size, name, camel_name) \
2282 string_number += info[type].number(); \
2283 string_bytes += info[type].bytes();
2284 STRING_TYPE_LIST(INCREMENT)
2285 #undef INCREMENT
2286 if (string_number > 0) {
2287 LOG(isolate,
2288 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
2289 }
2290
2291 // Then do the other types.
2292 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
2293 if (info[i].number() > 0) {
2294 LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
2295 info[i].bytes()));
2296 }
2297 }
2298 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
2299 }
2300
2301
2302 void NewSpace::ReportStatistics() {
2303 #ifdef DEBUG
2304 if (FLAG_heap_stats) {
2305 float pct = static_cast<float>(Available()) / TotalCapacity();
2306 PrintF(" capacity: %" V8PRIdPTR ", available: %" V8PRIdPTR ", %%%d\n",
2307 TotalCapacity(), Available(), static_cast<int>(pct * 100));
2308 PrintF("\n Object Histogram:\n");
2309 for (int i = 0; i <= LAST_TYPE; i++) {
2310 if (allocated_histogram_[i].number() > 0) {
2311 PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2312 allocated_histogram_[i].number(),
2313 allocated_histogram_[i].bytes());
2314 }
2315 }
2316 PrintF("\n");
2317 }
2318 #endif // DEBUG
2319
2320 if (FLAG_log_gc) {
2321 Isolate* isolate = heap()->isolate();
2322 DoReportStatistics(isolate, allocated_histogram_, "allocated");
2323 DoReportStatistics(isolate, promoted_histogram_, "promoted");
2324 }
2325 }
2326
2327
2328 void NewSpace::RecordAllocation(HeapObject* obj) {
2329 InstanceType type = obj->map()->instance_type();
2330 DCHECK(0 <= type && type <= LAST_TYPE);
2331 allocated_histogram_[type].increment_number(1);
2332 allocated_histogram_[type].increment_bytes(obj->Size());
2333 }
2334
2335
2336 void NewSpace::RecordPromotion(HeapObject* obj) {
2337 InstanceType type = obj->map()->instance_type();
2338 DCHECK(0 <= type && type <= LAST_TYPE);
2339 promoted_histogram_[type].increment_number(1);
2340 promoted_histogram_[type].increment_bytes(obj->Size());
2341 }
2342
2343
2344 size_t NewSpace::CommittedPhysicalMemory() {
2345 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2346 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2347 size_t size = to_space_.CommittedPhysicalMemory();
2348 if (from_space_.is_committed()) {
2349 size += from_space_.CommittedPhysicalMemory();
2350 }
2351 return size;
2352 }
2353
2354
2355 // -----------------------------------------------------------------------------
2356 // Free lists for old object spaces implementation
2357
2358
2359 void FreeListCategory::Reset() {
2360 set_top(nullptr);
2361 set_prev(nullptr);
2362 set_next(nullptr);
2363 available_ = 0;
2364 }
2365
2366 FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
2367 DCHECK(page()->CanAllocate());
2368
2369 FreeSpace* node = top();
2370 if (node == nullptr) return nullptr;
2371 set_top(node->next());
2372 *node_size = node->Size();
2373 available_ -= *node_size;
2374 return node;
2375 }
2376
2377 FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
2378 size_t* node_size) {
2379 DCHECK(page()->CanAllocate());
2380
2381 FreeSpace* node = PickNodeFromList(node_size);
2382 if ((node != nullptr) && (*node_size < minimum_size)) {
2383 Free(node, *node_size, kLinkCategory);
2384 *node_size = 0;
2385 return nullptr;
2386 }
2387 return node;
2388 }
2389
2390 FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
2391 size_t* node_size) {
2392 DCHECK(page()->CanAllocate());
2393
2394 FreeSpace* prev_non_evac_node = nullptr;
2395 for (FreeSpace* cur_node = top(); cur_node != nullptr;
2396 cur_node = cur_node->next()) {
2397 size_t size = cur_node->size();
2398 if (size >= minimum_size) {
2399 DCHECK_GE(available_, size);
2400 available_ -= size;
2401 if (cur_node == top()) {
2402 set_top(cur_node->next());
2403 }
2404 if (prev_non_evac_node != nullptr) {
2405 prev_non_evac_node->set_next(cur_node->next());
2406 }
2407 *node_size = size;
2408 return cur_node;
2409 }
2410
2411 prev_non_evac_node = cur_node;
2412 }
2413 return nullptr;
2414 }
2415
2416 bool FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
2417 FreeMode mode) {
2418 if (!page()->CanAllocate()) return false;
2419
2420 free_space->set_next(top());
2421 set_top(free_space);
2422 available_ += size_in_bytes;
2423 if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
2424 owner()->AddCategory(this);
2425 }
2426 return true;
2427 }
2428
2429
2430 void FreeListCategory::RepairFreeList(Heap* heap) {
2431 FreeSpace* n = top();
2432 while (n != NULL) {
2433 Map** map_location = reinterpret_cast<Map**>(n->address());
2434 if (*map_location == NULL) {
2435 *map_location = heap->free_space_map();
2436 } else {
2437 DCHECK(*map_location == heap->free_space_map());
2438 }
2439 n = n->next();
2440 }
2441 }
2442
2443 void FreeListCategory::Relink() {
2444 DCHECK(!is_linked());
2445 owner()->AddCategory(this);
2446 }
2447
2448 void FreeListCategory::Invalidate() {
2449 page()->remove_available_in_free_list(available());
2450 Reset();
2451 type_ = kInvalidCategory;
2452 }
2453
2454 FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
2455 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2456 categories_[i] = nullptr;
2457 }
2458 Reset();
2459 }
2460
2461
2462 void FreeList::Reset() {
2463 ForAllFreeListCategories(
2464 [](FreeListCategory* category) { category->Reset(); });
2465 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2466 categories_[i] = nullptr;
2467 }
2468 ResetStats();
2469 }
2470
2471 size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
2472 if (size_in_bytes == 0) return 0;
2473
2474 owner()->heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
2475 ClearRecordedSlots::kNo);
2476
2477 Page* page = Page::FromAddress(start);
2478
2479 // Blocks have to be a minimum size to hold free list items.
2480 if (size_in_bytes < kMinBlockSize) {
2481 page->add_wasted_memory(size_in_bytes);
2482 wasted_bytes_.Increment(size_in_bytes);
2483 return size_in_bytes;
2484 }
2485
2486 FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
2487 // Insert other blocks at the head of a free list of the appropriate
2488 // magnitude.
2489 FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
2490 if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
2491 page->add_available_in_free_list(size_in_bytes);
2492 }
2493 DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
2494 return 0;
2495 }
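// Note on the return value of Free(): blocks smaller than kMinBlockSize
// cannot hold a free-list node, so they are only accounted as wasted memory
// and their size is returned to the caller; blocks that were added to a
// free-list category yield 0. A typical call looks like (sketch; the range
// [start, start + size) is assumed to have been carved out by the caller):
//
//   size_t wasted = free_list->Free(start, size, kLinkCategory);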
2496
2497 FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t* node_size) {
2498 FreeListCategoryIterator it(this, type);
2499 FreeSpace* node = nullptr;
2500 while (it.HasNext()) {
2501 FreeListCategory* current = it.Next();
2502 node = current->PickNodeFromList(node_size);
2503 if (node != nullptr) {
2504 Page::FromAddress(node->address())
2505 ->remove_available_in_free_list(*node_size);
2506 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2507 return node;
2508 }
2509 RemoveCategory(current);
2510 }
2511 return node;
2512 }
2513
2514 FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
2515 size_t minimum_size) {
2516 if (categories_[type] == nullptr) return nullptr;
2517 FreeSpace* node =
2518 categories_[type]->TryPickNodeFromList(minimum_size, node_size);
2519 if (node != nullptr) {
2520 Page::FromAddress(node->address())
2521 ->remove_available_in_free_list(*node_size);
2522 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2523 }
2524 return node;
2525 }
2526
2527 FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
2528 size_t* node_size,
2529 size_t minimum_size) {
2530 FreeListCategoryIterator it(this, type);
2531 FreeSpace* node = nullptr;
2532 while (it.HasNext()) {
2533 FreeListCategory* current = it.Next();
2534 node = current->SearchForNodeInList(minimum_size, node_size);
2535 if (node != nullptr) {
2536 Page::FromAddress(node->address())
2537 ->remove_available_in_free_list(*node_size);
2538 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2539 return node;
2540 }
2541 if (current->is_empty()) {
2542 RemoveCategory(current);
2543 }
2544 }
2545 return node;
2546 }
2547
2548 FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
2549 FreeSpace* node = nullptr;
2550
2551 // First try the allocation fast path: try to allocate the minimum element
2552 // size of a free list category. This operation is constant time.
2553 FreeListCategoryType type =
2554 SelectFastAllocationFreeListCategoryType(size_in_bytes);
2555 for (int i = type; i < kHuge; i++) {
2556 node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
2557 if (node != nullptr) return node;
2558 }
2559
2560 // Next search the huge list for free list nodes. This takes linear time in
2561 // the number of huge elements.
2562 node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
2563 if (node != nullptr) {
2564 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2565 return node;
2566 }
2567
2568 // We need a huge block of memory, but we didn't find anything in the huge
2569 // list.
2570 if (type == kHuge) return nullptr;
2571
2572 // Now search the best fitting free list for a node that has at least the
2573 // requested size.
2574 type = SelectFreeListCategoryType(size_in_bytes);
2575 node = TryFindNodeIn(type, node_size, size_in_bytes);
2576
2577 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2578 return node;
2579 }
2580
2581 // Allocation on the old space free list. If it succeeds then a new linear
2582 // allocation area has been set up in the owning space, with its own top and
2583 // limit. If the allocation fails then nullptr is returned, and the caller can
2584 // perform a GC or allocate a new page before retrying.
2585 HeapObject* FreeList::Allocate(size_t size_in_bytes) {
2586 DCHECK(size_in_bytes <= kMaxBlockSize);
2587 DCHECK(IsAligned(size_in_bytes, kPointerSize));
2588 DCHECK_LE(owner_->top(), owner_->limit());
2589 #ifdef DEBUG
2590 if (owner_->top() != owner_->limit()) {
2591 DCHECK_EQ(Page::FromAddress(owner_->top()),
2592 Page::FromAddress(owner_->limit() - 1));
2593 }
2594 #endif
2595 // Don't free list allocate if there is linear space available.
2596 DCHECK_LT(static_cast<size_t>(owner_->limit() - owner_->top()),
2597 size_in_bytes);
2598
2599 // Mark the old linear allocation area with a free space map so it can be
2600 // skipped when scanning the heap. This also puts it back in the free list
2601 // if it is big enough.
2602 owner_->EmptyAllocationInfo();
2603
2604 owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
2605 Heap::kNoGCFlags, kNoGCCallbackFlags);
2606
2607 size_t new_node_size = 0;
2608 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2609 if (new_node == nullptr) return nullptr;
2610
2611 DCHECK_GE(new_node_size, size_in_bytes);
2612 size_t bytes_left = new_node_size - size_in_bytes;
2613
2614 #ifdef DEBUG
2615 for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
2616 reinterpret_cast<Object**>(new_node->address())[i] =
2617 Smi::FromInt(kCodeZapValue);
2618 }
2619 #endif
2620
2621 // The old-space-step might have finished sweeping and restarted marking.
2622 // Verify that it did not turn the page of the new node into an evacuation
2623 // candidate.
2624 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2625
2626 const size_t kThreshold = IncrementalMarking::kAllocatedThreshold;
2627
2628 // Memory in the linear allocation area is counted as allocated. We may free
2629 // a little of this again immediately - see below.
2630 owner_->AccountAllocatedBytes(new_node_size);
2631
2632 if (owner_->heap()->inline_allocation_disabled()) {
2633 // Keep the linear allocation area empty if requested to do so, and just
2634 // return the remaining area to the free list instead.
2635 owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2636 owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
2637 new_node->address() + size_in_bytes);
2638 } else if (bytes_left > kThreshold &&
2639 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2640 FLAG_incremental_marking) {
2641 size_t linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2642 // We don't want to give too large linear areas to the allocator while
2643 // incremental marking is going on, because we won't check again whether
2644 // we want to do another increment until the linear area is used up.
2645 DCHECK_GE(new_node_size, size_in_bytes + linear_size);
2646 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2647 new_node_size - size_in_bytes - linear_size);
2648 owner_->SetAllocationInfo(
2649 new_node->address() + size_in_bytes,
2650 new_node->address() + size_in_bytes + linear_size);
2651 } else {
2652 // Normally we give the rest of the node to the allocator as its new
2653 // linear allocation area.
2654 owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
2655 new_node->address() + new_node_size);
2656 }
2657
2658 return new_node;
2659 }
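// The portion of the node that is not needed for the requested allocation is
// handled in one of three ways above: with inline allocation disabled the
// entire remainder goes back to the free list and the linear allocation area
// stays empty; while incremental marking is incomplete only a bounded linear
// area (kAllocatedThreshold, rounded down to object alignment) is kept and
// the rest is freed; otherwise the whole remainder becomes the new linear
// allocation area.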
2660
2661 size_t FreeList::EvictFreeListItems(Page* page) {
2662 size_t sum = 0;
2663 page->ForAllFreeListCategories(
2664 [this, &sum](FreeListCategory* category) {
2665 DCHECK_EQ(this, category->owner());
2666 sum += category->available();
2667 RemoveCategory(category);
2668 category->Invalidate();
2669 });
2670 return sum;
2671 }
2672
2673 bool FreeList::ContainsPageFreeListItems(Page* page) {
2674 bool contained = false;
2675 page->ForAllFreeListCategories(
2676 [this, &contained](FreeListCategory* category) {
2677 if (category->owner() == this && category->is_linked()) {
2678 contained = true;
2679 }
2680 });
2681 return contained;
2682 }
2683
2684 void FreeList::RepairLists(Heap* heap) {
2685 ForAllFreeListCategories(
2686 [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
2687 }
2688
2689 bool FreeList::AddCategory(FreeListCategory* category) {
2690 FreeListCategoryType type = category->type_;
2691 FreeListCategory* top = categories_[type];
2692
2693 if (category->is_empty()) return false;
2694 if (top == category) return false;
2695
2696 // Common double-linked list insertion.
2697 if (top != nullptr) {
2698 top->set_prev(category);
2699 }
2700 category->set_next(top);
2701 categories_[type] = category;
2702 return true;
2703 }
2704
2705 void FreeList::RemoveCategory(FreeListCategory* category) {
2706 FreeListCategoryType type = category->type_;
2707 FreeListCategory* top = categories_[type];
2708
2709 // Common double-linked list removal.
2710 if (top == category) {
2711 categories_[type] = category->next();
2712 }
2713 if (category->prev() != nullptr) {
2714 category->prev()->set_next(category->next());
2715 }
2716 if (category->next() != nullptr) {
2717 category->next()->set_prev(category->prev());
2718 }
2719 category->set_next(nullptr);
2720 category->set_prev(nullptr);
2721 }
2722
2723 void FreeList::PrintCategories(FreeListCategoryType type) {
2724 FreeListCategoryIterator it(this, type);
2725 PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
2726 static_cast<void*>(categories_[type]), type);
2727 while (it.HasNext()) {
2728 FreeListCategory* current = it.Next();
2729 PrintF("%p -> ", static_cast<void*>(current));
2730 }
2731 PrintF("null\n");
2732 }
2733
2734
2735 #ifdef DEBUG
2736 size_t FreeListCategory::SumFreeList() {
2737 size_t sum = 0;
2738 FreeSpace* cur = top();
2739 while (cur != NULL) {
2740 DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
2741 sum += cur->nobarrier_size();
2742 cur = cur->next();
2743 }
2744 return sum;
2745 }
2746
2747 int FreeListCategory::FreeListLength() {
2748 int length = 0;
2749 FreeSpace* cur = top();
2750 while (cur != NULL) {
2751 length++;
2752 cur = cur->next();
2753 if (length == kVeryLongFreeList) return length;
2754 }
2755 return length;
2756 }
2757
2758 bool FreeList::IsVeryLong() {
2759 int len = 0;
2760 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2761 FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
2762 while (it.HasNext()) {
2763 len += it.Next()->FreeListLength();
2764 if (len >= FreeListCategory::kVeryLongFreeList) return true;
2765 }
2766 }
2767 return false;
2768 }
2769
2770
2771 // This can take a very long time because it is linear in the number of entries
2772 // on the free list, so it should not be called if FreeListLength returns
2773 // kVeryLongFreeList.
2774 size_t FreeList::SumFreeLists() {
2775 size_t sum = 0;
2776 ForAllFreeListCategories(
2777 [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
2778 return sum;
2779 }
2780 #endif
2781
2782
2783 // -----------------------------------------------------------------------------
2784 // OldSpace implementation
2785
2786 void PagedSpace::PrepareForMarkCompact() {
2787 // We don't have a linear allocation area while sweeping. It will be restored
2788 // on the first allocation after the sweep.
2789 EmptyAllocationInfo();
2790
2791 // Clear the free list before a full GC---it will be rebuilt afterward.
2792 free_list_.Reset();
2793 }
2794
2795 size_t PagedSpace::SizeOfObjects() {
2796 CHECK_GE(limit(), top());
2797 DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
2798 return Size() - (limit() - top());
2799 }
2800
2801
2802 // After booting, we have created a map that represents free space on the
2803 // heap. If there was already a free list then its elements were created
2804 // with the wrong FreeSpaceMap (normally NULL), so we need to fix them.
2806 void PagedSpace::RepairFreeListsAfterDeserialization() {
2807 free_list_.RepairLists(heap());
2808 // Each page may have a small free space that is not tracked by a free list.
2809 // Update the maps for those free space objects.
2810 for (Page* page : *this) {
2811 size_t size = page->wasted_memory();
2812 if (size == 0) continue;
2813 DCHECK_GE(static_cast<size_t>(Page::kPageSize), size);
2814 Address address = page->OffsetToAddress(Page::kPageSize - size);
2815 heap()->CreateFillerObjectAt(address, static_cast<int>(size),
2816 ClearRecordedSlots::kNo);
2817 }
2818 }
2819
2820 HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
2821 MarkCompactCollector* collector = heap()->mark_compact_collector();
2822 if (collector->sweeping_in_progress()) {
2823 // Wait for the sweeper threads here and complete the sweeping phase.
2824 collector->EnsureSweepingCompleted();
2825
2826 // After waiting for the sweeper threads, there may be new free-list
2827 // entries.
2828 return free_list_.Allocate(size_in_bytes);
2829 }
2830 return nullptr;
2831 }
2832
2833 HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
2834 MarkCompactCollector* collector = heap()->mark_compact_collector();
2835 if (collector->sweeping_in_progress()) {
2836 collector->SweepAndRefill(this);
2837 return free_list_.Allocate(size_in_bytes);
2838 }
2839 return nullptr;
2840 }
2841
2842 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2843 DCHECK_GE(size_in_bytes, 0);
2844 const int kMaxPagesToSweep = 1;
2845
2846 // Allocation in this space has failed.
2847
2848 MarkCompactCollector* collector = heap()->mark_compact_collector();
2849 // Sweeping is still in progress.
2850 if (collector->sweeping_in_progress()) {
2851 // First try to refill the free-list, concurrent sweeper threads
2852 // may have freed some objects in the meantime.
2853 RefillFreeList();
2854
2855 // Retry the free list allocation.
2856 HeapObject* object =
2857 free_list_.Allocate(static_cast<size_t>(size_in_bytes));
2858 if (object != NULL) return object;
2859
2860 // If sweeping is still in progress try to sweep pages on the main thread.
2861 int max_freed = collector->sweeper().ParallelSweepSpace(
2862 identity(), size_in_bytes, kMaxPagesToSweep);
2863 RefillFreeList();
2864 if (max_freed >= size_in_bytes) {
2865 object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
2866 if (object != nullptr) return object;
2867 }
2868 }
2869
2870 if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
2871 DCHECK((CountTotalPages() > 1) ||
2872 (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
2873 return free_list_.Allocate(static_cast<size_t>(size_in_bytes));
2874 }
2875
2876 // If sweeper threads are active, wait for them at this point and steal
2877 // elements from their free lists. Allocation may still fail here, which
2878 // would indicate that there is not enough memory for the given allocation.
2879 return SweepAndRetryAllocation(size_in_bytes);
2880 }
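// Slow-path order of attempts, summarized: (1) refill the free list from
// pages already swept concurrently and retry, (2) sweep up to
// kMaxPagesToSweep pages on the main thread and retry, (3) expand the space
// with a new page if the heap allows it, and finally (4) wait for concurrent
// sweeping to finish and retry once more. A nullptr result means the caller
// has to perform a GC.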
2881
2882 #ifdef DEBUG
2883 void PagedSpace::ReportStatistics() {
2884 int pct = static_cast<int>(Available() * 100 / Capacity());
2885 PrintF(" capacity: %" V8PRIdPTR ", waste: %" V8PRIdPTR
2886 ", available: %" V8PRIdPTR ", %%%d\n",
2887 Capacity(), Waste(), Available(), pct);
2888
2889 heap()->mark_compact_collector()->EnsureSweepingCompleted();
2890 ClearHistograms(heap()->isolate());
2891 HeapObjectIterator obj_it(this);
2892 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2893 CollectHistogramInfo(obj);
2894 ReportHistogram(heap()->isolate(), true);
2895 }
2896 #endif
2897
2898
2899 // -----------------------------------------------------------------------------
2900 // MapSpace implementation
2901
2902 #ifdef VERIFY_HEAP
2903 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
2904 #endif
2905
2906 Address LargePage::GetAddressToShrink() {
2907 HeapObject* object = GetObject();
2908 if (executable() == EXECUTABLE) {
2909 return 0;
2910 }
2911 size_t used_size = RoundUp((object->address() - address()) + object->Size(),
2912 MemoryAllocator::GetCommitPageSize());
2913 if (used_size < CommittedPhysicalMemory()) {
2914 return address() + used_size;
2915 }
2916 return 0;
2917 }
2918
2919 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
2920 RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
2921 SlotSet::FREE_EMPTY_BUCKETS);
2922 RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
2923 SlotSet::FREE_EMPTY_BUCKETS);
2924 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
2925 RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
2926 }
2927
2928 // -----------------------------------------------------------------------------
2929 // LargeObjectIterator
2930
2931 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2932 current_ = space->first_page_;
2933 }
2934
2935
2936 HeapObject* LargeObjectIterator::Next() {
2937 if (current_ == NULL) return NULL;
2938
2939 HeapObject* object = current_->GetObject();
2940 current_ = current_->next_page();
2941 return object;
2942 }
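// Usage sketch (the same pattern is used by Print() and ReportStatistics()
// below; `space` is a placeholder for a LargeObjectSpace*):
//
//   LargeObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // Visit obj; exactly one object is returned per large page.
//   }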
2943
2944
2945 // -----------------------------------------------------------------------------
2946 // LargeObjectSpace
2947
2948 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
2949 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2950 first_page_(NULL),
2951 size_(0),
2952 page_count_(0),
2953 objects_size_(0),
2954 chunk_map_(1024) {}
2955
2956 LargeObjectSpace::~LargeObjectSpace() {}
2957
2958
2959 bool LargeObjectSpace::SetUp() {
2960 first_page_ = NULL;
2961 size_ = 0;
2962 page_count_ = 0;
2963 objects_size_ = 0;
2964 chunk_map_.Clear();
2965 return true;
2966 }
2967
2968
2969 void LargeObjectSpace::TearDown() {
2970 while (first_page_ != NULL) {
2971 LargePage* page = first_page_;
2972 first_page_ = first_page_->next_page();
2973 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2974 heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
2975 }
2976 SetUp();
2977 }
2978
2979
2980 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2981 Executability executable) {
2982 // Check if we want to force a GC before growing the old space further.
2983 // If so, fail the allocation.
2984 if (!heap()->CanExpandOldGeneration(object_size) ||
2985 !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
2986 return AllocationResult::Retry(identity());
2987 }
2988
2989 LargePage* page = heap()->memory_allocator()->AllocateLargePage(
2990 object_size, this, executable);
2991 if (page == NULL) return AllocationResult::Retry(identity());
2992 DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
2993
2994 size_ += static_cast<int>(page->size());
2995 AccountCommitted(page->size());
2996 objects_size_ += object_size;
2997 page_count_++;
2998 page->set_next_page(first_page_);
2999 first_page_ = page;
3000
3001 InsertChunkMapEntries(page);
3002
3003 HeapObject* object = page->GetObject();
3004
3005 if (Heap::ShouldZapGarbage()) {
3006 // Make the object consistent so the heap can be verified in OldSpaceStep.
3007 // We only need to do this in debug builds or if verify_heap is on.
3008 reinterpret_cast<Object**>(object->address())[0] =
3009 heap()->fixed_array_map();
3010 reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
3011 }
3012
3013 heap()->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
3014 kNoGCCallbackFlags);
3015 AllocationStep(object->address(), object_size);
3016
3017 if (heap()->incremental_marking()->black_allocation()) {
3018 // We cannot use ObjectMarking here as the object still lacks a size.
3019 Marking::WhiteToBlack(ObjectMarking::MarkBitFrom(object));
3020 MemoryChunk::IncrementLiveBytes(object, object_size);
3021 }
3022 return object;
3023 }
3024
3025
3026 size_t LargeObjectSpace::CommittedPhysicalMemory() {
3027 // On a platform that provides lazy committing of memory, we over-account
3028 // the actually committed memory. There is no easy way right now to support
3029 // precise accounting of committed memory in large object space.
3030 return CommittedMemory();
3031 }
3032
3033
3034 // GC support
3035 Object* LargeObjectSpace::FindObject(Address a) {
3036 LargePage* page = FindPage(a);
3037 if (page != NULL) {
3038 return page->GetObject();
3039 }
3040 return Smi::kZero; // Signaling not found.
3041 }
3042
3043 LargePage* LargeObjectSpace::FindPageThreadSafe(Address a) {
3044 base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
3045 return FindPage(a);
3046 }
3047
3048 LargePage* LargeObjectSpace::FindPage(Address a) {
3049 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
3050 base::HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
3051 static_cast<uint32_t>(key));
3052 if (e != NULL) {
3053 DCHECK(e->value != NULL);
3054 LargePage* page = reinterpret_cast<LargePage*>(e->value);
3055 DCHECK(LargePage::IsValid(page));
3056 if (page->Contains(a)) {
3057 return page;
3058 }
3059 }
3060 return NULL;
3061 }
3062
3063
3064 void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
3065 LargePage* current = first_page_;
3066 while (current != NULL) {
3067 HeapObject* object = current->GetObject();
3068 DCHECK(ObjectMarking::IsBlack(object));
3069 ObjectMarking::ClearMarkBit(object);
3070 Page::FromAddress(object->address())->ResetProgressBar();
3071 Page::FromAddress(object->address())->ResetLiveBytes();
3072 current = current->next_page();
3073 }
3074 }
3075
3076 void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
3077 // Register all MemoryChunk::kAlignment-aligned chunks covered by
3078 // this large page in the chunk map.
3079 uintptr_t start = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
3080 uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
3081 MemoryChunk::kAlignment;
3082 // There may be concurrent access to the chunk map. We have to take the lock
3083 // here.
3084 base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
3085 for (uintptr_t key = start; key <= limit; key++) {
3086 base::HashMap::Entry* entry = chunk_map_.InsertNew(
3087 reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
3088 DCHECK(entry != NULL);
3089 entry->value = page;
3090 }
3091 }
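// Worked example of the keying scheme (illustrative only; it assumes
// MemoryChunk::kAlignment is 512 KB): a large page at address 0x40000000
// spanning 1.5 MB covers keys 0x40000000 / 512 KB = 2048 through
// (0x40000000 + 1.5 MB - 1) / 512 KB = 2050, so three consecutive entries
// all point at the same LargePage and FindPage() can resolve any interior
// address with a single hash lookup.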
3092
3093 void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
3094 RemoveChunkMapEntries(page, page->address());
3095 }
3096
3097 void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
3098 Address free_start) {
3099 uintptr_t start = RoundUp(reinterpret_cast<uintptr_t>(free_start),
3100 MemoryChunk::kAlignment) /
3101 MemoryChunk::kAlignment;
3102 uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
3103 MemoryChunk::kAlignment;
3104 for (uintptr_t key = start; key <= limit; key++) {
3105 chunk_map_.Remove(reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
3106 }
3107 }
3108
3109 void LargeObjectSpace::FreeUnmarkedObjects() {
3110 LargePage* previous = NULL;
3111 LargePage* current = first_page_;
3112 while (current != NULL) {
3113 HeapObject* object = current->GetObject();
3114 DCHECK(!ObjectMarking::IsGrey(object));
3115 if (ObjectMarking::IsBlack(object)) {
3116 Address free_start;
3117 if ((free_start = current->GetAddressToShrink()) != 0) {
3118 // TODO(hpayer): Perform partial free concurrently.
3119 current->ClearOutOfLiveRangeSlots(free_start);
3120 RemoveChunkMapEntries(current, free_start);
3121 heap()->memory_allocator()->PartialFreeMemory(current, free_start);
3122 }
3123 previous = current;
3124 current = current->next_page();
3125 } else {
3126 LargePage* page = current;
3127 // Cut the chunk out from the chunk list.
3128 current = current->next_page();
3129 if (previous == NULL) {
3130 first_page_ = current;
3131 } else {
3132 previous->set_next_page(current);
3133 }
3134
3135 // Free the chunk.
3136 size_ -= static_cast<int>(page->size());
3137 AccountUncommitted(page->size());
3138 objects_size_ -= object->Size();
3139 page_count_--;
3140
3141 RemoveChunkMapEntries(page);
3142 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
3143 }
3144 }
3145 }
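// Black (marked) objects survive; their pages may still be shrunk to the
// committed size that is actually needed via GetAddressToShrink() and
// PartialFreeMemory(). Pages whose object is unmarked are cut out of the
// page list, have their chunk-map entries removed, and are handed to the
// memory allocator's pre-free queue.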
3146
3147
3148 bool LargeObjectSpace::Contains(HeapObject* object) {
3149 Address address = object->address();
3150 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3151
3152 bool owned = (chunk->owner() == this);
3153
3154 SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3155
3156 return owned;
3157 }
3158
3159 std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
3160 return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
3161 }
3162
3163 #ifdef VERIFY_HEAP
3164 // We do not assume that the large object iterator works, because it depends
3165 // on the invariants we are checking during verification.
3166 void LargeObjectSpace::Verify() {
3167 for (LargePage* chunk = first_page_; chunk != NULL;
3168 chunk = chunk->next_page()) {
3169 // Each chunk contains an object that starts at the large object page's
3170 // object area start.
3171 HeapObject* object = chunk->GetObject();
3172 Page* page = Page::FromAddress(object->address());
3173 CHECK(object->address() == page->area_start());
3174
3175 // The first word should be a map, and we expect all map pointers to be
3176 // in map space.
3177 Map* map = object->map();
3178 CHECK(map->IsMap());
3179 CHECK(heap()->map_space()->Contains(map));
3180
3181 // We have only code, sequential strings, external strings
3182 // (sequential strings that have been morphed into external
3183 // strings), thin strings (sequential strings that have been
3184 // morphed into thin strings), fixed arrays, fixed double arrays,
3185 // and byte arrays in the large object space.
3186 CHECK(object->IsAbstractCode() || object->IsSeqString() ||
3187 object->IsExternalString() || object->IsThinString() ||
3188 object->IsFixedArray() || object->IsFixedDoubleArray() ||
3189 object->IsByteArray());
3190
3191 // The object itself should look OK.
3192 object->ObjectVerify();
3193
3194 // Byte arrays and strings don't have interior pointers.
3195 if (object->IsAbstractCode()) {
3196 VerifyPointersVisitor code_visitor;
3197 object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3198 } else if (object->IsFixedArray()) {
3199 FixedArray* array = FixedArray::cast(object);
3200 for (int j = 0; j < array->length(); j++) {
3201 Object* element = array->get(j);
3202 if (element->IsHeapObject()) {
3203 HeapObject* element_object = HeapObject::cast(element);
3204 CHECK(heap()->Contains(element_object));
3205 CHECK(element_object->map()->IsMap());
3206 }
3207 }
3208 }
3209 }
3210 }
3211 #endif
3212
3213 #ifdef DEBUG
3214 void LargeObjectSpace::Print() {
3215 OFStream os(stdout);
3216 LargeObjectIterator it(this);
3217 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3218 obj->Print(os);
3219 }
3220 }
3221
3222
3223 void LargeObjectSpace::ReportStatistics() {
3224 PrintF(" size: %" V8PRIdPTR "\n", size_);
3225 int num_objects = 0;
3226 ClearHistograms(heap()->isolate());
3227 LargeObjectIterator it(this);
3228 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3229 num_objects++;
3230 CollectHistogramInfo(obj);
3231 }
3232
3233 PrintF(
3234 " number of objects %d, "
3235 "size of objects %" V8PRIdPTR "\n",
3236 num_objects, objects_size_);
3237 if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3238 }
3239
3240
3241 void Page::Print() {
3242 // Make a best-effort attempt to print the objects in the page.
3243 PrintF("Page@%p in %s\n", static_cast<void*>(this->address()),
3244 AllocationSpaceName(this->owner()->identity()));
3245 printf(" --------------------------------------\n");
3246 HeapObjectIterator objects(this);
3247 unsigned mark_size = 0;
3248 for (HeapObject* object = objects.Next(); object != NULL;
3249 object = objects.Next()) {
3250 bool is_marked = ObjectMarking::IsBlackOrGrey(object);
3251 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
3252 if (is_marked) {
3253 mark_size += object->Size();
3254 }
3255 object->ShortPrint();
3256 PrintF("\n");
3257 }
3258 printf(" --------------------------------------\n");
3259 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3260 }
3261
3262 #endif // DEBUG
3263 } // namespace internal
3264 } // namespace v8
3265