1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "liveobjectlist-inl.h"
31 #include "macro-assembler.h"
32 #include "mark-compact.h"
33 #include "platform.h"
34
35 namespace v8 {
36 namespace internal {
37
38
39 // ----------------------------------------------------------------------------
40 // HeapObjectIterator
41
HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
43 // You can't actually iterate over the anchor page. It is not a real page,
44 // just an anchor for the double linked page list. Initialize as if we have
45 // reached the end of the anchor page, then the first iteration will move on
46 // to the first page.
47 Initialize(space,
48 NULL,
49 NULL,
50 kAllPagesInSpace,
51 NULL);
52 }
53
54
HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
56 HeapObjectCallback size_func) {
57 // You can't actually iterate over the anchor page. It is not a real page,
58 // just an anchor for the double linked page list. Initialize the current
59 // address and end as NULL, then the first iteration will move on
60 // to the first page.
61 Initialize(space,
62 NULL,
63 NULL,
64 kAllPagesInSpace,
65 size_func);
66 }
67
68
HeapObjectIterator::HeapObjectIterator(Page* page,
70 HeapObjectCallback size_func) {
71 Space* owner = page->owner();
72 ASSERT(owner == HEAP->old_pointer_space() ||
73 owner == HEAP->old_data_space() ||
74 owner == HEAP->map_space() ||
75 owner == HEAP->cell_space() ||
76 owner == HEAP->code_space());
77 Initialize(reinterpret_cast<PagedSpace*>(owner),
78 page->area_start(),
79 page->area_end(),
80 kOnePageOnly,
81 size_func);
82 ASSERT(page->WasSweptPrecisely());
83 }
84
85
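// Shared initializer for the iterator constructors above: remembers the
// space being iterated, the current and end addresses within the current
// page, the page mode (one page vs. all pages in the space) and an optional
// callback for computing object sizes.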
void HeapObjectIterator::Initialize(PagedSpace* space,
87 Address cur, Address end,
88 HeapObjectIterator::PageMode mode,
89 HeapObjectCallback size_f) {
90 // Check that we actually can iterate this space.
91 ASSERT(!space->was_swept_conservatively());
92
93 space_ = space;
94 cur_addr_ = cur;
95 cur_end_ = end;
96 page_mode_ = mode;
97 size_func_ = size_f;
98 }
99
100
// We have hit the end of the current page and should advance to the next
// block of objects, which starts on the following page.
bool HeapObjectIterator::AdvanceToNextPage() {
104 ASSERT(cur_addr_ == cur_end_);
105 if (page_mode_ == kOnePageOnly) return false;
106 Page* cur_page;
107 if (cur_addr_ == NULL) {
108 cur_page = space_->anchor();
109 } else {
110 cur_page = Page::FromAddress(cur_addr_ - 1);
111 ASSERT(cur_addr_ == cur_page->area_end());
112 }
113 cur_page = cur_page->next_page();
114 if (cur_page == space_->anchor()) return false;
115 cur_addr_ = cur_page->area_start();
116 cur_end_ = cur_page->area_end();
117 ASSERT(cur_page->WasSweptPrecisely());
118 return true;
119 }
120
121
122 // -----------------------------------------------------------------------------
123 // CodeRange
124
125
CodeRange::CodeRange(Isolate* isolate)
127 : isolate_(isolate),
128 code_range_(NULL),
129 free_list_(0),
130 allocation_list_(0),
131 current_allocation_block_index_(0) {
132 }
133
134
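// Reserves a contiguous virtual memory range of the requested size for code
// objects, aligns its base to MemoryChunk::kAlignment and seeds the
// allocation list with a single free block covering the aligned range.
// Returns false if the reservation cannot be made.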
bool CodeRange::SetUp(const size_t requested) {
136 ASSERT(code_range_ == NULL);
137
138 code_range_ = new VirtualMemory(requested);
139 CHECK(code_range_ != NULL);
140 if (!code_range_->IsReserved()) {
141 delete code_range_;
142 code_range_ = NULL;
143 return false;
144 }
145
146 // We are sure that we have mapped a block of requested addresses.
147 ASSERT(code_range_->size() == requested);
148 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
149 Address base = reinterpret_cast<Address>(code_range_->address());
150 Address aligned_base =
151 RoundUp(reinterpret_cast<Address>(code_range_->address()),
152 MemoryChunk::kAlignment);
153 size_t size = code_range_->size() - (aligned_base - base);
154 allocation_list_.Add(FreeBlock(aligned_base, size));
155 current_allocation_block_index_ = 0;
156 return true;
157 }
158
159
int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
161 const FreeBlock* right) {
162 // The entire point of CodeRange is that the difference between two
163 // addresses in the range can be represented as a signed 32-bit int,
164 // so the cast is semantically correct.
165 return static_cast<int>(left->start - right->start);
166 }
167
168
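// Moves current_allocation_block_index_ forward to a block of at least
// 'requested' bytes. If no such block remains, the free list and the
// allocation list are merged, sorted by address and coalesced before
// searching again; if that also fails, we report out of memory.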
void CodeRange::GetNextAllocationBlock(size_t requested) {
170 for (current_allocation_block_index_++;
171 current_allocation_block_index_ < allocation_list_.length();
172 current_allocation_block_index_++) {
173 if (requested <= allocation_list_[current_allocation_block_index_].size) {
174 return; // Found a large enough allocation block.
175 }
176 }
177
178 // Sort and merge the free blocks on the free list and the allocation list.
179 free_list_.AddAll(allocation_list_);
180 allocation_list_.Clear();
181 free_list_.Sort(&CompareFreeBlockAddress);
182 for (int i = 0; i < free_list_.length();) {
183 FreeBlock merged = free_list_[i];
184 i++;
185 // Add adjacent free blocks to the current merged block.
186 while (i < free_list_.length() &&
187 free_list_[i].start == merged.start + merged.size) {
188 merged.size += free_list_[i].size;
189 i++;
190 }
191 if (merged.size > 0) {
192 allocation_list_.Add(merged);
193 }
194 }
195 free_list_.Clear();
196
197 for (current_allocation_block_index_ = 0;
198 current_allocation_block_index_ < allocation_list_.length();
199 current_allocation_block_index_++) {
200 if (requested <= allocation_list_[current_allocation_block_index_].size) {
201 return; // Found a large enough allocation block.
202 }
203 }
204
205 // Code range is full or too fragmented.
206 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
207 }
208
209
210
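// Commits 'requested' bytes (rounded up to MemoryChunk::kAlignment) at the
// start of the current allocation block and returns its start address, or
// NULL if committing fails. '*allocated' receives the amount actually taken
// from the block, which may be the whole block to avoid leaving a remainder
// too small to be useful.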
Address CodeRange::AllocateRawMemory(const size_t requested,
212 size_t* allocated) {
213 ASSERT(current_allocation_block_index_ < allocation_list_.length());
214 if (requested > allocation_list_[current_allocation_block_index_].size) {
215 // Find an allocation block large enough. This function call may
216 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
217 GetNextAllocationBlock(requested);
218 }
219 // Commit the requested memory at the start of the current allocation block.
220 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
221 FreeBlock current = allocation_list_[current_allocation_block_index_];
222 if (aligned_requested >= (current.size - Page::kPageSize)) {
223 // Don't leave a small free block, useless for a large object or chunk.
224 *allocated = current.size;
225 } else {
226 *allocated = aligned_requested;
227 }
228 ASSERT(*allocated <= current.size);
229 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
230 if (!MemoryAllocator::CommitCodePage(code_range_,
231 current.start,
232 *allocated)) {
233 *allocated = 0;
234 return NULL;
235 }
236 allocation_list_[current_allocation_block_index_].start += *allocated;
237 allocation_list_[current_allocation_block_index_].size -= *allocated;
238 if (*allocated == current.size) {
239 GetNextAllocationBlock(0); // This block is used up, get the next one.
240 }
241 return current.start;
242 }
243
244
void CodeRange::FreeRawMemory(Address address, size_t length) {
246 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
247 free_list_.Add(FreeBlock(address, length));
248 code_range_->Uncommit(address, length);
249 }
250
251
void CodeRange::TearDown() {
253 delete code_range_; // Frees all memory in the virtual memory range.
254 code_range_ = NULL;
255 free_list_.Free();
256 allocation_list_.Free();
257 }
258
259
260 // -----------------------------------------------------------------------------
261 // MemoryAllocator
262 //
263
MemoryAllocator::MemoryAllocator(Isolate* isolate)
265 : isolate_(isolate),
266 capacity_(0),
267 capacity_executable_(0),
268 size_(0),
269 size_executable_(0) {
270 }
271
272
bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
274 capacity_ = RoundUp(capacity, Page::kPageSize);
275 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
276 ASSERT_GE(capacity_, capacity_executable_);
277
278 size_ = 0;
279 size_executable_ = 0;
280
281 return true;
282 }
283
284
void MemoryAllocator::TearDown() {
286 // Check that spaces were torn down before MemoryAllocator.
287 ASSERT(size_ == 0);
288 // TODO(gc) this will be true again when we fix FreeMemory.
289 // ASSERT(size_executable_ == 0);
290 capacity_ = 0;
291 capacity_executable_ = 0;
292 }
293
294
void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
296 Executability executable) {
297 // TODO(gc) make code_range part of memory allocator?
298 ASSERT(reservation->IsReserved());
299 size_t size = reservation->size();
300 ASSERT(size_ >= size);
301 size_ -= size;
302
303 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
304
305 if (executable == EXECUTABLE) {
306 ASSERT(size_executable_ >= size);
307 size_executable_ -= size;
308 }
309 // Code which is part of the code-range does not have its own VirtualMemory.
310 ASSERT(!isolate_->code_range()->contains(
311 static_cast<Address>(reservation->address())));
312 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
313 reservation->Release();
314 }
315
316
void MemoryAllocator::FreeMemory(Address base,
318 size_t size,
319 Executability executable) {
320 // TODO(gc) make code_range part of memory allocator?
321 ASSERT(size_ >= size);
322 size_ -= size;
323
324 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
325
326 if (executable == EXECUTABLE) {
327 ASSERT(size_executable_ >= size);
328 size_executable_ -= size;
329 }
330 if (isolate_->code_range()->contains(static_cast<Address>(base))) {
331 ASSERT(executable == EXECUTABLE);
332 isolate_->code_range()->FreeRawMemory(base, size);
333 } else {
334 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
335 bool result = VirtualMemory::ReleaseRegion(base, size);
336 USE(result);
337 ASSERT(result);
338 }
339 }
340
341
Address MemoryAllocator::ReserveAlignedMemory(size_t size,
343 size_t alignment,
344 VirtualMemory* controller) {
345 VirtualMemory reservation(size, alignment);
346
347 if (!reservation.IsReserved()) return NULL;
348 size_ += reservation.size();
349 Address base = RoundUp(static_cast<Address>(reservation.address()),
350 alignment);
351 controller->TakeControl(&reservation);
352 return base;
353 }
354
355
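// Reserves an aligned region of the given size and commits it, using the
// code-page layout (header plus guard pages) for EXECUTABLE requests. On
// success the caller's VirtualMemory takes ownership of the reservation; on
// failure the reservation is released and NULL is returned.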
Address MemoryAllocator::AllocateAlignedMemory(size_t size,
357 size_t alignment,
358 Executability executable,
359 VirtualMemory* controller) {
360 VirtualMemory reservation;
361 Address base = ReserveAlignedMemory(size, alignment, &reservation);
362 if (base == NULL) return NULL;
363
364 if (executable == EXECUTABLE) {
365 if (!CommitCodePage(&reservation, base, size)) {
366 base = NULL;
367 }
368 } else {
369 if (!reservation.Commit(base, size, false)) {
370 base = NULL;
371 }
372 }
373
374 if (base == NULL) {
// Failed to commit the body. Release the mapping and any partially
// committed regions inside it.
377 reservation.Release();
378 return NULL;
379 }
380
381 controller->TakeControl(&reservation);
382 return base;
383 }
384
385
void Page::InitializeAsAnchor(PagedSpace* owner) {
387 set_owner(owner);
388 set_prev_page(this);
389 set_next_page(this);
390 }
391
392
NewSpacePage* NewSpacePage::Initialize(Heap* heap,
394 Address start,
395 SemiSpace* semi_space) {
396 Address area_start = start + NewSpacePage::kObjectStartOffset;
397 Address area_end = start + Page::kPageSize;
398
399 MemoryChunk* chunk = MemoryChunk::Initialize(heap,
400 start,
401 Page::kPageSize,
402 area_start,
403 area_end,
404 NOT_EXECUTABLE,
405 semi_space);
406 chunk->set_next_chunk(NULL);
407 chunk->set_prev_chunk(NULL);
408 chunk->initialize_scan_on_scavenge(true);
409 bool in_to_space = (semi_space->id() != kFromSpace);
410 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
411 : MemoryChunk::IN_FROM_SPACE);
412 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
413 : MemoryChunk::IN_TO_SPACE));
414 NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
415 heap->incremental_marking()->SetNewSpacePageFlags(page);
416 return page;
417 }
418
419
void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
421 set_owner(semi_space);
422 set_next_chunk(this);
423 set_prev_chunk(this);
// Clearing all flags marks this anchor page as not being in new-space;
// all real new-space pages will have the in-new-space flags set.
426 SetFlags(0, ~0);
427 }
428
429
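// Initializes the MemoryChunk header located at 'base': size, usable area
// boundaries, owning space, slots buffer, skip list, live-byte count and
// mark bitmap are reset, and the chunk starts out flagged as precisely
// swept.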
MemoryChunk* MemoryChunk::Initialize(Heap* heap,
431 Address base,
432 size_t size,
433 Address area_start,
434 Address area_end,
435 Executability executable,
436 Space* owner) {
437 MemoryChunk* chunk = FromAddress(base);
438
439 ASSERT(base == chunk->address());
440
441 chunk->heap_ = heap;
442 chunk->size_ = size;
443 chunk->area_start_ = area_start;
444 chunk->area_end_ = area_end;
445 chunk->flags_ = 0;
446 chunk->set_owner(owner);
447 chunk->InitializeReservedMemory();
448 chunk->slots_buffer_ = NULL;
449 chunk->skip_list_ = NULL;
450 chunk->ResetLiveBytes();
451 Bitmap::Clear(chunk);
452 chunk->initialize_scan_on_scavenge(false);
453 chunk->SetFlag(WAS_SWEPT_PRECISELY);
454
455 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
456 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
457
458 if (executable == EXECUTABLE) {
459 chunk->SetFlag(IS_EXECUTABLE);
460 }
461
462 if (owner == heap->old_data_space()) {
463 chunk->SetFlag(CONTAINS_ONLY_DATA);
464 }
465
466 return chunk;
467 }
468
469
void MemoryChunk::InsertAfter(MemoryChunk* other) {
471 next_chunk_ = other->next_chunk_;
472 prev_chunk_ = other;
473 other->next_chunk_->prev_chunk_ = this;
474 other->next_chunk_ = this;
475 }
476
477
void MemoryChunk::Unlink() {
479 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
480 heap_->decrement_scan_on_scavenge_pages();
481 ClearFlag(SCAN_ON_SCAVENGE);
482 }
483 next_chunk_->prev_chunk_ = prev_chunk_;
484 prev_chunk_->next_chunk_ = next_chunk_;
485 prev_chunk_ = NULL;
486 next_chunk_ = NULL;
487 }
488
489
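// Allocates a chunk with 'body_size' bytes of usable area. Executable
// chunks get the code-page layout (header and guard pages) and are taken
// from the code range when one exists; all other chunks are plain aligned
// reservations committed directly.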
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
491 Executability executable,
492 Space* owner) {
493 size_t chunk_size;
494 Heap* heap = isolate_->heap();
495 Address base = NULL;
496 VirtualMemory reservation;
497 Address area_start = NULL;
498 Address area_end = NULL;
499 if (executable == EXECUTABLE) {
500 chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
501 OS::CommitPageSize()) + CodePageGuardSize();
502
503 // Check executable memory limit.
504 if (size_executable_ + chunk_size > capacity_executable_) {
505 LOG(isolate_,
506 StringEvent("MemoryAllocator::AllocateRawMemory",
507 "V8 Executable Allocation capacity exceeded"));
508 return NULL;
509 }
510
511 // Allocate executable memory either from code range or from the
512 // OS.
513 if (isolate_->code_range()->exists()) {
514 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
515 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
516 MemoryChunk::kAlignment));
517 if (base == NULL) return NULL;
518 size_ += chunk_size;
519 // Update executable memory size.
520 size_executable_ += chunk_size;
521 } else {
522 base = AllocateAlignedMemory(chunk_size,
523 MemoryChunk::kAlignment,
524 executable,
525 &reservation);
526 if (base == NULL) return NULL;
527 // Update executable memory size.
528 size_executable_ += reservation.size();
529 }
530
531 #ifdef DEBUG
532 ZapBlock(base, CodePageGuardStartOffset());
533 ZapBlock(base + CodePageAreaStartOffset(), body_size);
534 #endif
535 area_start = base + CodePageAreaStartOffset();
536 area_end = area_start + body_size;
537 } else {
538 chunk_size = MemoryChunk::kObjectStartOffset + body_size;
539 base = AllocateAlignedMemory(chunk_size,
540 MemoryChunk::kAlignment,
541 executable,
542 &reservation);
543
544 if (base == NULL) return NULL;
545
546 #ifdef DEBUG
547 ZapBlock(base, chunk_size);
548 #endif
549
550 area_start = base + Page::kObjectStartOffset;
551 area_end = base + chunk_size;
552 }
553
554 isolate_->counters()->memory_allocated()->
555 Increment(static_cast<int>(chunk_size));
556
557 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
558 if (owner != NULL) {
559 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
560 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
561 }
562
563 MemoryChunk* result = MemoryChunk::Initialize(heap,
564 base,
565 chunk_size,
566 area_start,
567 area_end,
568 executable,
569 owner);
570 result->set_reserved_memory(&reservation);
571 return result;
572 }
573
574
Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
576 Executability executable) {
577 MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
578 executable,
579 owner);
580
581 if (chunk == NULL) return NULL;
582
583 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
584 }
585
586
LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
588 Executability executable,
589 Space* owner) {
590 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
591 if (chunk == NULL) return NULL;
592 return LargePage::Initialize(isolate_->heap(), chunk);
593 }
594
595
void MemoryAllocator::Free(MemoryChunk* chunk) {
597 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
598 if (chunk->owner() != NULL) {
599 ObjectSpace space =
600 static_cast<ObjectSpace>(1 << chunk->owner()->identity());
601 PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
602 }
603
604 isolate_->heap()->RememberUnmappedPage(
605 reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
606
607 delete chunk->slots_buffer();
608 delete chunk->skip_list();
609
610 VirtualMemory* reservation = chunk->reserved_memory();
611 if (reservation->IsReserved()) {
612 FreeMemory(reservation, chunk->executable());
613 } else {
614 FreeMemory(chunk->address(),
615 chunk->size(),
616 chunk->executable());
617 }
618 }
619
620
bool MemoryAllocator::CommitBlock(Address start,
622 size_t size,
623 Executability executable) {
624 if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
625 #ifdef DEBUG
626 ZapBlock(start, size);
627 #endif
628 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
629 return true;
630 }
631
632
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
634 if (!VirtualMemory::UncommitRegion(start, size)) return false;
635 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
636 return true;
637 }
638
639
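// Overwrites the block with kZapValue one pointer-sized word at a time.
// Callers use this in debug builds so that reads through stale pointers
// into freed or uncommitted memory are easy to recognize.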
void MemoryAllocator::ZapBlock(Address start, size_t size) {
641 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
642 Memory::Address_at(start + s) = kZapValue;
643 }
644 }
645
646
void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
648 AllocationAction action,
649 size_t size) {
650 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
651 MemoryAllocationCallbackRegistration registration =
652 memory_allocation_callbacks_[i];
653 if ((registration.space & space) == space &&
654 (registration.action & action) == action)
655 registration.callback(space, action, static_cast<int>(size));
656 }
657 }
658
659
bool MemoryAllocator::MemoryAllocationCallbackRegistered(
661 MemoryAllocationCallback callback) {
662 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
663 if (memory_allocation_callbacks_[i].callback == callback) return true;
664 }
665 return false;
666 }
667
668
void MemoryAllocator::AddMemoryAllocationCallback(
670 MemoryAllocationCallback callback,
671 ObjectSpace space,
672 AllocationAction action) {
673 ASSERT(callback != NULL);
674 MemoryAllocationCallbackRegistration registration(callback, space, action);
675 ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
676 return memory_allocation_callbacks_.Add(registration);
677 }
678
679
void MemoryAllocator::RemoveMemoryAllocationCallback(
681 MemoryAllocationCallback callback) {
682 ASSERT(callback != NULL);
683 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
684 if (memory_allocation_callbacks_[i].callback == callback) {
685 memory_allocation_callbacks_.Remove(i);
686 return;
687 }
688 }
689 UNREACHABLE();
690 }
691
692
693 #ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
695 float pct = static_cast<float>(capacity_ - size_) / capacity_;
696 PrintF(" capacity: %" V8_PTR_PREFIX "d"
697 ", used: %" V8_PTR_PREFIX "d"
698 ", available: %%%d\n\n",
699 capacity_, size_, static_cast<int>(pct*100));
700 }
701 #endif
702
703
int MemoryAllocator::CodePageGuardStartOffset() {
705 // We are guarding code pages: the first OS page after the header
706 // will be protected as non-writable.
707 return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
708 }
709
710
int MemoryAllocator::CodePageGuardSize() {
712 return static_cast<int>(OS::CommitPageSize());
713 }
714
715
int MemoryAllocator::CodePageAreaStartOffset() {
717 // We are guarding code pages: the first OS page after the header
718 // will be protected as non-writable.
719 return CodePageGuardStartOffset() + CodePageGuardSize();
720 }
721
722
int MemoryAllocator::CodePageAreaEndOffset() {
724 // We are guarding code pages: the last OS page will be protected as
725 // non-writable.
726 return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
727 }
728
729
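// Commits an executable page with the following layout:
//
//   | header (not executable) | guard | executable area | guard |
//
// The header holds the MemoryChunk fields; the guard pages are left
// protected to catch accesses that run past the header or the code area.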
bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
731 Address start,
732 size_t size) {
733 // Commit page header (not executable).
734 if (!vm->Commit(start,
735 CodePageGuardStartOffset(),
736 false)) {
737 return false;
738 }
739
740 // Create guard page after the header.
741 if (!vm->Guard(start + CodePageGuardStartOffset())) {
742 return false;
743 }
744
745 // Commit page body (executable).
746 size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
747 if (!vm->Commit(start + CodePageAreaStartOffset(),
748 area_size,
749 true)) {
750 return false;
751 }
752
753 // Create guard page after the allocatable area.
754 if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
755 return false;
756 }
757
758 return true;
759 }
760
761
762 // -----------------------------------------------------------------------------
763 // MemoryChunk implementation
764
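// Adjusts the live-byte count of the page containing 'address' on behalf of
// the mutator. For old-space pages that have not been swept yet, the owning
// space's unswept-free-bytes counter is adjusted as well so the two stay
// consistent.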
void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
766 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
767 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
768 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
769 }
770 chunk->IncrementLiveBytes(by);
771 }
772
773 // -----------------------------------------------------------------------------
774 // PagedSpace implementation
775
PagedSpace::PagedSpace(Heap* heap,
777 intptr_t max_capacity,
778 AllocationSpace id,
779 Executability executable)
780 : Space(heap, id, executable),
781 free_list_(this),
782 was_swept_conservatively_(false),
783 first_unswept_page_(Page::FromAddress(NULL)),
784 unswept_free_bytes_(0) {
785 if (id == CODE_SPACE) {
786 area_size_ = heap->isolate()->memory_allocator()->
787 CodePageAreaSize();
788 } else {
789 area_size_ = Page::kPageSize - Page::kObjectStartOffset;
790 }
791 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
792 * AreaSize();
793 accounting_stats_.Clear();
794
795 allocation_info_.top = NULL;
796 allocation_info_.limit = NULL;
797
798 anchor_.InitializeAsAnchor(this);
799 }
800
801
bool PagedSpace::SetUp() {
803 return true;
804 }
805
806
bool PagedSpace::HasBeenSetUp() {
808 return true;
809 }
810
811
void PagedSpace::TearDown() {
813 PageIterator iterator(this);
814 while (iterator.has_next()) {
815 heap()->isolate()->memory_allocator()->Free(iterator.next());
816 }
817 anchor_.set_next_page(&anchor_);
818 anchor_.set_prev_page(&anchor_);
819 accounting_stats_.Clear();
820 }
821
822
MaybeObject* PagedSpace::FindObject(Address addr) {
824 // Note: this function can only be called on precisely swept spaces.
825 ASSERT(!heap()->mark_compact_collector()->in_use());
826
827 if (!Contains(addr)) return Failure::Exception();
828
829 Page* p = Page::FromAddress(addr);
830 HeapObjectIterator it(p, NULL);
831 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
832 Address cur = obj->address();
833 Address next = cur + obj->Size();
834 if ((cur <= addr) && (addr < next)) return obj;
835 }
836
837 UNREACHABLE();
838 return Failure::Exception();
839 }
840
bool PagedSpace::CanExpand() {
842 ASSERT(max_capacity_ % AreaSize() == 0);
843 ASSERT(Capacity() % AreaSize() == 0);
844
845 if (Capacity() == max_capacity_) return false;
846
847 ASSERT(Capacity() < max_capacity_);
848
849 // Are we going to exceed capacity for this space?
850 if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
851
852 return true;
853 }
854
bool PagedSpace::Expand() {
856 if (!CanExpand()) return false;
857
858 Page* p = heap()->isolate()->memory_allocator()->
859 AllocatePage(this, executable());
860 if (p == NULL) return false;
861
862 ASSERT(Capacity() <= max_capacity_);
863
864 p->InsertAfter(anchor_.prev_page());
865
866 return true;
867 }
868
869
int PagedSpace::CountTotalPages() {
871 PageIterator it(this);
872 int count = 0;
873 while (it.has_next()) {
874 it.next();
875 count++;
876 }
877 return count;
878 }
879
880
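// Releases an empty page: detaches it from the unswept-page list, the free
// list and the current allocation area, unlinks it from the space, and then
// either frees it immediately (data-only pages) or queues it for freeing.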
void PagedSpace::ReleasePage(Page* page) {
882 ASSERT(page->LiveBytes() == 0);
883 ASSERT(AreaSize() == page->area_size());
884
885 // Adjust list of unswept pages if the page is the head of the list.
886 if (first_unswept_page_ == page) {
887 first_unswept_page_ = page->next_page();
888 if (first_unswept_page_ == anchor()) {
889 first_unswept_page_ = Page::FromAddress(NULL);
890 }
891 }
892
893 if (page->WasSwept()) {
894 intptr_t size = free_list_.EvictFreeListItems(page);
895 accounting_stats_.AllocateBytes(size);
896 ASSERT_EQ(AreaSize(), static_cast<int>(size));
897 } else {
898 DecreaseUnsweptFreeBytes(page);
899 }
900
901 if (Page::FromAllocationTop(allocation_info_.top) == page) {
902 allocation_info_.top = allocation_info_.limit = NULL;
903 }
904
905 page->Unlink();
906 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
907 heap()->isolate()->memory_allocator()->Free(page);
908 } else {
909 heap()->QueueMemoryChunkForFree(page);
910 }
911
912 ASSERT(Capacity() > 0);
913 ASSERT(Capacity() % AreaSize() == 0);
914 accounting_stats_.ShrinkSpace(AreaSize());
915 }
916
917
void PagedSpace::ReleaseAllUnusedPages() {
919 PageIterator it(this);
920 while (it.has_next()) {
921 Page* page = it.next();
922 if (!page->WasSwept()) {
923 if (page->LiveBytes() == 0) ReleasePage(page);
924 } else {
925 HeapObject* obj = HeapObject::FromAddress(page->area_start());
926 if (obj->IsFreeSpace() &&
927 FreeSpace::cast(obj)->size() == AreaSize()) {
928 // Sometimes we allocate memory from free list but don't
929 // immediately initialize it (e.g. see PagedSpace::ReserveSpace
930 // called from Heap::ReserveSpace that can cause GC before
931 // reserved space is actually initialized).
932 // Thus we can't simply assume that obj represents a valid
933 // node still owned by a free list
934 // Instead we should verify that the page is fully covered
935 // by free list items.
936 FreeList::SizeStats sizes;
937 free_list_.CountFreeListItems(page, &sizes);
938 if (sizes.Total() == AreaSize()) {
939 ReleasePage(page);
940 }
941 }
942 }
943 }
944 heap()->FreeQueuedChunks();
945 }
946
947
948 #ifdef DEBUG
void PagedSpace::Print() { }
950 #endif
951
952
953 #ifdef DEBUG
void PagedSpace::Verify(ObjectVisitor* visitor) {
955 // We can only iterate over the pages if they were swept precisely.
956 if (was_swept_conservatively_) return;
957
958 bool allocation_pointer_found_in_space =
959 (allocation_info_.top == allocation_info_.limit);
960 PageIterator page_iterator(this);
961 while (page_iterator.has_next()) {
962 Page* page = page_iterator.next();
963 ASSERT(page->owner() == this);
964 if (page == Page::FromAllocationTop(allocation_info_.top)) {
965 allocation_pointer_found_in_space = true;
966 }
967 ASSERT(page->WasSweptPrecisely());
968 HeapObjectIterator it(page, NULL);
969 Address end_of_previous_object = page->area_start();
970 Address top = page->area_end();
971 int black_size = 0;
972 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
973 ASSERT(end_of_previous_object <= object->address());
974
975 // The first word should be a map, and we expect all map pointers to
976 // be in map space.
977 Map* map = object->map();
978 ASSERT(map->IsMap());
979 ASSERT(heap()->map_space()->Contains(map));
980
981 // Perform space-specific object verification.
982 VerifyObject(object);
983
984 // The object itself should look OK.
985 object->Verify();
986
987 // All the interior pointers should be contained in the heap.
988 int size = object->Size();
989 object->IterateBody(map->instance_type(), size, visitor);
990 if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
991 black_size += size;
992 }
993
994 ASSERT(object->address() + size <= top);
995 end_of_previous_object = object->address() + size;
996 }
997 ASSERT_LE(black_size, page->LiveBytes());
998 }
999 ASSERT(allocation_pointer_found_in_space);
1000 }
1001 #endif
1002
1003
1004 // -----------------------------------------------------------------------------
1005 // NewSpace implementation
1006
1007
bool NewSpace::SetUp(int reserved_semispace_capacity,
1009 int maximum_semispace_capacity) {
1010 // Set up new space based on the preallocated memory block defined by
1011 // start and size. The provided space is divided into two semi-spaces.
1012 // To support fast containment testing in the new space, the size of
1013 // this chunk must be a power of two and it must be aligned to its size.
1014 int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1015
1016 size_t size = 2 * reserved_semispace_capacity;
1017 Address base =
1018 heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1019 size, size, &reservation_);
1020 if (base == NULL) return false;
1021
1022 chunk_base_ = base;
1023 chunk_size_ = static_cast<uintptr_t>(size);
1024 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1025
1026 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1027 ASSERT(IsPowerOf2(maximum_semispace_capacity));
1028
1029 // Allocate and set up the histogram arrays if necessary.
1030 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1031 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1032
1033 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1034 promoted_histogram_[name].set_name(#name);
1035 INSTANCE_TYPE_LIST(SET_NAME)
1036 #undef SET_NAME
1037
1038 ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1039 ASSERT(static_cast<intptr_t>(chunk_size_) >=
1040 2 * heap()->ReservedSemiSpaceSize());
1041 ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1042
1043 to_space_.SetUp(chunk_base_,
1044 initial_semispace_capacity,
1045 maximum_semispace_capacity);
1046 from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1047 initial_semispace_capacity,
1048 maximum_semispace_capacity);
1049 if (!to_space_.Commit()) {
1050 return false;
1051 }
1052
1053 start_ = chunk_base_;
1054 address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1055 object_mask_ = address_mask_ | kHeapObjectTagMask;
1056 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1057
1058 ResetAllocationInfo();
1059
1060 return true;
1061 }
1062
1063
void NewSpace::TearDown() {
1065 if (allocated_histogram_) {
1066 DeleteArray(allocated_histogram_);
1067 allocated_histogram_ = NULL;
1068 }
1069 if (promoted_histogram_) {
1070 DeleteArray(promoted_histogram_);
1071 promoted_histogram_ = NULL;
1072 }
1073
1074 start_ = NULL;
1075 allocation_info_.top = NULL;
1076 allocation_info_.limit = NULL;
1077
1078 to_space_.TearDown();
1079 from_space_.TearDown();
1080
1081 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1082
1083 ASSERT(reservation_.IsReserved());
1084 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1085 NOT_EXECUTABLE);
1086 chunk_base_ = NULL;
1087 chunk_size_ = 0;
1088 }
1089
1090
void NewSpace::Flip() {
1092 SemiSpace::Swap(&from_space_, &to_space_);
1093 }
1094
1095
void NewSpace::Grow() {
1097 // Double the semispace size but only up to maximum capacity.
1098 ASSERT(Capacity() < MaximumCapacity());
1099 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
1100 if (to_space_.GrowTo(new_capacity)) {
1101 // Only grow from space if we managed to grow to-space.
1102 if (!from_space_.GrowTo(new_capacity)) {
1103 // If we managed to grow to-space but couldn't grow from-space,
1104 // attempt to shrink to-space.
1105 if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1106 // We are in an inconsistent state because we could not
1107 // commit/uncommit memory from new space.
1108 V8::FatalProcessOutOfMemory("Failed to grow new space.");
1109 }
1110 }
1111 }
1112 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1113 }
1114
1115
void NewSpace::Shrink() {
1117 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
1118 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1119 if (rounded_new_capacity < Capacity() &&
1120 to_space_.ShrinkTo(rounded_new_capacity)) {
1121 // Only shrink from-space if we managed to shrink to-space.
1122 from_space_.Reset();
1123 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1124 // If we managed to shrink to-space but couldn't shrink from
1125 // space, attempt to grow to-space again.
1126 if (!to_space_.GrowTo(from_space_.Capacity())) {
1127 // We are in an inconsistent state because we could not
1128 // commit/uncommit memory from new space.
1129 V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1130 }
1131 }
1132 }
1133 allocation_info_.limit = to_space_.page_high();
1134 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1135 }
1136
1137
void NewSpace::UpdateAllocationInfo() {
1139 allocation_info_.top = to_space_.page_low();
1140 allocation_info_.limit = to_space_.page_high();
1141
1142 // Lower limit during incremental marking.
1143 if (heap()->incremental_marking()->IsMarking() &&
1144 inline_allocation_limit_step() != 0) {
1145 Address new_limit =
1146 allocation_info_.top + inline_allocation_limit_step();
1147 allocation_info_.limit = Min(new_limit, allocation_info_.limit);
1148 }
1149 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1150 }
1151
1152
void NewSpace::ResetAllocationInfo() {
1154 to_space_.Reset();
1155 UpdateAllocationInfo();
1156 pages_used_ = 0;
1157 // Clear all mark-bits in the to-space.
1158 NewSpacePageIterator it(&to_space_);
1159 while (it.has_next()) {
1160 Bitmap::Clear(it.next());
1161 }
1162 }
1163
1164
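// Tries to switch allocation to the next page of to-space. The unused tail
// of the current page is filled with a filler object so that the space
// stays iterable. Returns false if the current page is already empty or if
// to-space has no further page to advance to.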
bool NewSpace::AddFreshPage() {
1166 Address top = allocation_info_.top;
1167 if (NewSpacePage::IsAtStart(top)) {
1168 // The current page is already empty. Don't try to make another.
1169
1170 // We should only get here if someone asks to allocate more
1171 // than what can be stored in a single page.
1172 // TODO(gc): Change the limit on new-space allocation to prevent this
1173 // from happening (all such allocations should go directly to LOSpace).
1174 return false;
1175 }
1176 if (!to_space_.AdvancePage()) {
1177 // Failed to get a new page in to-space.
1178 return false;
1179 }
1180
1181 // Clear remainder of current page.
1182 Address limit = NewSpacePage::FromLimit(top)->area_end();
1183 if (heap()->gc_state() == Heap::SCAVENGE) {
1184 heap()->promotion_queue()->SetNewLimit(limit);
1185 heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
1186 }
1187
1188 int remaining_in_page = static_cast<int>(limit - top);
1189 heap()->CreateFillerObjectAt(top, remaining_in_page);
1190 pages_used_++;
1191 UpdateAllocationInfo();
1192
1193 return true;
1194 }
1195
1196
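// Slow path of AllocateRaw: either incremental marking lowered the inline
// allocation limit (in which case a marking step is performed and the limit
// is raised again), or the current page is full and a fresh page is
// requested. If neither helps, the caller has to retry after a GC.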
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
1198 Address old_top = allocation_info_.top;
1199 Address new_top = old_top + size_in_bytes;
1200 Address high = to_space_.page_high();
1201 if (allocation_info_.limit < high) {
1202 // Incremental marking has lowered the limit to get a
1203 // chance to do a step.
1204 allocation_info_.limit = Min(
1205 allocation_info_.limit + inline_allocation_limit_step_,
1206 high);
1207 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1208 heap()->incremental_marking()->Step(
1209 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1210 top_on_previous_step_ = new_top;
1211 return AllocateRaw(size_in_bytes);
1212 } else if (AddFreshPage()) {
1213 // Switched to new page. Try allocating again.
1214 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1215 heap()->incremental_marking()->Step(
1216 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1217 top_on_previous_step_ = to_space_.page_low();
1218 return AllocateRaw(size_in_bytes);
1219 } else {
1220 return Failure::RetryAfterGC();
1221 }
1222 }
1223
1224
1225 #ifdef DEBUG
1226 // We do not use the SemiSpaceIterator because verification doesn't assume
1227 // that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
1229 // The allocation pointer should be in the space or at the very end.
1230 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1231
1232 // There should be objects packed in from the low address up to the
1233 // allocation pointer.
1234 Address current = to_space_.first_page()->area_start();
1235 CHECK_EQ(current, to_space_.space_start());
1236
1237 while (current != top()) {
1238 if (!NewSpacePage::IsAtEnd(current)) {
1239 // The allocation pointer should not be in the middle of an object.
1240 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1241 current < top());
1242
1243 HeapObject* object = HeapObject::FromAddress(current);
1244
1245 // The first word should be a map, and we expect all map pointers to
1246 // be in map space.
1247 Map* map = object->map();
1248 CHECK(map->IsMap());
1249 CHECK(heap()->map_space()->Contains(map));
1250
1251 // The object should not be code or a map.
1252 CHECK(!object->IsMap());
1253 CHECK(!object->IsCode());
1254
1255 // The object itself should look OK.
1256 object->Verify();
1257
1258 // All the interior pointers should be contained in the heap.
1259 VerifyPointersVisitor visitor;
1260 int size = object->Size();
1261 object->IterateBody(map->instance_type(), size, &visitor);
1262
1263 current += size;
1264 } else {
1265 // At end of page, switch to next page.
1266 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1267 // Next page should be valid.
1268 CHECK(!page->is_anchor());
1269 current = page->area_start();
1270 }
1271 }
1272
1273 // Check semi-spaces.
1274 ASSERT_EQ(from_space_.id(), kFromSpace);
1275 ASSERT_EQ(to_space_.id(), kToSpace);
1276 from_space_.Verify();
1277 to_space_.Verify();
1278 }
1279 #endif
1280
1281 // -----------------------------------------------------------------------------
1282 // SemiSpace implementation
1283
void SemiSpace::SetUp(Address start,
1285 int initial_capacity,
1286 int maximum_capacity) {
1287 // Creates a space in the young generation. The constructor does not
1288 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1289 // memory of size 'capacity' when set up, and does not grow or shrink
1290 // otherwise. In the mark-compact collector, the memory region of the from
1291 // space is used as the marking stack. It requires contiguous memory
1292 // addresses.
1293 ASSERT(maximum_capacity >= Page::kPageSize);
1294 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1295 capacity_ = initial_capacity;
1296 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1297 committed_ = false;
1298 start_ = start;
1299 address_mask_ = ~(maximum_capacity - 1);
1300 object_mask_ = address_mask_ | kHeapObjectTagMask;
1301 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1302 age_mark_ = start_;
1303 }
1304
1305
void SemiSpace::TearDown() {
1307 start_ = NULL;
1308 capacity_ = 0;
1309 }
1310
1311
bool SemiSpace::Commit() {
1313 ASSERT(!is_committed());
1314 int pages = capacity_ / Page::kPageSize;
1315 Address end = start_ + maximum_capacity_;
1316 Address start = end - pages * Page::kPageSize;
1317 if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
1318 capacity_,
1319 executable())) {
1320 return false;
1321 }
1322
1323 NewSpacePage* page = anchor();
1324 for (int i = 1; i <= pages; i++) {
1325 NewSpacePage* new_page =
1326 NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
1327 new_page->InsertAfter(page);
1328 page = new_page;
1329 }
1330
1331 committed_ = true;
1332 Reset();
1333 return true;
1334 }
1335
1336
bool SemiSpace::Uncommit() {
1338 ASSERT(is_committed());
1339 Address start = start_ + maximum_capacity_ - capacity_;
1340 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
1341 return false;
1342 }
1343 anchor()->set_next_page(anchor());
1344 anchor()->set_prev_page(anchor());
1345
1346 committed_ = false;
1347 return true;
1348 }
1349
1350
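// Grows the committed part of the semispace to 'new_capacity'. Semispaces
// are committed from the end of their reservation backwards, so the new
// pages are committed at lower addresses and linked in after the current
// last page, inheriting its flip-relevant flags.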
bool SemiSpace::GrowTo(int new_capacity) {
1352 if (!is_committed()) {
1353 if (!Commit()) return false;
1354 }
1355 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1356 ASSERT(new_capacity <= maximum_capacity_);
1357 ASSERT(new_capacity > capacity_);
1358 int pages_before = capacity_ / Page::kPageSize;
1359 int pages_after = new_capacity / Page::kPageSize;
1360
1361 Address end = start_ + maximum_capacity_;
1362 Address start = end - new_capacity;
1363 size_t delta = new_capacity - capacity_;
1364
1365 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1366 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1367 start, delta, executable())) {
1368 return false;
1369 }
1370 capacity_ = new_capacity;
1371 NewSpacePage* last_page = anchor()->prev_page();
1372 ASSERT(last_page != anchor());
1373 for (int i = pages_before + 1; i <= pages_after; i++) {
1374 Address page_address = end - i * Page::kPageSize;
1375 NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
1376 page_address,
1377 this);
1378 new_page->InsertAfter(last_page);
1379 Bitmap::Clear(new_page);
// Duplicate the flags that were set on the old page.
1381 new_page->SetFlags(last_page->GetFlags(),
1382 NewSpacePage::kCopyOnFlipFlagsMask);
1383 last_page = new_page;
1384 }
1385 return true;
1386 }
1387
1388
bool SemiSpace::ShrinkTo(int new_capacity) {
1390 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1391 ASSERT(new_capacity >= initial_capacity_);
1392 ASSERT(new_capacity < capacity_);
1393 if (is_committed()) {
1394 // Semispaces grow backwards from the end of their allocated capacity,
1395 // so we find the before and after start addresses relative to the
1396 // end of the space.
1397 Address space_end = start_ + maximum_capacity_;
1398 Address old_start = space_end - capacity_;
1399 size_t delta = capacity_ - new_capacity;
1400 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1401
1402 MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1403 if (!allocator->UncommitBlock(old_start, delta)) {
1404 return false;
1405 }
1406
1407 int pages_after = new_capacity / Page::kPageSize;
1408 NewSpacePage* new_last_page =
1409 NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
1410 new_last_page->set_next_page(anchor());
1411 anchor()->set_prev_page(new_last_page);
1412 ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
1413 }
1414
1415 capacity_ = new_capacity;
1416
1417 return true;
1418 }
1419
1420
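// Rewrites ownership and flags of every page in this semispace after a
// swap: back-pointers to the (copied) anchor are fixed up, the space id is
// flipped, and each page receives the IN_TO_SPACE / IN_FROM_SPACE flags
// matching its new role.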
void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1422 anchor_.set_owner(this);
1423 // Fixup back-pointers to anchor. Address of anchor changes
1424 // when we swap.
1425 anchor_.prev_page()->set_next_page(&anchor_);
1426 anchor_.next_page()->set_prev_page(&anchor_);
1427
1428 bool becomes_to_space = (id_ == kFromSpace);
1429 id_ = becomes_to_space ? kToSpace : kFromSpace;
1430 NewSpacePage* page = anchor_.next_page();
1431 while (page != &anchor_) {
1432 page->set_owner(this);
1433 page->SetFlags(flags, mask);
1434 if (becomes_to_space) {
1435 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1436 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1437 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1438 page->ResetLiveBytes();
1439 } else {
1440 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1441 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1442 }
1443 ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1444 ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1445 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1446 page = page->next_page();
1447 }
1448 }
1449
1450
void SemiSpace::Reset() {
1452 ASSERT(anchor_.next_page() != &anchor_);
1453 current_page_ = anchor_.next_page();
1454 }
1455
1456
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1458 // We won't be swapping semispaces without data in them.
1459 ASSERT(from->anchor_.next_page() != &from->anchor_);
1460 ASSERT(to->anchor_.next_page() != &to->anchor_);
1461
1462 // Swap bits.
1463 SemiSpace tmp = *from;
1464 *from = *to;
1465 *to = tmp;
1466
1467 // Fixup back-pointers to the page list anchor now that its address
1468 // has changed.
1469 // Swap to/from-space bits on pages.
1470 // Copy GC flags from old active space (from-space) to new (to-space).
1471 intptr_t flags = from->current_page()->GetFlags();
1472 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1473
1474 from->FlipPages(0, 0);
1475 }
1476
1477
void SemiSpace::set_age_mark(Address mark) {
1479 ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
1480 age_mark_ = mark;
1481 // Mark all pages up to the one containing mark.
1482 NewSpacePageIterator it(space_start(), mark);
1483 while (it.has_next()) {
1484 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1485 }
1486 }
1487
1488
1489 #ifdef DEBUG
void SemiSpace::Print() { }
1491
1492
void SemiSpace::Verify() {
1494 bool is_from_space = (id_ == kFromSpace);
1495 NewSpacePage* page = anchor_.next_page();
1496 CHECK(anchor_.semi_space() == this);
1497 while (page != &anchor_) {
1498 CHECK(page->semi_space() == this);
1499 CHECK(page->InNewSpace());
1500 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1501 : MemoryChunk::IN_TO_SPACE));
1502 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1503 : MemoryChunk::IN_FROM_SPACE));
1504 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1505 if (!is_from_space) {
1506 // The pointers-from-here-are-interesting flag isn't updated dynamically
1507 // on from-space pages, so it might be out of sync with the marking state.
1508 if (page->heap()->incremental_marking()->IsMarking()) {
1509 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1510 } else {
1511 CHECK(!page->IsFlagSet(
1512 MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1513 }
1514 // TODO(gc): Check that the live_bytes_count_ field matches the
1515 // black marking on the page (if we make it match in new-space).
1516 }
1517 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1518 CHECK(page->prev_page()->next_page() == page);
1519 page = page->next_page();
1520 }
1521 }
1522
1523
void SemiSpace::AssertValidRange(Address start, Address end) {
1525 // Addresses belong to same semi-space
1526 NewSpacePage* page = NewSpacePage::FromLimit(start);
1527 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1528 SemiSpace* space = page->semi_space();
1529 CHECK_EQ(space, end_page->semi_space());
1530 // Start address is before end address, either on same page,
1531 // or end address is on a later page in the linked list of
1532 // semi-space pages.
1533 if (page == end_page) {
1534 CHECK(start <= end);
1535 } else {
1536 while (page != end_page) {
1537 page = page->next_page();
1538 CHECK_NE(page, space->anchor());
1539 }
1540 }
1541 }
1542 #endif
1543
1544
1545 // -----------------------------------------------------------------------------
1546 // SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1548 Initialize(space->bottom(), space->top(), NULL);
1549 }
1550
1551
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1553 HeapObjectCallback size_func) {
1554 Initialize(space->bottom(), space->top(), size_func);
1555 }
1556
1557
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1559 Initialize(start, space->top(), NULL);
1560 }
1561
1562
SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1564 Initialize(from, to, NULL);
1565 }
1566
1567
void SemiSpaceIterator::Initialize(Address start,
1569 Address end,
1570 HeapObjectCallback size_func) {
1571 SemiSpace::AssertValidRange(start, end);
1572 current_ = start;
1573 limit_ = end;
1574 size_func_ = size_func;
1575 }
1576
1577
1578 #ifdef DEBUG
1579 // heap_histograms is shared, always clear it before using it.
static void ClearHistograms() {
1581 Isolate* isolate = Isolate::Current();
1582 // We reset the name each time, though it hasn't changed.
1583 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1584 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1585 #undef DEF_TYPE_NAME
1586
1587 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1588 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1589 #undef CLEAR_HISTOGRAM
1590
1591 isolate->js_spill_information()->Clear();
1592 }
1593
1594
static void ClearCodeKindStatistics() {
1596 Isolate* isolate = Isolate::Current();
1597 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1598 isolate->code_kind_statistics()[i] = 0;
1599 }
1600 }
1601
1602
static void ReportCodeKindStatistics() {
1604 Isolate* isolate = Isolate::Current();
1605 const char* table[Code::NUMBER_OF_KINDS] = { NULL };
1606
1607 #define CASE(name) \
1608 case Code::name: table[Code::name] = #name; \
1609 break
1610
1611 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1612 switch (static_cast<Code::Kind>(i)) {
1613 CASE(FUNCTION);
1614 CASE(OPTIMIZED_FUNCTION);
1615 CASE(STUB);
1616 CASE(BUILTIN);
1617 CASE(LOAD_IC);
1618 CASE(KEYED_LOAD_IC);
1619 CASE(STORE_IC);
1620 CASE(KEYED_STORE_IC);
1621 CASE(CALL_IC);
1622 CASE(KEYED_CALL_IC);
1623 CASE(UNARY_OP_IC);
1624 CASE(BINARY_OP_IC);
1625 CASE(COMPARE_IC);
1626 CASE(TO_BOOLEAN_IC);
1627 }
1628 }
1629
1630 #undef CASE
1631
1632 PrintF("\n Code kind histograms: \n");
1633 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1634 if (isolate->code_kind_statistics()[i] > 0) {
1635 PrintF(" %-20s: %10d bytes\n", table[i],
1636 isolate->code_kind_statistics()[i]);
1637 }
1638 }
1639 PrintF("\n");
1640 }
1641
1642
static int CollectHistogramInfo(HeapObject* obj) {
1644 Isolate* isolate = Isolate::Current();
1645 InstanceType type = obj->map()->instance_type();
1646 ASSERT(0 <= type && type <= LAST_TYPE);
1647 ASSERT(isolate->heap_histograms()[type].name() != NULL);
1648 isolate->heap_histograms()[type].increment_number(1);
1649 isolate->heap_histograms()[type].increment_bytes(obj->Size());
1650
1651 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1652 JSObject::cast(obj)->IncrementSpillStatistics(
1653 isolate->js_spill_information());
1654 }
1655
1656 return obj->Size();
1657 }
1658
1659
static void ReportHistogram(bool print_spill) {
1661 Isolate* isolate = Isolate::Current();
1662 PrintF("\n Object Histogram:\n");
1663 for (int i = 0; i <= LAST_TYPE; i++) {
1664 if (isolate->heap_histograms()[i].number() > 0) {
1665 PrintF(" %-34s%10d (%10d bytes)\n",
1666 isolate->heap_histograms()[i].name(),
1667 isolate->heap_histograms()[i].number(),
1668 isolate->heap_histograms()[i].bytes());
1669 }
1670 }
1671 PrintF("\n");
1672
1673 // Summarize string types.
1674 int string_number = 0;
1675 int string_bytes = 0;
1676 #define INCREMENT(type, size, name, camel_name) \
1677 string_number += isolate->heap_histograms()[type].number(); \
1678 string_bytes += isolate->heap_histograms()[type].bytes();
1679 STRING_TYPE_LIST(INCREMENT)
1680 #undef INCREMENT
1681 if (string_number > 0) {
1682 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1683 string_bytes);
1684 }
1685
1686 if (FLAG_collect_heap_spill_statistics && print_spill) {
1687 isolate->js_spill_information()->Print();
1688 }
1689 }
1690 #endif // DEBUG
1691
1692
1693 // Support for statistics gathering for --heap-stats and --log-gc.
1694 void NewSpace::ClearHistograms() {
1695 for (int i = 0; i <= LAST_TYPE; i++) {
1696 allocated_histogram_[i].clear();
1697 promoted_histogram_[i].clear();
1698 }
1699 }
1700
1701 // Because the copying collector does not touch garbage objects, we iterate
1702 // the new space before a collection to get a histogram of allocated objects.
1703 // This only happens when the --log-gc flag is set.
1704 void NewSpace::CollectStatistics() {
1705 ClearHistograms();
1706 SemiSpaceIterator it(this);
1707 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1708 RecordAllocation(obj);
1709 }
1710
1711
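// Logs one heap sample for the given histogram: a begin event, one item event
// per instance type with live objects (all string types lumped together as
// STRING_TYPE), and an end event.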
1712 static void DoReportStatistics(Isolate* isolate,
1713 HistogramInfo* info, const char* description) {
1714 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1715 // Lump all the string types together.
1716 int string_number = 0;
1717 int string_bytes = 0;
1718 #define INCREMENT(type, size, name, camel_name) \
1719 string_number += info[type].number(); \
1720 string_bytes += info[type].bytes();
1721 STRING_TYPE_LIST(INCREMENT)
1722 #undef INCREMENT
1723 if (string_number > 0) {
1724 LOG(isolate,
1725 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1726 }
1727
1728 // Then do the other types.
1729 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1730 if (info[i].number() > 0) {
1731 LOG(isolate,
1732 HeapSampleItemEvent(info[i].name(), info[i].number(),
1733 info[i].bytes()));
1734 }
1735 }
1736 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1737 }
1738
1739
1740 void NewSpace::ReportStatistics() {
1741 #ifdef DEBUG
1742 if (FLAG_heap_stats) {
1743 float pct = static_cast<float>(Available()) / Capacity();
1744 PrintF(" capacity: %" V8_PTR_PREFIX "d"
1745 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
1746 Capacity(), Available(), static_cast<int>(pct*100));
1747 PrintF("\n Object Histogram:\n");
1748 for (int i = 0; i <= LAST_TYPE; i++) {
1749 if (allocated_histogram_[i].number() > 0) {
1750 PrintF(" %-34s%10d (%10d bytes)\n",
1751 allocated_histogram_[i].name(),
1752 allocated_histogram_[i].number(),
1753 allocated_histogram_[i].bytes());
1754 }
1755 }
1756 PrintF("\n");
1757 }
1758 #endif // DEBUG
1759
1760 if (FLAG_log_gc) {
1761 Isolate* isolate = ISOLATE;
1762 DoReportStatistics(isolate, allocated_histogram_, "allocated");
1763 DoReportStatistics(isolate, promoted_histogram_, "promoted");
1764 }
1765 }
1766
1767
1768 void NewSpace::RecordAllocation(HeapObject* obj) {
1769 InstanceType type = obj->map()->instance_type();
1770 ASSERT(0 <= type && type <= LAST_TYPE);
1771 allocated_histogram_[type].increment_number(1);
1772 allocated_histogram_[type].increment_bytes(obj->Size());
1773 }
1774
1775
1776 void NewSpace::RecordPromotion(HeapObject* obj) {
1777 InstanceType type = obj->map()->instance_type();
1778 ASSERT(0 <= type && type <= LAST_TYPE);
1779 promoted_histogram_[type].increment_number(1);
1780 promoted_histogram_[type].increment_bytes(obj->Size());
1781 }
1782
1783 // -----------------------------------------------------------------------------
1784 // Free lists for old object spaces implementation
1785
1786 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
1787 ASSERT(size_in_bytes > 0);
1788 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1789
1790 // We write a map and possibly size information to the block. If the block
1791 // is big enough to be a FreeSpace with at least one extra word (the next
1792 // pointer), we set its map to be the free space map and its size to an
1793 // appropriate array length for the desired size from HeapObject::Size().
1794   // If the block is too small (e.g., one or two words) to hold both a size
1795 // field and a next pointer, we give it a filler map that gives it the
1796 // correct size.
1797 if (size_in_bytes > FreeSpace::kHeaderSize) {
1798 set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
1799 // Can't use FreeSpace::cast because it fails during deserialization.
1800 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1801 this_as_free_space->set_size(size_in_bytes);
1802 } else if (size_in_bytes == kPointerSize) {
1803 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
1804 } else if (size_in_bytes == 2 * kPointerSize) {
1805 set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
1806 } else {
1807 UNREACHABLE();
1808 }
1809 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1810   // deserialization because the free space map has not been set up yet.
1811 }
1812
1813
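// The location of the next pointer depends on the node's map: FreeSpace nodes
// store it at kNextOffset, while one- and two-word filler nodes store it in
// the word directly after the map.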
1814 FreeListNode* FreeListNode::next() {
1815 ASSERT(IsFreeListNode(this));
1816 if (map() == HEAP->raw_unchecked_free_space_map()) {
1817 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1818 return reinterpret_cast<FreeListNode*>(
1819 Memory::Address_at(address() + kNextOffset));
1820 } else {
1821 return reinterpret_cast<FreeListNode*>(
1822 Memory::Address_at(address() + kPointerSize));
1823 }
1824 }
1825
1826
1827 FreeListNode** FreeListNode::next_address() {
1828 ASSERT(IsFreeListNode(this));
1829 if (map() == HEAP->raw_unchecked_free_space_map()) {
1830 ASSERT(Size() >= kNextOffset + kPointerSize);
1831 return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
1832 } else {
1833 return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
1834 }
1835 }
1836
1837
1838 void FreeListNode::set_next(FreeListNode* next) {
1839 ASSERT(IsFreeListNode(this));
1840   // While we are booting the VM, the free space map will actually be null,
1841   // so we have to make sure that we don't try to use it for anything at
1842   // that stage.
1843 if (map() == HEAP->raw_unchecked_free_space_map()) {
1844 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1845 Memory::Address_at(address() + kNextOffset) =
1846 reinterpret_cast<Address>(next);
1847 } else {
1848 Memory::Address_at(address() + kPointerSize) =
1849 reinterpret_cast<Address>(next);
1850 }
1851 }
1852
1853
1854 FreeList::FreeList(PagedSpace* owner)
1855 : owner_(owner), heap_(owner->heap()) {
1856 Reset();
1857 }
1858
1859
1860 void FreeList::Reset() {
1861 available_ = 0;
1862 small_list_ = NULL;
1863 medium_list_ = NULL;
1864 large_list_ = NULL;
1865 huge_list_ = NULL;
1866 }
1867
1868
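// Adds the block at |start| to the free list bucket appropriate for its size
// (small, medium, large, or huge). Blocks below kSmallListMin are not linked
// in; their size is returned as wasted bytes. In both cases the block gets a
// free-space or filler map so the heap stays iterable.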
1869 int FreeList::Free(Address start, int size_in_bytes) {
1870 if (size_in_bytes == 0) return 0;
1871 FreeListNode* node = FreeListNode::FromAddress(start);
1872 node->set_size(heap_, size_in_bytes);
1873
1874 // Early return to drop too-small blocks on the floor.
1875 if (size_in_bytes < kSmallListMin) return size_in_bytes;
1876
1877 // Insert other blocks at the head of a free list of the appropriate
1878 // magnitude.
1879 if (size_in_bytes <= kSmallListMax) {
1880 node->set_next(small_list_);
1881 small_list_ = node;
1882 } else if (size_in_bytes <= kMediumListMax) {
1883 node->set_next(medium_list_);
1884 medium_list_ = node;
1885 } else if (size_in_bytes <= kLargeListMax) {
1886 node->set_next(large_list_);
1887 large_list_ = node;
1888 } else {
1889 node->set_next(huge_list_);
1890 huge_list_ = node;
1891 }
1892 available_ += size_in_bytes;
1893 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1894 return 0;
1895 }
1896
1897
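// Takes the first usable node from *list, skipping and dropping nodes that
// lie on evacuation candidate pages. Returns NULL if no usable node is left.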
1898 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
1899 FreeListNode* node = *list;
1900
1901 if (node == NULL) return NULL;
1902
1903 while (node != NULL &&
1904 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
1905 available_ -= node->Size();
1906 node = node->next();
1907 }
1908
1909 if (node != NULL) {
1910 *node_size = node->Size();
1911 *list = node->next();
1912 } else {
1913 *list = NULL;
1914 }
1915
1916 return node;
1917 }
1918
1919
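// Tries the smallest size class that can satisfy the request first, then
// falls back to a first-fit scan of the huge list.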
1920 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
1921 FreeListNode* node = NULL;
1922
1923 if (size_in_bytes <= kSmallAllocationMax) {
1924 node = PickNodeFromList(&small_list_, node_size);
1925 if (node != NULL) return node;
1926 }
1927
1928 if (size_in_bytes <= kMediumAllocationMax) {
1929 node = PickNodeFromList(&medium_list_, node_size);
1930 if (node != NULL) return node;
1931 }
1932
1933 if (size_in_bytes <= kLargeAllocationMax) {
1934 node = PickNodeFromList(&large_list_, node_size);
1935 if (node != NULL) return node;
1936 }
1937
1938 for (FreeListNode** cur = &huge_list_;
1939 *cur != NULL;
1940 cur = (*cur)->next_address()) {
1941 FreeListNode* cur_node = *cur;
1942 while (cur_node != NULL &&
1943 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
1944 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
1945 cur_node = cur_node->next();
1946 }
1947
1948 *cur = cur_node;
1949 if (cur_node == NULL) break;
1950
1951 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
1952 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
1953 int size = cur_as_free_space->Size();
1954 if (size >= size_in_bytes) {
1955 // Large enough node found. Unlink it from the list.
1956 node = *cur;
1957 *node_size = size;
1958 *cur = node->next();
1959 break;
1960 }
1961 }
1962
1963 return node;
1964 }
1965
1966
1967 // Allocation on the old space free list. If it succeeds then a new linear
1968 // allocation space has been set up with the top and limit of the space. If
1969 // the allocation fails then NULL is returned, and the caller can perform a GC
1970 // or allocate a new page before retrying.
1971 HeapObject* FreeList::Allocate(int size_in_bytes) {
1972 ASSERT(0 < size_in_bytes);
1973 ASSERT(size_in_bytes <= kMaxBlockSize);
1974 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1975 // Don't free list allocate if there is linear space available.
1976 ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
1977
1978 int new_node_size = 0;
1979 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
1980 if (new_node == NULL) return NULL;
1981
1982 available_ -= new_node_size;
1983 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1984
1985 int bytes_left = new_node_size - size_in_bytes;
1986 ASSERT(bytes_left >= 0);
1987
1988 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
1989 // Mark the old linear allocation area with a free space map so it can be
1990 // skipped when scanning the heap. This also puts it back in the free list
1991 // if it is big enough.
1992 owner_->Free(owner_->top(), old_linear_size);
1993
1994 #ifdef DEBUG
1995 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
1996 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
1997 }
1998 #endif
1999
2000 owner_->heap()->incremental_marking()->OldSpaceStep(
2001 size_in_bytes - old_linear_size);
2002
2003 // The old-space-step might have finished sweeping and restarted marking.
2004 // Verify that it did not turn the page of the new node into an evacuation
2005 // candidate.
2006 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2007
2008 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2009
2010 // Memory in the linear allocation area is counted as allocated. We may free
2011 // a little of this again immediately - see below.
2012 owner_->Allocate(new_node_size);
2013
2014 if (bytes_left > kThreshold &&
2015 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2016 FLAG_incremental_marking_steps) {
2017 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2018 // We don't want to give too large linear areas to the allocator while
2019 // incremental marking is going on, because we won't check again whether
2020 // we want to do another increment until the linear area is used up.
2021 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2022 new_node_size - size_in_bytes - linear_size);
2023 owner_->SetTop(new_node->address() + size_in_bytes,
2024 new_node->address() + size_in_bytes + linear_size);
2025 } else if (bytes_left > 0) {
2026 // Normally we give the rest of the node to the allocator as its new
2027 // linear allocation area.
2028 owner_->SetTop(new_node->address() + size_in_bytes,
2029 new_node->address() + new_node_size);
2030 } else {
2031 // TODO(gc) Try not freeing linear allocation region when bytes_left
2032 // are zero.
2033 owner_->SetTop(NULL, NULL);
2034 }
2035
2036 return new_node;
2037 }
2038
2039
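// Returns the total size of the nodes of the given free list that reside on
// page p.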
2040 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
2041 intptr_t sum = 0;
2042 while (n != NULL) {
2043 if (Page::FromAddress(n->address()) == p) {
2044 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
2045 sum += free_space->Size();
2046 }
2047 n = n->next();
2048 }
2049 return sum;
2050 }
2051
2052
2053 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
2054 sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
2055 if (sizes->huge_size_ < p->area_size()) {
2056 sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
2057 sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
2058 sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
2059 } else {
2060 sizes->small_size_ = 0;
2061 sizes->medium_size_ = 0;
2062 sizes->large_size_ = 0;
2063 }
2064 }
2065
2066
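// Unlinks every node that resides on page p from the given free list and
// returns the total number of bytes removed.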
2067 static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
2068 intptr_t sum = 0;
2069 while (*n != NULL) {
2070 if (Page::FromAddress((*n)->address()) == p) {
2071 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
2072 sum += free_space->Size();
2073 *n = (*n)->next();
2074 } else {
2075 n = (*n)->next_address();
2076 }
2077 }
2078 return sum;
2079 }
2080
2081
2082 intptr_t FreeList::EvictFreeListItems(Page* p) {
2083 intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
2084
2085 if (sum < p->area_size()) {
2086 sum += EvictFreeListItemsInList(&small_list_, p) +
2087 EvictFreeListItemsInList(&medium_list_, p) +
2088 EvictFreeListItemsInList(&large_list_, p);
2089 }
2090
2091 available_ -= static_cast<int>(sum);
2092
2093 return sum;
2094 }
2095
2096
2097 #ifdef DEBUG
2098 intptr_t FreeList::SumFreeList(FreeListNode* cur) {
2099 intptr_t sum = 0;
2100 while (cur != NULL) {
2101 ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
2102 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
2103 sum += cur_as_free_space->Size();
2104 cur = cur->next();
2105 }
2106 return sum;
2107 }
2108
2109
2110 static const int kVeryLongFreeList = 500;
2111
2112
2113 int FreeList::FreeListLength(FreeListNode* cur) {
2114 int length = 0;
2115 while (cur != NULL) {
2116 length++;
2117 cur = cur->next();
2118 if (length == kVeryLongFreeList) return length;
2119 }
2120 return length;
2121 }
2122
2123
2124 bool FreeList::IsVeryLong() {
2125 if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
2126 if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
2127 if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
2128 if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
2129 return false;
2130 }
2131
2132
2133 // This can take a very long time because it is linear in the number of entries
2134 // on the free list, so it should not be called if FreeListLength returns
2135 // kVeryLongFreeList.
2136 intptr_t FreeList::SumFreeLists() {
2137 intptr_t sum = SumFreeList(small_list_);
2138 sum += SumFreeList(medium_list_);
2139 sum += SumFreeList(large_list_);
2140 sum += SumFreeList(huge_list_);
2141 return sum;
2142 }
2143 #endif
2144
2145
2146 // -----------------------------------------------------------------------------
2147 // OldSpace implementation
2148
2149 bool NewSpace::ReserveSpace(int bytes) {
2150 // We can't reliably unpack a partial snapshot that needs more new space
2151   // than the minimum NewSpace size. The limit can be set lower than
2152 // the end of new space either because there is more space on the next page
2153 // or because we have lowered the limit in order to get periodic incremental
2154 // marking. The most reliable way to ensure that there is linear space is
2155 // to do the allocation, then rewind the limit.
2156 ASSERT(bytes <= InitialCapacity());
2157 MaybeObject* maybe = AllocateRaw(bytes);
2158 Object* object = NULL;
2159 if (!maybe->ToObject(&object)) return false;
2160 HeapObject* allocation = HeapObject::cast(object);
2161 Address top = allocation_info_.top;
2162 if ((top - bytes) == allocation->address()) {
2163 allocation_info_.top = allocation->address();
2164 return true;
2165 }
2166 // There may be a borderline case here where the allocation succeeded, but
2167 // the limit and top have moved on to a new page. In that case we try again.
2168 return ReserveSpace(bytes);
2169 }
2170
2171
2172 void PagedSpace::PrepareForMarkCompact() {
2173 // We don't have a linear allocation area while sweeping. It will be restored
2174 // on the first allocation after the sweep.
2175 // Mark the old linear allocation area with a free space map so it can be
2176 // skipped when scanning the heap.
2177 int old_linear_size = static_cast<int>(limit() - top());
2178 Free(top(), old_linear_size);
2179 SetTop(NULL, NULL);
2180
2181 // Stop lazy sweeping and clear marking bits for unswept pages.
2182 if (first_unswept_page_ != NULL) {
2183 Page* p = first_unswept_page_;
2184 do {
2185 // Do not use ShouldBeSweptLazily predicate here.
2186 // New evacuation candidates were selected but they still have
2187 // to be swept before collection starts.
2188 if (!p->WasSwept()) {
2189 Bitmap::Clear(p);
2190 if (FLAG_gc_verbose) {
2191 PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
2192 reinterpret_cast<intptr_t>(p));
2193 }
2194 }
2195 p = p->next_page();
2196 } while (p != anchor());
2197 }
2198 first_unswept_page_ = Page::FromAddress(NULL);
2199 unswept_free_bytes_ = 0;
2200
2201 // Clear the free list before a full GC---it will be rebuilt afterward.
2202 free_list_.Reset();
2203 }
2204
2205
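// Ensures the linear allocation area can hold size_in_bytes, setting up a new
// area from the free list (or a freshly expanded page) if the current one is
// too small.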
2206 bool PagedSpace::ReserveSpace(int size_in_bytes) {
2207 ASSERT(size_in_bytes <= AreaSize());
2208 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
2209 Address current_top = allocation_info_.top;
2210 Address new_top = current_top + size_in_bytes;
2211 if (new_top <= allocation_info_.limit) return true;
2212
2213 HeapObject* new_area = free_list_.Allocate(size_in_bytes);
2214 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
2215 if (new_area == NULL) return false;
2216
2217 int old_linear_size = static_cast<int>(limit() - top());
2218 // Mark the old linear allocation area with a free space so it can be
2219 // skipped when scanning the heap. This also puts it back in the free list
2220 // if it is big enough.
2221 Free(top(), old_linear_size);
2222
2223 SetTop(new_area->address(), new_area->address() + size_in_bytes);
2224 Allocate(size_in_bytes);
2225 return true;
2226 }
2227
2228
2229 // You have to call this last, since the implementation from PagedSpace
2230 // doesn't know that memory was 'promised' to large object space.
2231 bool LargeObjectSpace::ReserveSpace(int bytes) {
2232 return heap()->OldGenerationCapacityAvailable() >= bytes &&
2233 (!heap()->incremental_marking()->IsStopped() ||
2234 heap()->OldGenerationSpaceAvailable() >= bytes);
2235 }
2236
2237
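// Conservatively sweeps unswept pages until at least bytes_to_sweep bytes
// have been freed or all pages have been swept. Returns true if sweeping is
// then complete.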
2238 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
2239 if (IsSweepingComplete()) return true;
2240
2241 intptr_t freed_bytes = 0;
2242 Page* p = first_unswept_page_;
2243 do {
2244 Page* next_page = p->next_page();
2245 if (ShouldBeSweptLazily(p)) {
2246 if (FLAG_gc_verbose) {
2247 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
2248 reinterpret_cast<intptr_t>(p));
2249 }
2250 DecreaseUnsweptFreeBytes(p);
2251 freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
2252 }
2253 p = next_page;
2254 } while (p != anchor() && freed_bytes < bytes_to_sweep);
2255
2256 if (p == anchor()) {
2257 first_unswept_page_ = Page::FromAddress(NULL);
2258 } else {
2259 first_unswept_page_ = p;
2260 }
2261
2262 heap()->LowerOldGenLimits(freed_bytes);
2263
2264 heap()->FreeQueuedChunks();
2265
2266 return IsSweepingComplete();
2267 }
2268
2269
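// If the current linear allocation area lies on an evacuation candidate page,
// fill it with a filler object and drop it so it is not used for allocation.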
2270 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2271 if (allocation_info_.top >= allocation_info_.limit) return;
2272
2273 if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
2274 // Create filler object to keep page iterable if it was iterable.
2275 int remaining =
2276 static_cast<int>(allocation_info_.limit - allocation_info_.top);
2277 heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
2278
2279 allocation_info_.top = NULL;
2280 allocation_info_.limit = NULL;
2281 }
2282 }
2283
2284
2285 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2286 // Allocation in this space has failed.
2287
2288   // If there are unswept pages, advance the lazy sweeper to sweep at least
2289   // one page before allocating a new page.
2290 if (first_unswept_page_->is_valid()) {
2291 AdvanceSweeper(size_in_bytes);
2292
2293 // Retry the free list allocation.
2294 HeapObject* object = free_list_.Allocate(size_in_bytes);
2295 if (object != NULL) return object;
2296 }
2297
2298 // Free list allocation failed and there is no next page. Fail if we have
2299 // hit the old generation size limit that should cause a garbage
2300 // collection.
2301 if (!heap()->always_allocate() &&
2302 heap()->OldGenerationAllocationLimitReached()) {
2303 return NULL;
2304 }
2305
2306 // Try to expand the space and allocate in the new next page.
2307 if (Expand()) {
2308 return free_list_.Allocate(size_in_bytes);
2309 }
2310
2311   // As a last resort, sweep all the remaining pages to try to find space.
2312   // This may cause a pause.
2313 if (!IsSweepingComplete()) {
2314 AdvanceSweeper(kMaxInt);
2315
2316 // Retry the free list allocation.
2317 HeapObject* object = free_list_.Allocate(size_in_bytes);
2318 if (object != NULL) return object;
2319 }
2320
2321 // Finally, fail.
2322 return NULL;
2323 }
2324
2325
2326 #ifdef DEBUG
2327 void PagedSpace::ReportCodeStatistics() {
2328 Isolate* isolate = Isolate::Current();
2329 CommentStatistic* comments_statistics =
2330 isolate->paged_space_comments_statistics();
2331 ReportCodeKindStatistics();
2332 PrintF("Code comment statistics (\" [ comment-txt : size/ "
2333 "count (average)\"):\n");
2334 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2335 const CommentStatistic& cs = comments_statistics[i];
2336 if (cs.size > 0) {
2337 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2338 cs.size/cs.count);
2339 }
2340 }
2341 PrintF("\n");
2342 }
2343
2344
2345 void PagedSpace::ResetCodeStatistics() {
2346 Isolate* isolate = Isolate::Current();
2347 CommentStatistic* comments_statistics =
2348 isolate->paged_space_comments_statistics();
2349 ClearCodeKindStatistics();
2350 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2351 comments_statistics[i].Clear();
2352 }
2353 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2354 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2355 comments_statistics[CommentStatistic::kMaxComments].count = 0;
2356 }
2357
2358
2359 // Adds a comment to the 'comments_statistics' table. Performance is OK as
2360 // long as 'kMaxComments' is small.
2361 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2362 CommentStatistic* comments_statistics =
2363 isolate->paged_space_comments_statistics();
2364 // Do not count empty comments
2365 if (delta <= 0) return;
2366 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2367 // Search for a free or matching entry in 'comments_statistics': 'cs'
2368 // points to result.
2369 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2370 if (comments_statistics[i].comment == NULL) {
2371 cs = &comments_statistics[i];
2372 cs->comment = comment;
2373 break;
2374 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2375 cs = &comments_statistics[i];
2376 break;
2377 }
2378 }
2379 // Update entry for 'comment'
2380 cs->size += delta;
2381 cs->count += 1;
2382 }
2383
2384
2385 // Call for each nested comment start (start marked with '[ xxx', end marked
2386 // with ']'). RelocIterator 'it' must point to a comment reloc info.
2387 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2388 ASSERT(!it->done());
2389 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2390 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2391 if (tmp[0] != '[') {
2392 // Not a nested comment; skip
2393 return;
2394 }
2395
2396 // Search for end of nested comment or a new nested comment
2397 const char* const comment_txt =
2398 reinterpret_cast<const char*>(it->rinfo()->data());
2399 const byte* prev_pc = it->rinfo()->pc();
2400 int flat_delta = 0;
2401 it->next();
2402 while (true) {
2403 // All nested comments must be terminated properly, and therefore exit
2404 // from loop.
2405 ASSERT(!it->done());
2406 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2407 const char* const txt =
2408 reinterpret_cast<const char*>(it->rinfo()->data());
2409 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2410 if (txt[0] == ']') break; // End of nested comment
2411 // A new comment
2412 CollectCommentStatistics(isolate, it);
2413 // Skip code that was covered with previous comment
2414 prev_pc = it->rinfo()->pc();
2415 }
2416 it->next();
2417 }
2418 EnterComment(isolate, comment_txt, flat_delta);
2419 }
2420
2421
2422 // Collects code size statistics:
2423 // - by code kind
2424 // - by code comment
2425 void PagedSpace::CollectCodeStatistics() {
2426 Isolate* isolate = heap()->isolate();
2427 HeapObjectIterator obj_it(this);
2428 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2429 if (obj->IsCode()) {
2430 Code* code = Code::cast(obj);
2431 isolate->code_kind_statistics()[code->kind()] += code->Size();
2432 RelocIterator it(code);
2433 int delta = 0;
2434 const byte* prev_pc = code->instruction_start();
2435 while (!it.done()) {
2436 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2437 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2438 CollectCommentStatistics(isolate, &it);
2439 prev_pc = it.rinfo()->pc();
2440 }
2441 it.next();
2442 }
2443
2444 ASSERT(code->instruction_start() <= prev_pc &&
2445 prev_pc <= code->instruction_end());
2446 delta += static_cast<int>(code->instruction_end() - prev_pc);
2447 EnterComment(isolate, "NoComment", delta);
2448 }
2449 }
2450 }
2451
2452
2453 void PagedSpace::ReportStatistics() {
2454 int pct = static_cast<int>(Available() * 100 / Capacity());
2455 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2456 ", waste: %" V8_PTR_PREFIX "d"
2457 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2458 Capacity(), Waste(), Available(), pct);
2459
2460 if (was_swept_conservatively_) return;
2461 ClearHistograms();
2462 HeapObjectIterator obj_it(this);
2463 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2464 CollectHistogramInfo(obj);
2465 ReportHistogram(true);
2466 }
2467 #endif
2468
2469 // -----------------------------------------------------------------------------
2470 // FixedSpace implementation
2471
2472 void FixedSpace::PrepareForMarkCompact() {
2473 // Call prepare of the super class.
2474 PagedSpace::PrepareForMarkCompact();
2475
2476 // During a non-compacting collection, everything below the linear
2477 // allocation pointer except wasted top-of-page blocks is considered
2478 // allocated and we will rediscover available bytes during the
2479 // collection.
2480 accounting_stats_.AllocateBytes(free_list_.available());
2481
2482 // Clear the free list before a full GC---it will be rebuilt afterward.
2483 free_list_.Reset();
2484 }
2485
2486
2487 // -----------------------------------------------------------------------------
2488 // MapSpace implementation
2489
2490 #ifdef DEBUG
2491 void MapSpace::VerifyObject(HeapObject* object) {
2492 // The object should be a map or a free-list node.
2493 ASSERT(object->IsMap() || object->IsFreeSpace());
2494 }
2495 #endif
2496
2497
2498 // -----------------------------------------------------------------------------
2499 // GlobalPropertyCellSpace implementation
2500
2501 #ifdef DEBUG
2502 void CellSpace::VerifyObject(HeapObject* object) {
2503 // The object should be a global object property cell or a free-list node.
2504 ASSERT(object->IsJSGlobalPropertyCell() ||
2505 object->map() == heap()->two_pointer_filler_map());
2506 }
2507 #endif
2508
2509
2510 // -----------------------------------------------------------------------------
2511 // LargeObjectIterator
2512
2513 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2514 current_ = space->first_page_;
2515 size_func_ = NULL;
2516 }
2517
2518
2519 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2520 HeapObjectCallback size_func) {
2521 current_ = space->first_page_;
2522 size_func_ = size_func;
2523 }
2524
2525
2526 HeapObject* LargeObjectIterator::Next() {
2527 if (current_ == NULL) return NULL;
2528
2529 HeapObject* object = current_->GetObject();
2530 current_ = current_->next_page();
2531 return object;
2532 }
2533
2534
2535 // -----------------------------------------------------------------------------
2536 // LargeObjectSpace
2537 static bool ComparePointers(void* key1, void* key2) {
2538 return key1 == key2;
2539 }
2540
2541
2542 LargeObjectSpace::LargeObjectSpace(Heap* heap,
2543 intptr_t max_capacity,
2544 AllocationSpace id)
2545 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2546 max_capacity_(max_capacity),
2547 first_page_(NULL),
2548 size_(0),
2549 page_count_(0),
2550 objects_size_(0),
2551 chunk_map_(ComparePointers, 1024) {}
2552
2553
2554 bool LargeObjectSpace::SetUp() {
2555 first_page_ = NULL;
2556 size_ = 0;
2557 page_count_ = 0;
2558 objects_size_ = 0;
2559 chunk_map_.Clear();
2560 return true;
2561 }
2562
2563
2564 void LargeObjectSpace::TearDown() {
2565 while (first_page_ != NULL) {
2566 LargePage* page = first_page_;
2567 first_page_ = first_page_->next_page();
2568 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2569
2570 ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2571 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2572 space, kAllocationActionFree, page->size());
2573 heap()->isolate()->memory_allocator()->Free(page);
2574 }
2575 SetUp();
2576 }
2577
2578
2579 MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
2580 Executability executable) {
2581 // Check if we want to force a GC before growing the old space further.
2582 // If so, fail the allocation.
2583 if (!heap()->always_allocate() &&
2584 heap()->OldGenerationAllocationLimitReached()) {
2585 return Failure::RetryAfterGC(identity());
2586 }
2587
2588 if (Size() + object_size > max_capacity_) {
2589 return Failure::RetryAfterGC(identity());
2590 }
2591
2592 LargePage* page = heap()->isolate()->memory_allocator()->
2593 AllocateLargePage(object_size, executable, this);
2594 if (page == NULL) return Failure::RetryAfterGC(identity());
2595 ASSERT(page->area_size() >= object_size);
2596
2597 size_ += static_cast<int>(page->size());
2598 objects_size_ += object_size;
2599 page_count_++;
2600 page->set_next_page(first_page_);
2601 first_page_ = page;
2602
2603 // Register all MemoryChunk::kAlignment-aligned chunks covered by
2604 // this large page in the chunk map.
2605 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2606 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2607 for (uintptr_t key = base; key <= limit; key++) {
2608 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2609 static_cast<uint32_t>(key),
2610 true);
2611 ASSERT(entry != NULL);
2612 entry->value = page;
2613 }
2614
2615 HeapObject* object = page->GetObject();
2616
2617 #ifdef DEBUG
2618   // Make the object consistent so the heap can be verified in OldSpaceStep.
2619 reinterpret_cast<Object**>(object->address())[0] =
2620 heap()->fixed_array_map();
2621 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2622 #endif
2623
2624 heap()->incremental_marking()->OldSpaceStep(object_size);
2625 return object;
2626 }
2627
2628
2629 // GC support
2630 MaybeObject* LargeObjectSpace::FindObject(Address a) {
2631 LargePage* page = FindPage(a);
2632 if (page != NULL) {
2633 return page->GetObject();
2634 }
2635 return Failure::Exception();
2636 }
2637
2638
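// Looks up the large page covering address |a| in chunk_map_, which holds an
// entry for every MemoryChunk::kAlignment-aligned chunk of each large page.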
2639 LargePage* LargeObjectSpace::FindPage(Address a) {
2640 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2641 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2642 static_cast<uint32_t>(key),
2643 false);
2644 if (e != NULL) {
2645 ASSERT(e->value != NULL);
2646 LargePage* page = reinterpret_cast<LargePage*>(e->value);
2647 ASSERT(page->is_valid());
2648 if (page->Contains(a)) {
2649 return page;
2650 }
2651 }
2652 return NULL;
2653 }
2654
2655
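// Sweeps the large object space: pages whose object is unmarked are unlinked,
// removed from chunk_map_ and released, while marked objects survive with
// their mark bits cleared.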
2656 void LargeObjectSpace::FreeUnmarkedObjects() {
2657 LargePage* previous = NULL;
2658 LargePage* current = first_page_;
2659 while (current != NULL) {
2660 HeapObject* object = current->GetObject();
2661     // Can this large page contain pointers to non-trivial objects?  No
2662     // other pointer object is this big.
2663 bool is_pointer_object = object->IsFixedArray();
2664 MarkBit mark_bit = Marking::MarkBitFrom(object);
2665 if (mark_bit.Get()) {
2666 mark_bit.Clear();
2667 MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
2668 previous = current;
2669 current = current->next_page();
2670 } else {
2671 LargePage* page = current;
2672 // Cut the chunk out from the chunk list.
2673 current = current->next_page();
2674 if (previous == NULL) {
2675 first_page_ = current;
2676 } else {
2677 previous->set_next_page(current);
2678 }
2679
2680 // Free the chunk.
2681 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2682 object, heap()->isolate());
2683 size_ -= static_cast<int>(page->size());
2684 objects_size_ -= object->Size();
2685 page_count_--;
2686
2687 // Remove entries belonging to this page.
2688 // Use variable alignment to help pass length check (<= 80 characters)
2689 // of single line in tools/presubmit.py.
2690 const intptr_t alignment = MemoryChunk::kAlignment;
2691 uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
2692 uintptr_t limit = base + (page->size()-1)/alignment;
2693 for (uintptr_t key = base; key <= limit; key++) {
2694 chunk_map_.Remove(reinterpret_cast<void*>(key),
2695 static_cast<uint32_t>(key));
2696 }
2697
2698 if (is_pointer_object) {
2699 heap()->QueueMemoryChunkForFree(page);
2700 } else {
2701 heap()->isolate()->memory_allocator()->Free(page);
2702 }
2703 }
2704 }
2705 heap()->FreeQueuedChunks();
2706 }
2707
2708
2709 bool LargeObjectSpace::Contains(HeapObject* object) {
2710 Address address = object->address();
2711 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
2712
2713 bool owned = (chunk->owner() == this);
2714
2715 SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
2716
2717 return owned;
2718 }
2719
2720
2721 #ifdef DEBUG
2722 // We do not assume that the large object iterator works, because it depends
2723 // on the invariants we are checking during verification.
2724 void LargeObjectSpace::Verify() {
2725 for (LargePage* chunk = first_page_;
2726 chunk != NULL;
2727 chunk = chunk->next_page()) {
2728 // Each chunk contains an object that starts at the large object page's
2729 // object area start.
2730 HeapObject* object = chunk->GetObject();
2731 Page* page = Page::FromAddress(object->address());
2732 ASSERT(object->address() == page->area_start());
2733
2734 // The first word should be a map, and we expect all map pointers to be
2735 // in map space.
2736 Map* map = object->map();
2737 ASSERT(map->IsMap());
2738 ASSERT(heap()->map_space()->Contains(map));
2739
2740 // We have only code, sequential strings, external strings
2741 // (sequential strings that have been morphed into external
2742 // strings), fixed arrays, and byte arrays in large object space.
2743 ASSERT(object->IsCode() || object->IsSeqString() ||
2744 object->IsExternalString() || object->IsFixedArray() ||
2745 object->IsFixedDoubleArray() || object->IsByteArray());
2746
2747 // The object itself should look OK.
2748 object->Verify();
2749
2750 // Byte arrays and strings don't have interior pointers.
2751 if (object->IsCode()) {
2752 VerifyPointersVisitor code_visitor;
2753 object->IterateBody(map->instance_type(),
2754 object->Size(),
2755 &code_visitor);
2756 } else if (object->IsFixedArray()) {
2757 FixedArray* array = FixedArray::cast(object);
2758 for (int j = 0; j < array->length(); j++) {
2759 Object* element = array->get(j);
2760 if (element->IsHeapObject()) {
2761 HeapObject* element_object = HeapObject::cast(element);
2762 ASSERT(heap()->Contains(element_object));
2763 ASSERT(element_object->map()->IsMap());
2764 }
2765 }
2766 }
2767 }
2768 }
2769
2770
2771 void LargeObjectSpace::Print() {
2772 LargeObjectIterator it(this);
2773 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
2774 obj->Print();
2775 }
2776 }
2777
2778
2779 void LargeObjectSpace::ReportStatistics() {
2780 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
2781 int num_objects = 0;
2782 ClearHistograms();
2783 LargeObjectIterator it(this);
2784 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
2785 num_objects++;
2786 CollectHistogramInfo(obj);
2787 }
2788
2789 PrintF(" number of objects %d, "
2790 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
2791 if (num_objects > 0) ReportHistogram(false);
2792 }
2793
2794
2795 void LargeObjectSpace::CollectCodeStatistics() {
2796 Isolate* isolate = heap()->isolate();
2797 LargeObjectIterator obj_it(this);
2798 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2799 if (obj->IsCode()) {
2800 Code* code = Code::cast(obj);
2801 isolate->code_kind_statistics()[code->kind()] += code->Size();
2802 }
2803 }
2804 }
2805
2806
2807 void Page::Print() {
2808   // Make a best effort to print the objects in the page.
2809 PrintF("Page@%p in %s\n",
2810 this->address(),
2811 AllocationSpaceName(this->owner()->identity()));
2812 printf(" --------------------------------------\n");
2813 HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
2814 unsigned mark_size = 0;
2815 for (HeapObject* object = objects.Next();
2816 object != NULL;
2817 object = objects.Next()) {
2818 bool is_marked = Marking::MarkBitFrom(object).Get();
2819 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
2820 if (is_marked) {
2821 mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
2822 }
2823 object->ShortPrint();
2824 PrintF("\n");
2825 }
2826 printf(" --------------------------------------\n");
2827 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
2828 }
2829
2830 #endif // DEBUG
2831
2832 } } // namespace v8::internal
2833