1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #include "src/full-codegen.h"
8 #include "src/macro-assembler.h"
9 #include "src/mark-compact.h"
10 #include "src/msan.h"
11 #include "src/platform.h"
12
13 namespace v8 {
14 namespace internal {
15
16
17 // ----------------------------------------------------------------------------
18 // HeapObjectIterator
19
20 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
21 // You can't actually iterate over the anchor page. It is not a real page,
22 // just an anchor for the doubly linked page list. Initialize as if we have
23 // reached the end of the anchor page, then the first iteration will move on
24 // to the first page.
25 Initialize(space,
26 NULL,
27 NULL,
28 kAllPagesInSpace,
29 NULL);
30 }
31
32
33 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
34 HeapObjectCallback size_func) {
35 // You can't actually iterate over the anchor page. It is not a real page,
36 // just an anchor for the doubly linked page list. Initialize the current
37 // address and end as NULL, then the first iteration will move on
38 // to the first page.
39 Initialize(space,
40 NULL,
41 NULL,
42 kAllPagesInSpace,
43 size_func);
44 }
45
46
47 HeapObjectIterator::HeapObjectIterator(Page* page,
48 HeapObjectCallback size_func) {
49 Space* owner = page->owner();
50 ASSERT(owner == page->heap()->old_pointer_space() ||
51 owner == page->heap()->old_data_space() ||
52 owner == page->heap()->map_space() ||
53 owner == page->heap()->cell_space() ||
54 owner == page->heap()->property_cell_space() ||
55 owner == page->heap()->code_space());
56 Initialize(reinterpret_cast<PagedSpace*>(owner),
57 page->area_start(),
58 page->area_end(),
59 kOnePageOnly,
60 size_func);
61 ASSERT(page->WasSweptPrecisely());
62 }
63
64
65 void HeapObjectIterator::Initialize(PagedSpace* space,
66 Address cur, Address end,
67 HeapObjectIterator::PageMode mode,
68 HeapObjectCallback size_f) {
69 // Check that we actually can iterate this space.
70 ASSERT(!space->was_swept_conservatively());
71
72 space_ = space;
73 cur_addr_ = cur;
74 cur_end_ = end;
75 page_mode_ = mode;
76 size_func_ = size_f;
77 }
78
79
80 // We have hit the end of the current page and should advance to the next
81 // block of objects, which lives on the next page.
82 bool HeapObjectIterator::AdvanceToNextPage() {
83 ASSERT(cur_addr_ == cur_end_);
84 if (page_mode_ == kOnePageOnly) return false;
85 Page* cur_page;
86 if (cur_addr_ == NULL) {
87 cur_page = space_->anchor();
88 } else {
89 cur_page = Page::FromAddress(cur_addr_ - 1);
90 ASSERT(cur_addr_ == cur_page->area_end());
91 }
92 cur_page = cur_page->next_page();
93 if (cur_page == space_->anchor()) return false;
94 cur_addr_ = cur_page->area_start();
95 cur_end_ = cur_page->area_end();
96 ASSERT(cur_page->WasSweptPrecisely());
97 return true;
98 }
99
100
101 // -----------------------------------------------------------------------------
102 // CodeRange
103
104
105 CodeRange::CodeRange(Isolate* isolate)
106 : isolate_(isolate),
107 code_range_(NULL),
108 free_list_(0),
109 allocation_list_(0),
110 current_allocation_block_index_(0) {
111 }
112
113
114 bool CodeRange::SetUp(size_t requested) {
115 ASSERT(code_range_ == NULL);
116
117 if (requested == 0) {
118 // When a target requires the code range feature, we put all code objects
119 // in a kMaximalCodeRangeSize range of virtual address space, so that
120 // they can call each other with near calls.
121 if (kRequiresCodeRange) {
122 requested = kMaximalCodeRangeSize;
123 } else {
124 return true;
125 }
126 }
127
128 ASSERT(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
129 code_range_ = new VirtualMemory(requested);
130 CHECK(code_range_ != NULL);
131 if (!code_range_->IsReserved()) {
132 delete code_range_;
133 code_range_ = NULL;
134 return false;
135 }
136
137 // We are sure that we have mapped a block of requested addresses.
138 ASSERT(code_range_->size() == requested);
139 LOG(isolate_,
140 NewEvent("CodeRange", code_range_->address(), requested));
141 Address base = reinterpret_cast<Address>(code_range_->address());
142 Address aligned_base =
143 RoundUp(reinterpret_cast<Address>(code_range_->address()),
144 MemoryChunk::kAlignment);
145 size_t size = code_range_->size() - (aligned_base - base);
146 allocation_list_.Add(FreeBlock(aligned_base, size));
147 current_allocation_block_index_ = 0;
148 return true;
149 }
150
151
152 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
153 const FreeBlock* right) {
154 // The entire point of CodeRange is that the difference between two
155 // addresses in the range can be represented as a signed 32-bit int,
156 // so the cast is semantically correct.
157 return static_cast<int>(left->start - right->start);
158 }
159
160
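// Advances to an allocation block with at least |requested| bytes, starting
// with the block after the current one. If none is found, the free blocks and
// the remaining allocation blocks are combined, sorted and coalesced into a
// new allocation list, and the search restarts from the beginning. Returns
// false if the code range is full or too fragmented to satisfy the request.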
161 bool CodeRange::GetNextAllocationBlock(size_t requested) {
162 for (current_allocation_block_index_++;
163 current_allocation_block_index_ < allocation_list_.length();
164 current_allocation_block_index_++) {
165 if (requested <= allocation_list_[current_allocation_block_index_].size) {
166 return true; // Found a large enough allocation block.
167 }
168 }
169
170 // Sort and merge the free blocks on the free list and the allocation list.
171 free_list_.AddAll(allocation_list_);
172 allocation_list_.Clear();
173 free_list_.Sort(&CompareFreeBlockAddress);
174 for (int i = 0; i < free_list_.length();) {
175 FreeBlock merged = free_list_[i];
176 i++;
177 // Add adjacent free blocks to the current merged block.
178 while (i < free_list_.length() &&
179 free_list_[i].start == merged.start + merged.size) {
180 merged.size += free_list_[i].size;
181 i++;
182 }
183 if (merged.size > 0) {
184 allocation_list_.Add(merged);
185 }
186 }
187 free_list_.Clear();
188
189 for (current_allocation_block_index_ = 0;
190 current_allocation_block_index_ < allocation_list_.length();
191 current_allocation_block_index_++) {
192 if (requested <= allocation_list_[current_allocation_block_index_].size) {
193 return true; // Found a large enough allocation block.
194 }
195 }
196 current_allocation_block_index_ = 0;
197 // Code range is full or too fragmented.
198 return false;
199 }
200
201
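// Allocates |*allocated| bytes (the rounded-up request, or the whole current
// block if only a small remainder would be left) from the front of the
// current allocation block and commits the first |commit_size| bytes of it.
// Returns the start address, or NULL if no block is large enough or the
// commit fails.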
202 Address CodeRange::AllocateRawMemory(const size_t requested_size,
203 const size_t commit_size,
204 size_t* allocated) {
205 ASSERT(commit_size <= requested_size);
206 ASSERT(current_allocation_block_index_ < allocation_list_.length());
207 if (requested_size > allocation_list_[current_allocation_block_index_].size) {
208 // Find an allocation block large enough.
209 if (!GetNextAllocationBlock(requested_size)) return NULL;
210 }
211 // Commit the requested memory at the start of the current allocation block.
212 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
213 FreeBlock current = allocation_list_[current_allocation_block_index_];
214 if (aligned_requested >= (current.size - Page::kPageSize)) {
215 // Don't leave a small free block behind; it is useless for a large object or chunk.
216 *allocated = current.size;
217 } else {
218 *allocated = aligned_requested;
219 }
220 ASSERT(*allocated <= current.size);
221 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
222 if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
223 current.start,
224 commit_size,
225 *allocated)) {
226 *allocated = 0;
227 return NULL;
228 }
229 allocation_list_[current_allocation_block_index_].start += *allocated;
230 allocation_list_[current_allocation_block_index_].size -= *allocated;
231 if (*allocated == current.size) {
232 // This block is used up, get the next one.
233 if (!GetNextAllocationBlock(0)) return NULL;
234 }
235 return current.start;
236 }
237
238
239 bool CodeRange::CommitRawMemory(Address start, size_t length) {
240 return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
241 }
242
243
244 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
245 return code_range_->Uncommit(start, length);
246 }
247
248
249 void CodeRange::FreeRawMemory(Address address, size_t length) {
250 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
251 free_list_.Add(FreeBlock(address, length));
252 code_range_->Uncommit(address, length);
253 }
254
255
256 void CodeRange::TearDown() {
257 delete code_range_; // Frees all memory in the virtual memory range.
258 code_range_ = NULL;
259 free_list_.Free();
260 allocation_list_.Free();
261 }
262
263
264 // -----------------------------------------------------------------------------
265 // MemoryAllocator
266 //
267
268 MemoryAllocator::MemoryAllocator(Isolate* isolate)
269 : isolate_(isolate),
270 capacity_(0),
271 capacity_executable_(0),
272 size_(0),
273 size_executable_(0),
274 lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
275 highest_ever_allocated_(reinterpret_cast<void*>(0)) {
276 }
277
278
279 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
280 capacity_ = RoundUp(capacity, Page::kPageSize);
281 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
282 ASSERT_GE(capacity_, capacity_executable_);
283
284 size_ = 0;
285 size_executable_ = 0;
286
287 return true;
288 }
289
290
291 void MemoryAllocator::TearDown() {
292 // Check that spaces were torn down before MemoryAllocator.
293 ASSERT(size_ == 0);
294 // TODO(gc) this will be true again when we fix FreeMemory.
295 // ASSERT(size_executable_ == 0);
296 capacity_ = 0;
297 capacity_executable_ = 0;
298 }
299
300
301 bool MemoryAllocator::CommitMemory(Address base,
302 size_t size,
303 Executability executable) {
304 if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
305 return false;
306 }
307 UpdateAllocatedSpaceLimits(base, base + size);
308 return true;
309 }
310
311
312 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
313 Executability executable) {
314 // TODO(gc) make code_range part of memory allocator?
315 ASSERT(reservation->IsReserved());
316 size_t size = reservation->size();
317 ASSERT(size_ >= size);
318 size_ -= size;
319
320 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
321
322 if (executable == EXECUTABLE) {
323 ASSERT(size_executable_ >= size);
324 size_executable_ -= size;
325 }
326 // Code which is part of the code-range does not have its own VirtualMemory.
327 ASSERT(isolate_->code_range() == NULL ||
328 !isolate_->code_range()->contains(
329 static_cast<Address>(reservation->address())));
330 ASSERT(executable == NOT_EXECUTABLE ||
331 isolate_->code_range() == NULL ||
332 !isolate_->code_range()->valid());
333 reservation->Release();
334 }
335
336
337 void MemoryAllocator::FreeMemory(Address base,
338 size_t size,
339 Executability executable) {
340 // TODO(gc) make code_range part of memory allocator?
341 ASSERT(size_ >= size);
342 size_ -= size;
343
344 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
345
346 if (executable == EXECUTABLE) {
347 ASSERT(size_executable_ >= size);
348 size_executable_ -= size;
349 }
350 if (isolate_->code_range() != NULL &&
351 isolate_->code_range()->contains(static_cast<Address>(base))) {
352 ASSERT(executable == EXECUTABLE);
353 isolate_->code_range()->FreeRawMemory(base, size);
354 } else {
355 ASSERT(executable == NOT_EXECUTABLE ||
356 isolate_->code_range() == NULL ||
357 !isolate_->code_range()->valid());
358 bool result = VirtualMemory::ReleaseRegion(base, size);
359 USE(result);
360 ASSERT(result);
361 }
362 }
363
364
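// Reserves (without committing) |size| bytes of virtual address space and
// transfers ownership of the reservation to |controller|. Returns the
// reservation's base address rounded up to |alignment|, or NULL on failure.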
365 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
366 size_t alignment,
367 VirtualMemory* controller) {
368 VirtualMemory reservation(size, alignment);
369
370 if (!reservation.IsReserved()) return NULL;
371 size_ += reservation.size();
372 Address base = RoundUp(static_cast<Address>(reservation.address()),
373 alignment);
374 controller->TakeControl(&reservation);
375 return base;
376 }
377
378
379 Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
380 size_t commit_size,
381 size_t alignment,
382 Executability executable,
383 VirtualMemory* controller) {
384 ASSERT(commit_size <= reserve_size);
385 VirtualMemory reservation;
386 Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
387 if (base == NULL) return NULL;
388
389 if (executable == EXECUTABLE) {
390 if (!CommitExecutableMemory(&reservation,
391 base,
392 commit_size,
393 reserve_size)) {
394 base = NULL;
395 }
396 } else {
397 if (reservation.Commit(base, commit_size, false)) {
398 UpdateAllocatedSpaceLimits(base, base + commit_size);
399 } else {
400 base = NULL;
401 }
402 }
403
404 if (base == NULL) {
405 // Failed to commit the body. Release the mapping and any partially
406 // committed regions inside it.
407 reservation.Release();
408 return NULL;
409 }
410
411 controller->TakeControl(&reservation);
412 return base;
413 }
414
415
416 void Page::InitializeAsAnchor(PagedSpace* owner) {
417 set_owner(owner);
418 set_prev_page(this);
419 set_next_page(this);
420 }
421
422
423 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
424 Address start,
425 SemiSpace* semi_space) {
426 Address area_start = start + NewSpacePage::kObjectStartOffset;
427 Address area_end = start + Page::kPageSize;
428
429 MemoryChunk* chunk = MemoryChunk::Initialize(heap,
430 start,
431 Page::kPageSize,
432 area_start,
433 area_end,
434 NOT_EXECUTABLE,
435 semi_space);
436 chunk->set_next_chunk(NULL);
437 chunk->set_prev_chunk(NULL);
438 chunk->initialize_scan_on_scavenge(true);
439 bool in_to_space = (semi_space->id() != kFromSpace);
440 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
441 : MemoryChunk::IN_FROM_SPACE);
442 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
443 : MemoryChunk::IN_TO_SPACE));
444 NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
445 heap->incremental_marking()->SetNewSpacePageFlags(page);
446 return page;
447 }
448
449
450 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
451 set_owner(semi_space);
452 set_next_chunk(this);
453 set_prev_chunk(this);
454 // Clearing all flags marks this invalid page as not being in new-space;
455 // all real new-space pages will be in new-space.
456 SetFlags(0, ~0);
457 }
458
459
460 MemoryChunk* MemoryChunk::Initialize(Heap* heap,
461 Address base,
462 size_t size,
463 Address area_start,
464 Address area_end,
465 Executability executable,
466 Space* owner) {
467 MemoryChunk* chunk = FromAddress(base);
468
469 ASSERT(base == chunk->address());
470
471 chunk->heap_ = heap;
472 chunk->size_ = size;
473 chunk->area_start_ = area_start;
474 chunk->area_end_ = area_end;
475 chunk->flags_ = 0;
476 chunk->set_owner(owner);
477 chunk->InitializeReservedMemory();
478 chunk->slots_buffer_ = NULL;
479 chunk->skip_list_ = NULL;
480 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
481 chunk->progress_bar_ = 0;
482 chunk->high_water_mark_ = static_cast<int>(area_start - base);
483 chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE);
484 chunk->available_in_small_free_list_ = 0;
485 chunk->available_in_medium_free_list_ = 0;
486 chunk->available_in_large_free_list_ = 0;
487 chunk->available_in_huge_free_list_ = 0;
488 chunk->non_available_small_blocks_ = 0;
489 chunk->ResetLiveBytes();
490 Bitmap::Clear(chunk);
491 chunk->initialize_scan_on_scavenge(false);
492 chunk->SetFlag(WAS_SWEPT_PRECISELY);
493
494 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
495 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
496
497 if (executable == EXECUTABLE) {
498 chunk->SetFlag(IS_EXECUTABLE);
499 }
500
501 if (owner == heap->old_data_space()) {
502 chunk->SetFlag(CONTAINS_ONLY_DATA);
503 }
504
505 return chunk;
506 }
507
508
509 // Commit MemoryChunk area to the requested size.
510 bool MemoryChunk::CommitArea(size_t requested) {
511 size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
512 MemoryAllocator::CodePageGuardSize() : 0;
513 size_t header_size = area_start() - address() - guard_size;
514 size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
515 size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
516 OS::CommitPageSize());
517
518 if (commit_size > committed_size) {
519 // The commit size should be less than or equal to the reserved size.
520 ASSERT(commit_size <= size() - 2 * guard_size);
521 // Append the committed area.
522 Address start = address() + committed_size + guard_size;
523 size_t length = commit_size - committed_size;
524 if (reservation_.IsReserved()) {
525 Executability executable = IsFlagSet(IS_EXECUTABLE)
526 ? EXECUTABLE : NOT_EXECUTABLE;
527 if (!heap()->isolate()->memory_allocator()->CommitMemory(
528 start, length, executable)) {
529 return false;
530 }
531 } else {
532 CodeRange* code_range = heap_->isolate()->code_range();
533 ASSERT(code_range != NULL && code_range->valid() &&
534 IsFlagSet(IS_EXECUTABLE));
535 if (!code_range->CommitRawMemory(start, length)) return false;
536 }
537
538 if (Heap::ShouldZapGarbage()) {
539 heap_->isolate()->memory_allocator()->ZapBlock(start, length);
540 }
541 } else if (commit_size < committed_size) {
542 ASSERT(commit_size > 0);
543 // Shrink the committed area.
544 size_t length = committed_size - commit_size;
545 Address start = address() + committed_size + guard_size - length;
546 if (reservation_.IsReserved()) {
547 if (!reservation_.Uncommit(start, length)) return false;
548 } else {
549 CodeRange* code_range = heap_->isolate()->code_range();
550 ASSERT(code_range != NULL && code_range->valid() &&
551 IsFlagSet(IS_EXECUTABLE));
552 if (!code_range->UncommitRawMemory(start, length)) return false;
553 }
554 }
555
556 area_end_ = area_start_ + requested;
557 return true;
558 }
559
560
561 void MemoryChunk::InsertAfter(MemoryChunk* other) {
562 MemoryChunk* other_next = other->next_chunk();
563
564 set_next_chunk(other_next);
565 set_prev_chunk(other);
566 other_next->set_prev_chunk(this);
567 other->set_next_chunk(this);
568 }
569
570
571 void MemoryChunk::Unlink() {
572 MemoryChunk* next_element = next_chunk();
573 MemoryChunk* prev_element = prev_chunk();
574 next_element->set_prev_chunk(prev_element);
575 prev_element->set_next_chunk(next_element);
576 set_prev_chunk(NULL);
577 set_next_chunk(NULL);
578 }
579
580
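// Allocates a chunk whose area can hold |reserve_area_size| bytes, with the
// first |commit_area_size| bytes committed up front. Executable chunks are
// surrounded by guard pages and, when a valid code range exists, are carved
// out of it; see the layout diagrams below.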
581 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
582 intptr_t commit_area_size,
583 Executability executable,
584 Space* owner) {
585 ASSERT(commit_area_size <= reserve_area_size);
586
587 size_t chunk_size;
588 Heap* heap = isolate_->heap();
589 Address base = NULL;
590 VirtualMemory reservation;
591 Address area_start = NULL;
592 Address area_end = NULL;
593
594 //
595 // MemoryChunk layout:
596 //
597 // Executable
598 // +----------------------------+<- base aligned with MemoryChunk::kAlignment
599 // | Header |
600 // +----------------------------+<- base + CodePageGuardStartOffset
601 // | Guard |
602 // +----------------------------+<- area_start_
603 // | Area |
604 // +----------------------------+<- area_end_ (area_start + commit_area_size)
605 // | Committed but not used |
606 // +----------------------------+<- aligned at OS page boundary
607 // | Reserved but not committed |
608 // +----------------------------+<- aligned at OS page boundary
609 // | Guard |
610 // +----------------------------+<- base + chunk_size
611 //
612 // Non-executable
613 // +----------------------------+<- base aligned with MemoryChunk::kAlignment
614 // | Header |
615 // +----------------------------+<- area_start_ (base + kObjectStartOffset)
616 // | Area |
617 // +----------------------------+<- area_end_ (area_start + commit_area_size)
618 // | Committed but not used |
619 // +----------------------------+<- aligned at OS page boundary
620 // | Reserved but not committed |
621 // +----------------------------+<- base + chunk_size
622 //
623
624 if (executable == EXECUTABLE) {
625 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
626 OS::CommitPageSize()) + CodePageGuardSize();
627
628 // Check executable memory limit.
629 if (size_executable_ + chunk_size > capacity_executable_) {
630 LOG(isolate_,
631 StringEvent("MemoryAllocator::AllocateRawMemory",
632 "V8 Executable Allocation capacity exceeded"));
633 return NULL;
634 }
635
636 // Size of header (not executable) plus area (executable).
637 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
638 OS::CommitPageSize());
639 // Allocate executable memory either from code range or from the
640 // OS.
641 if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
642 base = isolate_->code_range()->AllocateRawMemory(chunk_size,
643 commit_size,
644 &chunk_size);
645 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
646 MemoryChunk::kAlignment));
647 if (base == NULL) return NULL;
648 size_ += chunk_size;
649 // Update executable memory size.
650 size_executable_ += chunk_size;
651 } else {
652 base = AllocateAlignedMemory(chunk_size,
653 commit_size,
654 MemoryChunk::kAlignment,
655 executable,
656 &reservation);
657 if (base == NULL) return NULL;
658 // Update executable memory size.
659 size_executable_ += reservation.size();
660 }
661
662 if (Heap::ShouldZapGarbage()) {
663 ZapBlock(base, CodePageGuardStartOffset());
664 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
665 }
666
667 area_start = base + CodePageAreaStartOffset();
668 area_end = area_start + commit_area_size;
669 } else {
670 chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
671 OS::CommitPageSize());
672 size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
673 commit_area_size, OS::CommitPageSize());
674 base = AllocateAlignedMemory(chunk_size,
675 commit_size,
676 MemoryChunk::kAlignment,
677 executable,
678 &reservation);
679
680 if (base == NULL) return NULL;
681
682 if (Heap::ShouldZapGarbage()) {
683 ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
684 }
685
686 area_start = base + Page::kObjectStartOffset;
687 area_end = area_start + commit_area_size;
688 }
689
690 // Use chunk_size for statistics and callbacks because we assume that they
691 // treat reserved but not-yet committed memory regions of chunks as allocated.
692 isolate_->counters()->memory_allocated()->
693 Increment(static_cast<int>(chunk_size));
694
695 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
696 if (owner != NULL) {
697 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
698 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
699 }
700
701 MemoryChunk* result = MemoryChunk::Initialize(heap,
702 base,
703 chunk_size,
704 area_start,
705 area_end,
706 executable,
707 owner);
708 result->set_reserved_memory(&reservation);
709 MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size);
710 return result;
711 }
712
713
714 void Page::ResetFreeListStatistics() {
715 non_available_small_blocks_ = 0;
716 available_in_small_free_list_ = 0;
717 available_in_medium_free_list_ = 0;
718 available_in_large_free_list_ = 0;
719 available_in_huge_free_list_ = 0;
720 }
721
722
723 Page* MemoryAllocator::AllocatePage(intptr_t size,
724 PagedSpace* owner,
725 Executability executable) {
726 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
727
728 if (chunk == NULL) return NULL;
729
730 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
731 }
732
733
734 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
735 Space* owner,
736 Executability executable) {
737 MemoryChunk* chunk = AllocateChunk(object_size,
738 object_size,
739 executable,
740 owner);
741 if (chunk == NULL) return NULL;
742 return LargePage::Initialize(isolate_->heap(), chunk);
743 }
744
745
746 void MemoryAllocator::Free(MemoryChunk* chunk) {
747 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
748 if (chunk->owner() != NULL) {
749 ObjectSpace space =
750 static_cast<ObjectSpace>(1 << chunk->owner()->identity());
751 PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
752 }
753
754 isolate_->heap()->RememberUnmappedPage(
755 reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
756
757 delete chunk->slots_buffer();
758 delete chunk->skip_list();
759
760 VirtualMemory* reservation = chunk->reserved_memory();
761 if (reservation->IsReserved()) {
762 FreeMemory(reservation, chunk->executable());
763 } else {
764 FreeMemory(chunk->address(),
765 chunk->size(),
766 chunk->executable());
767 }
768 }
769
770
771 bool MemoryAllocator::CommitBlock(Address start,
772 size_t size,
773 Executability executable) {
774 if (!CommitMemory(start, size, executable)) return false;
775
776 if (Heap::ShouldZapGarbage()) {
777 ZapBlock(start, size);
778 }
779
780 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
781 return true;
782 }
783
784
785 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
786 if (!VirtualMemory::UncommitRegion(start, size)) return false;
787 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
788 return true;
789 }
790
791
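// Overwrites the block with kZapValue, one pointer-sized slot at a time.
// Callers only do this when Heap::ShouldZapGarbage() is true, to make
// accidental use of uninitialized memory easier to detect.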
792 void MemoryAllocator::ZapBlock(Address start, size_t size) {
793 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
794 Memory::Address_at(start + s) = kZapValue;
795 }
796 }
797
798
799 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
800 AllocationAction action,
801 size_t size) {
802 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
803 MemoryAllocationCallbackRegistration registration =
804 memory_allocation_callbacks_[i];
805 if ((registration.space & space) == space &&
806 (registration.action & action) == action)
807 registration.callback(space, action, static_cast<int>(size));
808 }
809 }
810
811
812 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
813 MemoryAllocationCallback callback) {
814 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
815 if (memory_allocation_callbacks_[i].callback == callback) return true;
816 }
817 return false;
818 }
819
820
821 void MemoryAllocator::AddMemoryAllocationCallback(
822 MemoryAllocationCallback callback,
823 ObjectSpace space,
824 AllocationAction action) {
825 ASSERT(callback != NULL);
826 MemoryAllocationCallbackRegistration registration(callback, space, action);
827 ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
828 return memory_allocation_callbacks_.Add(registration);
829 }
830
831
832 void MemoryAllocator::RemoveMemoryAllocationCallback(
833 MemoryAllocationCallback callback) {
834 ASSERT(callback != NULL);
835 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
836 if (memory_allocation_callbacks_[i].callback == callback) {
837 memory_allocation_callbacks_.Remove(i);
838 return;
839 }
840 }
841 UNREACHABLE();
842 }
843
844
845 #ifdef DEBUG
846 void MemoryAllocator::ReportStatistics() {
847 float pct = static_cast<float>(capacity_ - size_) / capacity_;
848 PrintF(" capacity: %" V8_PTR_PREFIX "d"
849 ", used: %" V8_PTR_PREFIX "d"
850 ", available: %%%d\n\n",
851 capacity_, size_, static_cast<int>(pct*100));
852 }
853 #endif
854
855
856 int MemoryAllocator::CodePageGuardStartOffset() {
857 // We are guarding code pages: the first OS page after the header
858 // will be protected as non-writable.
859 return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
860 }
861
862
863 int MemoryAllocator::CodePageGuardSize() {
864 return static_cast<int>(OS::CommitPageSize());
865 }
866
867
868 int MemoryAllocator::CodePageAreaStartOffset() {
869 // We are guarding code pages: the first OS page after the header
870 // will be protected as non-writable.
871 return CodePageGuardStartOffset() + CodePageGuardSize();
872 }
873
874
875 int MemoryAllocator::CodePageAreaEndOffset() {
876 // We are guarding code pages: the last OS page will be protected as
877 // non-writable.
878 return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
879 }
880
881
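// Commits an executable page in four steps: the non-executable header, a
// guard page after the header, the executable body, and a guard page at the
// end of the reservation. Returns false as soon as any step fails.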
882 bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
883 Address start,
884 size_t commit_size,
885 size_t reserved_size) {
886 // Commit page header (not executable).
887 if (!vm->Commit(start,
888 CodePageGuardStartOffset(),
889 false)) {
890 return false;
891 }
892
893 // Create guard page after the header.
894 if (!vm->Guard(start + CodePageGuardStartOffset())) {
895 return false;
896 }
897
898 // Commit page body (executable).
899 if (!vm->Commit(start + CodePageAreaStartOffset(),
900 commit_size - CodePageGuardStartOffset(),
901 true)) {
902 return false;
903 }
904
905 // Create guard page before the end.
906 if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
907 return false;
908 }
909
910 UpdateAllocatedSpaceLimits(start,
911 start + CodePageAreaStartOffset() +
912 commit_size - CodePageGuardStartOffset());
913 return true;
914 }
915
916
917 // -----------------------------------------------------------------------------
918 // MemoryChunk implementation
919
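// Adjusts live-byte accounting for a change made by the mutator. For pages
// outside new space that have not been swept yet, the owning space's unswept
// free bytes are adjusted by the opposite amount to keep the counters
// consistent.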
920 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
921 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
922 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
923 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
924 }
925 chunk->IncrementLiveBytes(by);
926 }
927
928
929 // -----------------------------------------------------------------------------
930 // PagedSpace implementation
931
932 PagedSpace::PagedSpace(Heap* heap,
933 intptr_t max_capacity,
934 AllocationSpace id,
935 Executability executable)
936 : Space(heap, id, executable),
937 free_list_(this),
938 was_swept_conservatively_(false),
939 unswept_free_bytes_(0),
940 end_of_unswept_pages_(NULL) {
941 if (id == CODE_SPACE) {
942 area_size_ = heap->isolate()->memory_allocator()->
943 CodePageAreaSize();
944 } else {
945 area_size_ = Page::kPageSize - Page::kObjectStartOffset;
946 }
947 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
948 * AreaSize();
949 accounting_stats_.Clear();
950
951 allocation_info_.set_top(NULL);
952 allocation_info_.set_limit(NULL);
953
954 anchor_.InitializeAsAnchor(this);
955 }
956
957
958 bool PagedSpace::SetUp() {
959 return true;
960 }
961
962
963 bool PagedSpace::HasBeenSetUp() {
964 return true;
965 }
966
967
968 void PagedSpace::TearDown() {
969 PageIterator iterator(this);
970 while (iterator.has_next()) {
971 heap()->isolate()->memory_allocator()->Free(iterator.next());
972 }
973 anchor_.set_next_page(&anchor_);
974 anchor_.set_prev_page(&anchor_);
975 accounting_stats_.Clear();
976 }
977
978
979 size_t PagedSpace::CommittedPhysicalMemory() {
980 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
981 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
982 size_t size = 0;
983 PageIterator it(this);
984 while (it.has_next()) {
985 size += it.next()->CommittedPhysicalMemory();
986 }
987 return size;
988 }
989
990
991 Object* PagedSpace::FindObject(Address addr) {
992 // Note: this function can only be called on precisely swept spaces.
993 ASSERT(!heap()->mark_compact_collector()->in_use());
994
995 if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found.
996
997 Page* p = Page::FromAddress(addr);
998 HeapObjectIterator it(p, NULL);
999 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
1000 Address cur = obj->address();
1001 Address next = cur + obj->Size();
1002 if ((cur <= addr) && (addr < next)) return obj;
1003 }
1004
1005 UNREACHABLE();
1006 return Smi::FromInt(0);
1007 }
1008
1009
1010 bool PagedSpace::CanExpand() {
1011 ASSERT(max_capacity_ % AreaSize() == 0);
1012
1013 if (Capacity() == max_capacity_) return false;
1014
1015 ASSERT(Capacity() < max_capacity_);
1016
1017 // Are we going to exceed capacity for this space?
1018 if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
1019
1020 return true;
1021 }
1022
1023
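// Adds one page to the space: the regular area size, or a smaller first page
// if the space is still empty. Returns false if the space is at capacity or
// the page allocation fails.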
1024 bool PagedSpace::Expand() {
1025 if (!CanExpand()) return false;
1026
1027 intptr_t size = AreaSize();
1028
1029 if (anchor_.next_page() == &anchor_) {
1030 size = SizeOfFirstPage();
1031 }
1032
1033 Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
1034 size, this, executable());
1035 if (p == NULL) return false;
1036
1037 ASSERT(Capacity() <= max_capacity_);
1038
1039 p->InsertAfter(anchor_.prev_page());
1040
1041 return true;
1042 }
1043
1044
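// Returns the size to use for the first page of each space, which is smaller
// than the regular area size for most spaces; the result is capped at
// AreaSize().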
1045 intptr_t PagedSpace::SizeOfFirstPage() {
1046 int size = 0;
1047 switch (identity()) {
1048 case OLD_POINTER_SPACE:
1049 size = 96 * kPointerSize * KB;
1050 break;
1051 case OLD_DATA_SPACE:
1052 size = 192 * KB;
1053 break;
1054 case MAP_SPACE:
1055 size = 16 * kPointerSize * KB;
1056 break;
1057 case CELL_SPACE:
1058 size = 16 * kPointerSize * KB;
1059 break;
1060 case PROPERTY_CELL_SPACE:
1061 size = 8 * kPointerSize * KB;
1062 break;
1063 case CODE_SPACE: {
1064 CodeRange* code_range = heap()->isolate()->code_range();
1065 if (code_range != NULL && code_range->valid()) {
1066 // When code range exists, code pages are allocated in a special way
1067 // (from the reserved code range). That part of the code is not yet
1068 // upgraded to handle small pages.
1069 size = AreaSize();
1070 } else {
1071 size = RoundUp(
1072 480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100,
1073 kPointerSize);
1074 }
1075 break;
1076 }
1077 default:
1078 UNREACHABLE();
1079 }
1080 return Min(size, AreaSize());
1081 }
1082
1083
1084 int PagedSpace::CountTotalPages() {
1085 PageIterator it(this);
1086 int count = 0;
1087 while (it.has_next()) {
1088 it.next();
1089 count++;
1090 }
1091 return count;
1092 }
1093
1094
1095 void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
1096 sizes->huge_size_ = page->available_in_huge_free_list();
1097 sizes->small_size_ = page->available_in_small_free_list();
1098 sizes->medium_size_ = page->available_in_medium_free_list();
1099 sizes->large_size_ = page->available_in_large_free_list();
1100 }
1101
1102
1103 void PagedSpace::ResetFreeListStatistics() {
1104 PageIterator page_iterator(this);
1105 while (page_iterator.has_next()) {
1106 Page* page = page_iterator.next();
1107 page->ResetFreeListStatistics();
1108 }
1109 }
1110
1111
1112 void PagedSpace::IncreaseCapacity(int size) {
1113 accounting_stats_.ExpandSpace(size);
1114 }
1115
1116
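// Removes an empty page from the space: its free-list entries are evicted,
// any allocation info pointing into it is cleared, and the page is either
// freed immediately (data-only pages) or queued for later release.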
1117 void PagedSpace::ReleasePage(Page* page) {
1118 ASSERT(page->LiveBytes() == 0);
1119 ASSERT(AreaSize() == page->area_size());
1120
1121 if (page->WasSwept()) {
1122 intptr_t size = free_list_.EvictFreeListItems(page);
1123 accounting_stats_.AllocateBytes(size);
1124 ASSERT_EQ(AreaSize(), static_cast<int>(size));
1125 } else {
1126 DecreaseUnsweptFreeBytes(page);
1127 }
1128
1129 if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
1130 heap()->decrement_scan_on_scavenge_pages();
1131 page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
1132 }
1133
1134 ASSERT(!free_list_.ContainsPageFreeListItems(page));
1135
1136 if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1137 allocation_info_.set_top(NULL);
1138 allocation_info_.set_limit(NULL);
1139 }
1140
1141 page->Unlink();
1142 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
1143 heap()->isolate()->memory_allocator()->Free(page);
1144 } else {
1145 heap()->QueueMemoryChunkForFree(page);
1146 }
1147
1148 ASSERT(Capacity() > 0);
1149 accounting_stats_.ShrinkSpace(AreaSize());
1150 }
1151
1152
1153 #ifdef DEBUG
1154 void PagedSpace::Print() { }
1155 #endif
1156
1157 #ifdef VERIFY_HEAP
1158 void PagedSpace::Verify(ObjectVisitor* visitor) {
1159 // We can only iterate over the pages if they were swept precisely.
1160 if (was_swept_conservatively_) return;
1161
1162 bool allocation_pointer_found_in_space =
1163 (allocation_info_.top() == allocation_info_.limit());
1164 PageIterator page_iterator(this);
1165 while (page_iterator.has_next()) {
1166 Page* page = page_iterator.next();
1167 CHECK(page->owner() == this);
1168 if (page == Page::FromAllocationTop(allocation_info_.top())) {
1169 allocation_pointer_found_in_space = true;
1170 }
1171 CHECK(page->WasSweptPrecisely());
1172 HeapObjectIterator it(page, NULL);
1173 Address end_of_previous_object = page->area_start();
1174 Address top = page->area_end();
1175 int black_size = 0;
1176 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1177 CHECK(end_of_previous_object <= object->address());
1178
1179 // The first word should be a map, and we expect all map pointers to
1180 // be in map space.
1181 Map* map = object->map();
1182 CHECK(map->IsMap());
1183 CHECK(heap()->map_space()->Contains(map));
1184
1185 // Perform space-specific object verification.
1186 VerifyObject(object);
1187
1188 // The object itself should look OK.
1189 object->ObjectVerify();
1190
1191 // All the interior pointers should be contained in the heap.
1192 int size = object->Size();
1193 object->IterateBody(map->instance_type(), size, visitor);
1194 if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1195 black_size += size;
1196 }
1197
1198 CHECK(object->address() + size <= top);
1199 end_of_previous_object = object->address() + size;
1200 }
1201 CHECK_LE(black_size, page->LiveBytes());
1202 }
1203 CHECK(allocation_pointer_found_in_space);
1204 }
1205 #endif // VERIFY_HEAP
1206
1207 // -----------------------------------------------------------------------------
1208 // NewSpace implementation
1209
1210
1211 bool NewSpace::SetUp(int reserved_semispace_capacity,
1212 int maximum_semispace_capacity) {
1213 // Set up new space based on the preallocated memory block defined by
1214 // start and size. The provided space is divided into two semi-spaces.
1215 // To support fast containment testing in the new space, the size of
1216 // this chunk must be a power of two and it must be aligned to its size.
1217 int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1218
1219 size_t size = 2 * reserved_semispace_capacity;
1220 Address base =
1221 heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1222 size, size, &reservation_);
1223 if (base == NULL) return false;
1224
1225 chunk_base_ = base;
1226 chunk_size_ = static_cast<uintptr_t>(size);
1227 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1228
1229 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1230 ASSERT(IsPowerOf2(maximum_semispace_capacity));
1231
1232 // Allocate and set up the histogram arrays if necessary.
1233 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1234 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1235
1236 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1237 promoted_histogram_[name].set_name(#name);
1238 INSTANCE_TYPE_LIST(SET_NAME)
1239 #undef SET_NAME
1240
1241 ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1242 ASSERT(static_cast<intptr_t>(chunk_size_) >=
1243 2 * heap()->ReservedSemiSpaceSize());
1244 ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1245
1246 to_space_.SetUp(chunk_base_,
1247 initial_semispace_capacity,
1248 maximum_semispace_capacity);
1249 from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1250 initial_semispace_capacity,
1251 maximum_semispace_capacity);
1252 if (!to_space_.Commit()) {
1253 return false;
1254 }
1255 ASSERT(!from_space_.is_committed()); // No need to use memory yet.
1256
1257 start_ = chunk_base_;
1258 address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1259 object_mask_ = address_mask_ | kHeapObjectTagMask;
1260 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1261
1262 ResetAllocationInfo();
1263
1264 return true;
1265 }
1266
1267
1268 void NewSpace::TearDown() {
1269 if (allocated_histogram_) {
1270 DeleteArray(allocated_histogram_);
1271 allocated_histogram_ = NULL;
1272 }
1273 if (promoted_histogram_) {
1274 DeleteArray(promoted_histogram_);
1275 promoted_histogram_ = NULL;
1276 }
1277
1278 start_ = NULL;
1279 allocation_info_.set_top(NULL);
1280 allocation_info_.set_limit(NULL);
1281
1282 to_space_.TearDown();
1283 from_space_.TearDown();
1284
1285 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1286
1287 ASSERT(reservation_.IsReserved());
1288 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1289 NOT_EXECUTABLE);
1290 chunk_base_ = NULL;
1291 chunk_size_ = 0;
1292 }
1293
1294
1295 void NewSpace::Flip() {
1296 SemiSpace::Swap(&from_space_, &to_space_);
1297 }
1298
1299
1300 void NewSpace::Grow() {
1301 // Double the semispace size but only up to maximum capacity.
1302 ASSERT(Capacity() < MaximumCapacity());
1303 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
1304 if (to_space_.GrowTo(new_capacity)) {
1305 // Only grow from space if we managed to grow to-space.
1306 if (!from_space_.GrowTo(new_capacity)) {
1307 // If we managed to grow to-space but couldn't grow from-space,
1308 // attempt to shrink to-space.
1309 if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1310 // We are in an inconsistent state because we could not
1311 // commit/uncommit memory from new space.
1312 V8::FatalProcessOutOfMemory("Failed to grow new space.");
1313 }
1314 }
1315 }
1316 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1317 }
1318
1319
1320 void NewSpace::Shrink() {
1321 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
1322 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1323 if (rounded_new_capacity < Capacity() &&
1324 to_space_.ShrinkTo(rounded_new_capacity)) {
1325 // Only shrink from-space if we managed to shrink to-space.
1326 from_space_.Reset();
1327 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1328 // If we managed to shrink to-space but couldn't shrink from
1329 // space, attempt to grow to-space again.
1330 if (!to_space_.GrowTo(from_space_.Capacity())) {
1331 // We are in an inconsistent state because we could not
1332 // commit/uncommit memory from new space.
1333 V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1334 }
1335 }
1336 }
1337 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1338 }
1339
1340
1341 void NewSpace::UpdateAllocationInfo() {
1342 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1343 allocation_info_.set_top(to_space_.page_low());
1344 allocation_info_.set_limit(to_space_.page_high());
1345 UpdateInlineAllocationLimit(0);
1346 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1347 }
1348
1349
1350 void NewSpace::ResetAllocationInfo() {
1351 to_space_.Reset();
1352 UpdateAllocationInfo();
1353 pages_used_ = 0;
1354 // Clear all mark-bits in the to-space.
1355 NewSpacePageIterator it(&to_space_);
1356 while (it.has_next()) {
1357 Bitmap::Clear(it.next());
1358 }
1359 }
1360
1361
1362 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1363 if (heap()->inline_allocation_disabled()) {
1364 // Lowest limit when linear allocation was disabled.
1365 Address high = to_space_.page_high();
1366 Address new_top = allocation_info_.top() + size_in_bytes;
1367 allocation_info_.set_limit(Min(new_top, high));
1368 } else if (inline_allocation_limit_step() == 0) {
1369 // Normal limit is the end of the current page.
1370 allocation_info_.set_limit(to_space_.page_high());
1371 } else {
1372 // Lower limit during incremental marking.
1373 Address high = to_space_.page_high();
1374 Address new_top = allocation_info_.top() + size_in_bytes;
1375 Address new_limit = new_top + inline_allocation_limit_step_;
1376 allocation_info_.set_limit(Min(new_limit, high));
1377 }
1378 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1379 }
1380
1381
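// Moves allocation on to the next to-space page. The unused remainder of the
// current page is turned into a filler object so the page stays iterable.
// Returns false if the current page is still empty or no further page is
// available.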
1382 bool NewSpace::AddFreshPage() {
1383 Address top = allocation_info_.top();
1384 if (NewSpacePage::IsAtStart(top)) {
1385 // The current page is already empty. Don't try to make another.
1386
1387 // We should only get here if someone asks to allocate more
1388 // than what can be stored in a single page.
1389 // TODO(gc): Change the limit on new-space allocation to prevent this
1390 // from happening (all such allocations should go directly to LOSpace).
1391 return false;
1392 }
1393 if (!to_space_.AdvancePage()) {
1394 // Failed to get a new page in to-space.
1395 return false;
1396 }
1397
1398 // Clear remainder of current page.
1399 Address limit = NewSpacePage::FromLimit(top)->area_end();
1400 if (heap()->gc_state() == Heap::SCAVENGE) {
1401 heap()->promotion_queue()->SetNewLimit(limit);
1402 heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
1403 }
1404
1405 int remaining_in_page = static_cast<int>(limit - top);
1406 heap()->CreateFillerObjectAt(top, remaining_in_page);
1407 pages_used_++;
1408 UpdateAllocationInfo();
1409
1410 return true;
1411 }
1412
1413
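// Slow path for new-space allocation: either the limit was lowered (so that
// incremental marking gets a chance to do a step) and only needs to be
// recomputed, or the current page is exhausted and a fresh page is required.
// Retries the fast path afterwards, or signals a retry after GC if no page
// is available.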
1414 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
1415 Address old_top = allocation_info_.top();
1416 Address high = to_space_.page_high();
1417 if (allocation_info_.limit() < high) {
1418 // Either the limit has been lowered because linear allocation was disabled
1419 // or because incremental marking wants to get a chance to do a step. Set
1420 // the new limit accordingly.
1421 Address new_top = old_top + size_in_bytes;
1422 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1423 heap()->incremental_marking()->Step(
1424 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1425 UpdateInlineAllocationLimit(size_in_bytes);
1426 top_on_previous_step_ = new_top;
1427 return AllocateRaw(size_in_bytes);
1428 } else if (AddFreshPage()) {
1429 // Switched to new page. Try allocating again.
1430 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1431 heap()->incremental_marking()->Step(
1432 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1433 top_on_previous_step_ = to_space_.page_low();
1434 return AllocateRaw(size_in_bytes);
1435 } else {
1436 return AllocationResult::Retry();
1437 }
1438 }
1439
1440
1441 #ifdef VERIFY_HEAP
1442 // We do not use the SemiSpaceIterator because verification doesn't assume
1443 // that it works (it depends on the invariants we are checking).
1444 void NewSpace::Verify() {
1445 // The allocation pointer should be in the space or at the very end.
1446 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1447
1448 // There should be objects packed in from the low address up to the
1449 // allocation pointer.
1450 Address current = to_space_.first_page()->area_start();
1451 CHECK_EQ(current, to_space_.space_start());
1452
1453 while (current != top()) {
1454 if (!NewSpacePage::IsAtEnd(current)) {
1455 // The allocation pointer should not be in the middle of an object.
1456 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1457 current < top());
1458
1459 HeapObject* object = HeapObject::FromAddress(current);
1460
1461 // The first word should be a map, and we expect all map pointers to
1462 // be in map space.
1463 Map* map = object->map();
1464 CHECK(map->IsMap());
1465 CHECK(heap()->map_space()->Contains(map));
1466
1467 // The object should not be code or a map.
1468 CHECK(!object->IsMap());
1469 CHECK(!object->IsCode());
1470
1471 // The object itself should look OK.
1472 object->ObjectVerify();
1473
1474 // All the interior pointers should be contained in the heap.
1475 VerifyPointersVisitor visitor;
1476 int size = object->Size();
1477 object->IterateBody(map->instance_type(), size, &visitor);
1478
1479 current += size;
1480 } else {
1481 // At end of page, switch to next page.
1482 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1483 // Next page should be valid.
1484 CHECK(!page->is_anchor());
1485 current = page->area_start();
1486 }
1487 }
1488
1489 // Check semi-spaces.
1490 CHECK_EQ(from_space_.id(), kFromSpace);
1491 CHECK_EQ(to_space_.id(), kToSpace);
1492 from_space_.Verify();
1493 to_space_.Verify();
1494 }
1495 #endif
1496
1497 // -----------------------------------------------------------------------------
1498 // SemiSpace implementation
1499
1500 void SemiSpace::SetUp(Address start,
1501 int initial_capacity,
1502 int maximum_capacity) {
1503 // Creates a space in the young generation. The constructor does not
1504 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1505 // memory of size 'capacity' when set up, and does not grow or shrink
1506 // otherwise. In the mark-compact collector, the memory region of the from
1507 // space is used as the marking stack. It requires contiguous memory
1508 // addresses.
1509 ASSERT(maximum_capacity >= Page::kPageSize);
1510 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1511 capacity_ = initial_capacity;
1512 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1513 maximum_committed_ = 0;
1514 committed_ = false;
1515 start_ = start;
1516 address_mask_ = ~(maximum_capacity - 1);
1517 object_mask_ = address_mask_ | kHeapObjectTagMask;
1518 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1519 age_mark_ = start_;
1520 }
1521
1522
1523 void SemiSpace::TearDown() {
1524 start_ = NULL;
1525 capacity_ = 0;
1526 }
1527
1528
1529 bool SemiSpace::Commit() {
1530 ASSERT(!is_committed());
1531 int pages = capacity_ / Page::kPageSize;
1532 if (!heap()->isolate()->memory_allocator()->CommitBlock(start_,
1533 capacity_,
1534 executable())) {
1535 return false;
1536 }
1537
1538 NewSpacePage* current = anchor();
1539 for (int i = 0; i < pages; i++) {
1540 NewSpacePage* new_page =
1541 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
1542 new_page->InsertAfter(current);
1543 current = new_page;
1544 }
1545
1546 SetCapacity(capacity_);
1547 committed_ = true;
1548 Reset();
1549 return true;
1550 }
1551
1552
1553 bool SemiSpace::Uncommit() {
1554 ASSERT(is_committed());
1555 Address start = start_ + maximum_capacity_ - capacity_;
1556 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
1557 return false;
1558 }
1559 anchor()->set_next_page(anchor());
1560 anchor()->set_prev_page(anchor());
1561
1562 committed_ = false;
1563 return true;
1564 }
1565
1566
1567 size_t SemiSpace::CommittedPhysicalMemory() {
1568 if (!is_committed()) return 0;
1569 size_t size = 0;
1570 NewSpacePageIterator it(this);
1571 while (it.has_next()) {
1572 size += it.next()->CommittedPhysicalMemory();
1573 }
1574 return size;
1575 }
1576
1577
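// Grows the semispace to |new_capacity| by committing the additional memory
// and linking the newly committed pages in after the current last page,
// copying the flip-relevant flags from it.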
1578 bool SemiSpace::GrowTo(int new_capacity) {
1579 if (!is_committed()) {
1580 if (!Commit()) return false;
1581 }
1582 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1583 ASSERT(new_capacity <= maximum_capacity_);
1584 ASSERT(new_capacity > capacity_);
1585 int pages_before = capacity_ / Page::kPageSize;
1586 int pages_after = new_capacity / Page::kPageSize;
1587
1588 size_t delta = new_capacity - capacity_;
1589
1590 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1591 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1592 start_ + capacity_, delta, executable())) {
1593 return false;
1594 }
1595 SetCapacity(new_capacity);
1596 NewSpacePage* last_page = anchor()->prev_page();
1597 ASSERT(last_page != anchor());
1598 for (int i = pages_before; i < pages_after; i++) {
1599 Address page_address = start_ + i * Page::kPageSize;
1600 NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
1601 page_address,
1602 this);
1603 new_page->InsertAfter(last_page);
1604 Bitmap::Clear(new_page);
1605 // Duplicate the flags that were set on the old page.
1606 new_page->SetFlags(last_page->GetFlags(),
1607 NewSpacePage::kCopyOnFlipFlagsMask);
1608 last_page = new_page;
1609 }
1610 return true;
1611 }
1612
1613
1614 bool SemiSpace::ShrinkTo(int new_capacity) {
1615 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1616 ASSERT(new_capacity >= initial_capacity_);
1617 ASSERT(new_capacity < capacity_);
1618 if (is_committed()) {
1619 size_t delta = capacity_ - new_capacity;
1620 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1621
1622 MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1623 if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
1624 return false;
1625 }
1626
1627 int pages_after = new_capacity / Page::kPageSize;
1628 NewSpacePage* new_last_page =
1629 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1630 new_last_page->set_next_page(anchor());
1631 anchor()->set_prev_page(new_last_page);
1632 ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page));
1633 }
1634
1635 SetCapacity(new_capacity);
1636
1637 return true;
1638 }
1639
1640
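// Re-labels this semispace's pages when it changes role during a flip: every
// page gets the new to-space/from-space flags, and the page list is
// re-attached to the anchor, whose address changes because the semispaces are
// swapped by value.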
1641 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1642 anchor_.set_owner(this);
1643 // Fix up back-pointers to the anchor. The anchor's address changes
1644 // when we swap.
1645 anchor_.prev_page()->set_next_page(&anchor_);
1646 anchor_.next_page()->set_prev_page(&anchor_);
1647
1648 bool becomes_to_space = (id_ == kFromSpace);
1649 id_ = becomes_to_space ? kToSpace : kFromSpace;
1650 NewSpacePage* page = anchor_.next_page();
1651 while (page != &anchor_) {
1652 page->set_owner(this);
1653 page->SetFlags(flags, mask);
1654 if (becomes_to_space) {
1655 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1656 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1657 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1658 page->ResetLiveBytes();
1659 } else {
1660 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1661 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1662 }
1663 ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1664 ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1665 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1666 page = page->next_page();
1667 }
1668 }
1669
1670
1671 void SemiSpace::Reset() {
1672 ASSERT(anchor_.next_page() != &anchor_);
1673 current_page_ = anchor_.next_page();
1674 }
1675
1676
1677 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1678 // We won't be swapping semispaces without data in them.
1679 ASSERT(from->anchor_.next_page() != &from->anchor_);
1680 ASSERT(to->anchor_.next_page() != &to->anchor_);
1681
1682 // Swap bits.
1683 SemiSpace tmp = *from;
1684 *from = *to;
1685 *to = tmp;
1686
1687 // Fix up back-pointers to the page list anchor now that its address
1688 // has changed.
1689 // Swap to/from-space bits on pages.
1690 // Copy GC flags from old active space (from-space) to new (to-space).
1691 intptr_t flags = from->current_page()->GetFlags();
1692 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1693
1694 from->FlipPages(0, 0);
1695 }
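
// Hypothetical call site (an assumption about the typical caller, not code
// from this file): during a scavenge the new space flips its two semispaces
// so that the old to-space becomes from-space and vice versa, roughly:
//
//   SemiSpace::Swap(&from_space_, &to_space_);
//   to_space_.Reset();  // allocation restarts at the first to-space page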
1696
1697
1698 void SemiSpace::SetCapacity(int new_capacity) {
1699 capacity_ = new_capacity;
1700 if (capacity_ > maximum_committed_) {
1701 maximum_committed_ = capacity_;
1702 }
1703 }
1704
1705
1706 void SemiSpace::set_age_mark(Address mark) {
1707 ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
1708 age_mark_ = mark;
1709 // Mark all pages up to the one containing mark.
1710 NewSpacePageIterator it(space_start(), mark);
1711 while (it.has_next()) {
1712 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1713 }
1714 }
1715
1716
1717 #ifdef DEBUG
1718 void SemiSpace::Print() { }
1719 #endif
1720
1721 #ifdef VERIFY_HEAP
1722 void SemiSpace::Verify() {
1723 bool is_from_space = (id_ == kFromSpace);
1724 NewSpacePage* page = anchor_.next_page();
1725 CHECK(anchor_.semi_space() == this);
1726 while (page != &anchor_) {
1727 CHECK(page->semi_space() == this);
1728 CHECK(page->InNewSpace());
1729 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1730 : MemoryChunk::IN_TO_SPACE));
1731 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1732 : MemoryChunk::IN_FROM_SPACE));
1733 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1734 if (!is_from_space) {
1735 // The pointers-from-here-are-interesting flag isn't updated dynamically
1736 // on from-space pages, so it might be out of sync with the marking state.
1737 if (page->heap()->incremental_marking()->IsMarking()) {
1738 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1739 } else {
1740 CHECK(!page->IsFlagSet(
1741 MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1742 }
1743 // TODO(gc): Check that the live_bytes_count_ field matches the
1744 // black marking on the page (if we make it match in new-space).
1745 }
1746 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1747 CHECK(page->prev_page()->next_page() == page);
1748 page = page->next_page();
1749 }
1750 }
1751 #endif
1752
1753 #ifdef DEBUG
1754 void SemiSpace::AssertValidRange(Address start, Address end) {
1755 // Addresses must belong to the same semi-space.
1756 NewSpacePage* page = NewSpacePage::FromLimit(start);
1757 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1758 SemiSpace* space = page->semi_space();
1759 CHECK_EQ(space, end_page->semi_space());
1760 // The start address must not be above the end address: either both are
1761 // on the same page, or the end address is on a later page in the linked
1762 // list of semi-space pages.
1763 if (page == end_page) {
1764 CHECK(start <= end);
1765 } else {
1766 while (page != end_page) {
1767 page = page->next_page();
1768 CHECK_NE(page, space->anchor());
1769 }
1770 }
1771 }
1772 #endif
1773
1774
1775 // -----------------------------------------------------------------------------
1776 // SemiSpaceIterator implementation.
1777 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1778 Initialize(space->bottom(), space->top(), NULL);
1779 }
1780
1781
1782 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1783 HeapObjectCallback size_func) {
1784 Initialize(space->bottom(), space->top(), size_func);
1785 }
1786
1787
1788 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1789 Initialize(start, space->top(), NULL);
1790 }
1791
1792
1793 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1794 Initialize(from, to, NULL);
1795 }
1796
1797
1798 void SemiSpaceIterator::Initialize(Address start,
1799 Address end,
1800 HeapObjectCallback size_func) {
1801 SemiSpace::AssertValidRange(start, end);
1802 current_ = start;
1803 limit_ = end;
1804 size_func_ = size_func;
1805 }
1806
1807
1808 #ifdef DEBUG
1809 // heap_histograms is shared, always clear it before using it.
1810 static void ClearHistograms(Isolate* isolate) {
1811 // We reset the name each time, though it hasn't changed.
1812 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1813 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1814 #undef DEF_TYPE_NAME
1815
1816 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1817 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1818 #undef CLEAR_HISTOGRAM
1819
1820 isolate->js_spill_information()->Clear();
1821 }
1822
1823
1824 static void ClearCodeKindStatistics(int* code_kind_statistics) {
1825 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1826 code_kind_statistics[i] = 0;
1827 }
1828 }
1829
1830
1831 static void ReportCodeKindStatistics(int* code_kind_statistics) {
1832 PrintF("\n Code kind histograms: \n");
1833 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1834 if (code_kind_statistics[i] > 0) {
1835 PrintF(" %-20s: %10d bytes\n",
1836 Code::Kind2String(static_cast<Code::Kind>(i)),
1837 code_kind_statistics[i]);
1838 }
1839 }
1840 PrintF("\n");
1841 }
1842
1843
1844 static int CollectHistogramInfo(HeapObject* obj) {
1845 Isolate* isolate = obj->GetIsolate();
1846 InstanceType type = obj->map()->instance_type();
1847 ASSERT(0 <= type && type <= LAST_TYPE);
1848 ASSERT(isolate->heap_histograms()[type].name() != NULL);
1849 isolate->heap_histograms()[type].increment_number(1);
1850 isolate->heap_histograms()[type].increment_bytes(obj->Size());
1851
1852 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1853 JSObject::cast(obj)->IncrementSpillStatistics(
1854 isolate->js_spill_information());
1855 }
1856
1857 return obj->Size();
1858 }
1859
1860
1861 static void ReportHistogram(Isolate* isolate, bool print_spill) {
1862 PrintF("\n Object Histogram:\n");
1863 for (int i = 0; i <= LAST_TYPE; i++) {
1864 if (isolate->heap_histograms()[i].number() > 0) {
1865 PrintF(" %-34s%10d (%10d bytes)\n",
1866 isolate->heap_histograms()[i].name(),
1867 isolate->heap_histograms()[i].number(),
1868 isolate->heap_histograms()[i].bytes());
1869 }
1870 }
1871 PrintF("\n");
1872
1873 // Summarize string types.
1874 int string_number = 0;
1875 int string_bytes = 0;
1876 #define INCREMENT(type, size, name, camel_name) \
1877 string_number += isolate->heap_histograms()[type].number(); \
1878 string_bytes += isolate->heap_histograms()[type].bytes();
1879 STRING_TYPE_LIST(INCREMENT)
1880 #undef INCREMENT
1881 if (string_number > 0) {
1882 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1883 string_bytes);
1884 }
1885
1886 if (FLAG_collect_heap_spill_statistics && print_spill) {
1887 isolate->js_spill_information()->Print();
1888 }
1889 }
1890 #endif // DEBUG
1891
1892
1893 // Support for statistics gathering for --heap-stats and --log-gc.
1894 void NewSpace::ClearHistograms() {
1895 for (int i = 0; i <= LAST_TYPE; i++) {
1896 allocated_histogram_[i].clear();
1897 promoted_histogram_[i].clear();
1898 }
1899 }
1900
1901
1902 // Because the copying collector does not touch garbage objects, we iterate
1903 // the new space before a collection to get a histogram of allocated objects.
1904 // This only happens when the --log-gc flag is set.
1905 void NewSpace::CollectStatistics() {
1906 ClearHistograms();
1907 SemiSpaceIterator it(this);
1908 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1909 RecordAllocation(obj);
1910 }
1911
1912
1913 static void DoReportStatistics(Isolate* isolate,
1914 HistogramInfo* info, const char* description) {
1915 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1916 // Lump all the string types together.
1917 int string_number = 0;
1918 int string_bytes = 0;
1919 #define INCREMENT(type, size, name, camel_name) \
1920 string_number += info[type].number(); \
1921 string_bytes += info[type].bytes();
1922 STRING_TYPE_LIST(INCREMENT)
1923 #undef INCREMENT
1924 if (string_number > 0) {
1925 LOG(isolate,
1926 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1927 }
1928
1929 // Then do the other types.
1930 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1931 if (info[i].number() > 0) {
1932 LOG(isolate,
1933 HeapSampleItemEvent(info[i].name(), info[i].number(),
1934 info[i].bytes()));
1935 }
1936 }
1937 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1938 }
1939
1940
1941 void NewSpace::ReportStatistics() {
1942 #ifdef DEBUG
1943 if (FLAG_heap_stats) {
1944 float pct = static_cast<float>(Available()) / Capacity();
1945 PrintF(" capacity: %" V8_PTR_PREFIX "d"
1946 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
1947 Capacity(), Available(), static_cast<int>(pct*100));
1948 PrintF("\n Object Histogram:\n");
1949 for (int i = 0; i <= LAST_TYPE; i++) {
1950 if (allocated_histogram_[i].number() > 0) {
1951 PrintF(" %-34s%10d (%10d bytes)\n",
1952 allocated_histogram_[i].name(),
1953 allocated_histogram_[i].number(),
1954 allocated_histogram_[i].bytes());
1955 }
1956 }
1957 PrintF("\n");
1958 }
1959 #endif // DEBUG
1960
1961 if (FLAG_log_gc) {
1962 Isolate* isolate = heap()->isolate();
1963 DoReportStatistics(isolate, allocated_histogram_, "allocated");
1964 DoReportStatistics(isolate, promoted_histogram_, "promoted");
1965 }
1966 }
1967
1968
1969 void NewSpace::RecordAllocation(HeapObject* obj) {
1970 InstanceType type = obj->map()->instance_type();
1971 ASSERT(0 <= type && type <= LAST_TYPE);
1972 allocated_histogram_[type].increment_number(1);
1973 allocated_histogram_[type].increment_bytes(obj->Size());
1974 }
1975
1976
1977 void NewSpace::RecordPromotion(HeapObject* obj) {
1978 InstanceType type = obj->map()->instance_type();
1979 ASSERT(0 <= type && type <= LAST_TYPE);
1980 promoted_histogram_[type].increment_number(1);
1981 promoted_histogram_[type].increment_bytes(obj->Size());
1982 }
1983
1984
1985 size_t NewSpace::CommittedPhysicalMemory() {
1986 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
1987 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1988 size_t size = to_space_.CommittedPhysicalMemory();
1989 if (from_space_.is_committed()) {
1990 size += from_space_.CommittedPhysicalMemory();
1991 }
1992 return size;
1993 }
1994
1995
1996 // -----------------------------------------------------------------------------
1997 // Free lists for old object spaces implementation
1998
1999 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
2000 ASSERT(size_in_bytes > 0);
2001 ASSERT(IsAligned(size_in_bytes, kPointerSize));
2002
2003 // We write a map and possibly size information to the block. If the block
2004 // is big enough to be a FreeSpace with at least one extra word (the next
2005 // pointer), we set its map to be the free space map and its size to an
2006 // appropriate array length for the desired size from HeapObject::Size().
2007 // If the block is too small (e.g., one or two words) to hold both a size
2008 // field and a next pointer, we give it a filler map that gives it the
2009 // correct size.
2010 if (size_in_bytes > FreeSpace::kHeaderSize) {
2011 // Can't use FreeSpace::cast because it fails during deserialization.
2012 // We have to set the size first with a release store before we store
2013 // the map because a concurrent store buffer scan on scavenge must not
2014 // observe a map with an invalid size.
2015 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
2016 this_as_free_space->nobarrier_set_size(size_in_bytes);
2017 synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
2018 } else if (size_in_bytes == kPointerSize) {
2019 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
2020 } else if (size_in_bytes == 2 * kPointerSize) {
2021 set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
2022 } else {
2023 UNREACHABLE();
2024 }
2025 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
2026 // deserialization because the free space map is not done yet.
2027 }
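
// Illustrative summary of the encodings chosen above:
//
//   size_in_bytes >  FreeSpace::kHeaderSize : free_space_map + explicit size
//   size_in_bytes == 2 * kPointerSize       : two_pointer_filler_map
//   size_in_bytes == kPointerSize           : one_pointer_filler_map
//
// In every case HeapObject::Size() recovers size_in_bytes, which is what
// keeps the heap iterable across the freed block.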
2028
2029
2030 FreeListNode* FreeListNode::next() {
2031 ASSERT(IsFreeListNode(this));
2032 if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2033 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
2034 return reinterpret_cast<FreeListNode*>(
2035 Memory::Address_at(address() + kNextOffset));
2036 } else {
2037 return reinterpret_cast<FreeListNode*>(
2038 Memory::Address_at(address() + kPointerSize));
2039 }
2040 }
2041
2042
2043 FreeListNode** FreeListNode::next_address() {
2044 ASSERT(IsFreeListNode(this));
2045 if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2046 ASSERT(Size() >= kNextOffset + kPointerSize);
2047 return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
2048 } else {
2049 return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
2050 }
2051 }
2052
2053
2054 void FreeListNode::set_next(FreeListNode* next) {
2055 ASSERT(IsFreeListNode(this));
2056 // While we are booting the VM, the free space map will actually be null, so
2057 // we have to make sure that we don't try to use it for anything at that
2058 // stage.
2059 if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2060 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
2061 base::NoBarrier_Store(
2062 reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
2063 reinterpret_cast<base::AtomicWord>(next));
2064 } else {
2065 base::NoBarrier_Store(
2066 reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
2067 reinterpret_cast<base::AtomicWord>(next));
2068 }
2069 }
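
// Sketch of the layout implied by the offsets used above (the exact value of
// kNextOffset is defined elsewhere; it is assumed here to lie past the map
// and size words):
//
//   FreeSpace node : [ map ][ size ][ next ][ ... ]   // next at kNextOffset
//   filler node    : [ map ][ next ]                  // next at kPointerSize
//
// next(), next_address() and set_next() all dispatch on the map to find the
// next pointer in the right slot.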
2070
2071
2072 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
2073 intptr_t free_bytes = 0;
2074 if (category->top() != NULL) {
2075 // This is safe (not going to deadlock) since Concatenate operations
2076 // are never performed on the same free lists at the same time in
2077 // reverse order.
2078 LockGuard<Mutex> target_lock_guard(mutex());
2079 LockGuard<Mutex> source_lock_guard(category->mutex());
2080 ASSERT(category->end_ != NULL);
2081 free_bytes = category->available();
2082 if (end_ == NULL) {
2083 end_ = category->end();
2084 } else {
2085 category->end()->set_next(top());
2086 }
2087 set_top(category->top());
2088 base::NoBarrier_Store(&top_, category->top_);
2089 available_ += category->available();
2090 category->Reset();
2091 }
2092 return free_bytes;
2093 }
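
// Illustrative sketch of the splice performed above: the source category's
// chain is pushed in front of this category's chain,
//
//   this:    T1 -> ... -> Tn          source:  S1 -> ... -> Sm
//   result:  S1 -> ... -> Sm -> T1 -> ... -> Tn
//
// after which the source category is reset to empty and its byte count has
// been added to available_.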
2094
2095
2096 void FreeListCategory::Reset() {
2097 set_top(NULL);
2098 set_end(NULL);
2099 set_available(0);
2100 }
2101
2102
2103 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
2104 int sum = 0;
2105 FreeListNode* t = top();
2106 FreeListNode** n = &t;
2107 while (*n != NULL) {
2108 if (Page::FromAddress((*n)->address()) == p) {
2109 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
2110 sum += free_space->Size();
2111 *n = (*n)->next();
2112 } else {
2113 n = (*n)->next_address();
2114 }
2115 }
2116 set_top(t);
2117 if (top() == NULL) {
2118 set_end(NULL);
2119 }
2120 available_ -= sum;
2121 return sum;
2122 }
2123
2124
2125 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
2126 FreeListNode* node = top();
2127 while (node != NULL) {
2128 if (Page::FromAddress(node->address()) == p) return true;
2129 node = node->next();
2130 }
2131 return false;
2132 }
2133
2134
2135 FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
2136 FreeListNode* node = top();
2137
2138 if (node == NULL) return NULL;
2139
2140 while (node != NULL &&
2141 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
2142 available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
2143 node = node->next();
2144 }
2145
2146 if (node != NULL) {
2147 set_top(node->next());
2148 *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
2149 available_ -= *node_size;
2150 } else {
2151 set_top(NULL);
2152 }
2153
2154 if (top() == NULL) {
2155 set_end(NULL);
2156 }
2157
2158 return node;
2159 }
2160
2161
2162 FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
2163 int *node_size) {
2164 FreeListNode* node = PickNodeFromList(node_size);
2165 if (node != NULL && *node_size < size_in_bytes) {
2166 Free(node, *node_size);
2167 *node_size = 0;
2168 return NULL;
2169 }
2170 return node;
2171 }
2172
2173
2174 void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
2175 node->set_next(top());
2176 set_top(node);
2177 if (end_ == NULL) {
2178 end_ = node;
2179 }
2180 available_ += size_in_bytes;
2181 }
2182
2183
2184 void FreeListCategory::RepairFreeList(Heap* heap) {
2185 FreeListNode* n = top();
2186 while (n != NULL) {
2187 Map** map_location = reinterpret_cast<Map**>(n->address());
2188 if (*map_location == NULL) {
2189 *map_location = heap->free_space_map();
2190 } else {
2191 ASSERT(*map_location == heap->free_space_map());
2192 }
2193 n = n->next();
2194 }
2195 }
2196
2197
2198 FreeList::FreeList(PagedSpace* owner)
2199 : owner_(owner), heap_(owner->heap()) {
2200 Reset();
2201 }
2202
2203
2204 intptr_t FreeList::Concatenate(FreeList* free_list) {
2205 intptr_t free_bytes = 0;
2206 free_bytes += small_list_.Concatenate(free_list->small_list());
2207 free_bytes += medium_list_.Concatenate(free_list->medium_list());
2208 free_bytes += large_list_.Concatenate(free_list->large_list());
2209 free_bytes += huge_list_.Concatenate(free_list->huge_list());
2210 return free_bytes;
2211 }
2212
2213
2214 void FreeList::Reset() {
2215 small_list_.Reset();
2216 medium_list_.Reset();
2217 large_list_.Reset();
2218 huge_list_.Reset();
2219 }
2220
2221
2222 int FreeList::Free(Address start, int size_in_bytes) {
2223 if (size_in_bytes == 0) return 0;
2224
2225 FreeListNode* node = FreeListNode::FromAddress(start);
2226 node->set_size(heap_, size_in_bytes);
2227 Page* page = Page::FromAddress(start);
2228
2229 // Early return to drop too-small blocks on the floor.
2230 if (size_in_bytes < kSmallListMin) {
2231 page->add_non_available_small_blocks(size_in_bytes);
2232 return size_in_bytes;
2233 }
2234
2235 // Insert other blocks at the head of a free list of the appropriate
2236 // magnitude.
2237 if (size_in_bytes <= kSmallListMax) {
2238 small_list_.Free(node, size_in_bytes);
2239 page->add_available_in_small_free_list(size_in_bytes);
2240 } else if (size_in_bytes <= kMediumListMax) {
2241 medium_list_.Free(node, size_in_bytes);
2242 page->add_available_in_medium_free_list(size_in_bytes);
2243 } else if (size_in_bytes <= kLargeListMax) {
2244 large_list_.Free(node, size_in_bytes);
2245 page->add_available_in_large_free_list(size_in_bytes);
2246 } else {
2247 huge_list_.Free(node, size_in_bytes);
2248 page->add_available_in_huge_free_list(size_in_bytes);
2249 }
2250
2251 ASSERT(IsVeryLong() || available() == SumFreeLists());
2252 return 0;
2253 }
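
// Illustrative summary of the bucketing performed above (the concrete byte
// thresholds are defined elsewhere and are not repeated here):
//
//   size <  kSmallListMin  -> counted as non-available waste on the page
//   size <= kSmallListMax  -> small_list_
//   size <= kMediumListMax -> medium_list_
//   size <= kLargeListMax  -> large_list_
//   otherwise              -> huge_list_
//
// The return value is the number of bytes that could not be put on any free
// list, which is non-zero only in the too-small case.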
2254
2255
2256 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2257 FreeListNode* node = NULL;
2258 Page* page = NULL;
2259
2260 if (size_in_bytes <= kSmallAllocationMax) {
2261 node = small_list_.PickNodeFromList(node_size);
2262 if (node != NULL) {
2263 ASSERT(size_in_bytes <= *node_size);
2264 page = Page::FromAddress(node->address());
2265 page->add_available_in_small_free_list(-(*node_size));
2266 ASSERT(IsVeryLong() || available() == SumFreeLists());
2267 return node;
2268 }
2269 }
2270
2271 if (size_in_bytes <= kMediumAllocationMax) {
2272 node = medium_list_.PickNodeFromList(node_size);
2273 if (node != NULL) {
2274 ASSERT(size_in_bytes <= *node_size);
2275 page = Page::FromAddress(node->address());
2276 page->add_available_in_medium_free_list(-(*node_size));
2277 ASSERT(IsVeryLong() || available() == SumFreeLists());
2278 return node;
2279 }
2280 }
2281
2282 if (size_in_bytes <= kLargeAllocationMax) {
2283 node = large_list_.PickNodeFromList(node_size);
2284 if (node != NULL) {
2285 ASSERT(size_in_bytes <= *node_size);
2286 page = Page::FromAddress(node->address());
2287 page->add_available_in_large_free_list(-(*node_size));
2288 ASSERT(IsVeryLong() || available() == SumFreeLists());
2289 return node;
2290 }
2291 }
2292
2293 int huge_list_available = huge_list_.available();
2294 FreeListNode* top_node = huge_list_.top();
2295 for (FreeListNode** cur = &top_node;
2296 *cur != NULL;
2297 cur = (*cur)->next_address()) {
2298 FreeListNode* cur_node = *cur;
2299 while (cur_node != NULL &&
2300 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
2301 int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
2302 huge_list_available -= size;
2303 page = Page::FromAddress(cur_node->address());
2304 page->add_available_in_huge_free_list(-size);
2305 cur_node = cur_node->next();
2306 }
2307
2308 *cur = cur_node;
2309 if (cur_node == NULL) {
2310 huge_list_.set_end(NULL);
2311 break;
2312 }
2313
2314 ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
2315 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
2316 int size = cur_as_free_space->Size();
2317 if (size >= size_in_bytes) {
2318 // Large enough node found. Unlink it from the list.
2319 node = *cur;
2320 *cur = node->next();
2321 *node_size = size;
2322 huge_list_available -= size;
2323 page = Page::FromAddress(node->address());
2324 page->add_available_in_huge_free_list(-size);
2325 break;
2326 }
2327 }
2328
2329 huge_list_.set_top(top_node);
2330 if (huge_list_.top() == NULL) {
2331 huge_list_.set_end(NULL);
2332 }
2333 huge_list_.set_available(huge_list_available);
2334
2335 if (node != NULL) {
2336 ASSERT(IsVeryLong() || available() == SumFreeLists());
2337 return node;
2338 }
2339
2340 if (size_in_bytes <= kSmallListMax) {
2341 node = small_list_.PickNodeFromList(size_in_bytes, node_size);
2342 if (node != NULL) {
2343 ASSERT(size_in_bytes <= *node_size);
2344 page = Page::FromAddress(node->address());
2345 page->add_available_in_small_free_list(-(*node_size));
2346 }
2347 } else if (size_in_bytes <= kMediumListMax) {
2348 node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
2349 if (node != NULL) {
2350 ASSERT(size_in_bytes <= *node_size);
2351 page = Page::FromAddress(node->address());
2352 page->add_available_in_medium_free_list(-(*node_size));
2353 }
2354 } else if (size_in_bytes <= kLargeListMax) {
2355 node = large_list_.PickNodeFromList(size_in_bytes, node_size);
2356 if (node != NULL) {
2357 ASSERT(size_in_bytes <= *node_size);
2358 page = Page::FromAddress(node->address());
2359 page->add_available_in_large_free_list(-(*node_size));
2360 }
2361 }
2362
2363 ASSERT(IsVeryLong() || available() == SumFreeLists());
2364 return node;
2365 }
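
// Illustrative summary of the search order used above:
//   1. Fast path: the small/medium/large lists are consulted only when the
//      request is small enough that any node on that list is guaranteed to
//      fit (see the ASSERTs on *node_size).
//   2. The huge list is scanned linearly, unlinking nodes that sit on
//      evacuation candidate pages, until a node of at least size_in_bytes
//      is found.
//   3. Otherwise the size class matching size_in_bytes is retried with an
//      explicit size check; a node that turns out too small is freed again.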
2366
2367
2368 // Allocation on the old space free list. If it succeeds then a new linear
2369 // allocation space has been set up with the top and limit of the space. If
2370 // the allocation fails then NULL is returned, and the caller can perform a GC
2371 // or allocate a new page before retrying.
2372 HeapObject* FreeList::Allocate(int size_in_bytes) {
2373 ASSERT(0 < size_in_bytes);
2374 ASSERT(size_in_bytes <= kMaxBlockSize);
2375 ASSERT(IsAligned(size_in_bytes, kPointerSize));
2376 // Don't free list allocate if there is linear space available.
2377 ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
2378
2379 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2380 // Mark the old linear allocation area with a free space map so it can be
2381 // skipped when scanning the heap. This also puts it back in the free list
2382 // if it is big enough.
2383 owner_->Free(owner_->top(), old_linear_size);
2384
2385 owner_->heap()->incremental_marking()->OldSpaceStep(
2386 size_in_bytes - old_linear_size);
2387
2388 int new_node_size = 0;
2389 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2390 if (new_node == NULL) {
2391 owner_->SetTopAndLimit(NULL, NULL);
2392 return NULL;
2393 }
2394
2395 int bytes_left = new_node_size - size_in_bytes;
2396 ASSERT(bytes_left >= 0);
2397
2398 #ifdef DEBUG
2399 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2400 reinterpret_cast<Object**>(new_node->address())[i] =
2401 Smi::FromInt(kCodeZapValue);
2402 }
2403 #endif
2404
2405 // The old-space-step might have finished sweeping and restarted marking.
2406 // Verify that it did not turn the page of the new node into an evacuation
2407 // candidate.
2408 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2409
2410 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2411
2412 // Memory in the linear allocation area is counted as allocated. We may free
2413 // a little of this again immediately - see below.
2414 owner_->Allocate(new_node_size);
2415
2416 if (owner_->heap()->inline_allocation_disabled()) {
2417 // Keep the linear allocation area empty if requested to do so; just
2418 // return the area to the free list instead.
2419 owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2420 ASSERT(owner_->top() == NULL && owner_->limit() == NULL);
2421 } else if (bytes_left > kThreshold &&
2422 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2423 FLAG_incremental_marking_steps) {
2424 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2425 // We don't want to give too large linear areas to the allocator while
2426 // incremental marking is going on, because we won't check again whether
2427 // we want to do another increment until the linear area is used up.
2428 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2429 new_node_size - size_in_bytes - linear_size);
2430 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2431 new_node->address() + size_in_bytes + linear_size);
2432 } else if (bytes_left > 0) {
2433 // Normally we give the rest of the node to the allocator as its new
2434 // linear allocation area.
2435 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2436 new_node->address() + new_node_size);
2437 } else {
2438 // TODO(gc) Try not freeing the linear allocation region when bytes_left
2439 // is zero.
2440 owner_->SetTopAndLimit(NULL, NULL);
2441 }
2442
2443 return new_node;
2444 }
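
// Hypothetical caller sketch of the retry protocol described in the comment
// above Allocate() (the surrounding control flow is an assumption, not code
// from this file):
//
//   HeapObject* object = free_list_.Allocate(size_in_bytes);
//   if (object == NULL) {
//     // No suitable node: add a page or collect garbage, then retry.
//   } else {
//     // Success: any leftover tail of the node is now the new linear
//     // allocation area (unless inline allocation is disabled).
//   }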
2445
2446
2447 intptr_t FreeList::EvictFreeListItems(Page* p) {
2448 intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
2449 p->set_available_in_huge_free_list(0);
2450
2451 if (sum < p->area_size()) {
2452 sum += small_list_.EvictFreeListItemsInList(p) +
2453 medium_list_.EvictFreeListItemsInList(p) +
2454 large_list_.EvictFreeListItemsInList(p);
2455 p->set_available_in_small_free_list(0);
2456 p->set_available_in_medium_free_list(0);
2457 p->set_available_in_large_free_list(0);
2458 }
2459
2460 return sum;
2461 }
2462
2463
2464 bool FreeList::ContainsPageFreeListItems(Page* p) {
2465 return huge_list_.EvictFreeListItemsInList(p) ||
2466 small_list_.EvictFreeListItemsInList(p) ||
2467 medium_list_.EvictFreeListItemsInList(p) ||
2468 large_list_.EvictFreeListItemsInList(p);
2469 }
2470
2471
2472 void FreeList::RepairLists(Heap* heap) {
2473 small_list_.RepairFreeList(heap);
2474 medium_list_.RepairFreeList(heap);
2475 large_list_.RepairFreeList(heap);
2476 huge_list_.RepairFreeList(heap);
2477 }
2478
2479
2480 #ifdef DEBUG
2481 intptr_t FreeListCategory::SumFreeList() {
2482 intptr_t sum = 0;
2483 FreeListNode* cur = top();
2484 while (cur != NULL) {
2485 ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
2486 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
2487 sum += cur_as_free_space->nobarrier_size();
2488 cur = cur->next();
2489 }
2490 return sum;
2491 }
2492
2493
2494 static const int kVeryLongFreeList = 500;
2495
2496
2497 int FreeListCategory::FreeListLength() {
2498 int length = 0;
2499 FreeListNode* cur = top();
2500 while (cur != NULL) {
2501 length++;
2502 cur = cur->next();
2503 if (length == kVeryLongFreeList) return length;
2504 }
2505 return length;
2506 }
2507
2508
2509 bool FreeList::IsVeryLong() {
2510 if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
2511 if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
2512 if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
2513 if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
2514 return false;
2515 }
2516
2517
2518 // This can take a very long time because it is linear in the number of entries
2519 // on the free list, so it should not be called if FreeListLength returns
2520 // kVeryLongFreeList.
2521 intptr_t FreeList::SumFreeLists() {
2522 intptr_t sum = small_list_.SumFreeList();
2523 sum += medium_list_.SumFreeList();
2524 sum += large_list_.SumFreeList();
2525 sum += huge_list_.SumFreeList();
2526 return sum;
2527 }
2528 #endif
2529
2530
2531 // -----------------------------------------------------------------------------
2532 // OldSpace implementation
2533
2534 void PagedSpace::PrepareForMarkCompact() {
2535 // We don't have a linear allocation area while sweeping. It will be restored
2536 // on the first allocation after the sweep.
2537 EmptyAllocationInfo();
2538
2539 // This counter will be increased for pages which will be swept by the
2540 // sweeper threads.
2541 unswept_free_bytes_ = 0;
2542
2543 // Clear the free list before a full GC---it will be rebuilt afterward.
2544 free_list_.Reset();
2545 }
2546
2547
2548 intptr_t PagedSpace::SizeOfObjects() {
2549 ASSERT(heap()->mark_compact_collector()->IsConcurrentSweepingInProgress() ||
2550 (unswept_free_bytes_ == 0));
2551 return Size() - unswept_free_bytes_ - (limit() - top());
2552 }
2553
2554
2555 // After we have booted, we have created a map which represents free space
2556 // on the heap. If there was already a free list then the elements on it
2557 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
2558 // fix them.
2559 void PagedSpace::RepairFreeListsAfterBoot() {
2560 free_list_.RepairLists(heap());
2561 }
2562
2563
2564 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2565 if (allocation_info_.top() >= allocation_info_.limit()) return;
2566
2567 if (Page::FromAllocationTop(allocation_info_.top())->
2568 IsEvacuationCandidate()) {
2569 // Create filler object to keep page iterable if it was iterable.
2570 int remaining =
2571 static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2572 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
2573
2574 allocation_info_.set_top(NULL);
2575 allocation_info_.set_limit(NULL);
2576 }
2577 }
2578
2579
2580 HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
2581 int size_in_bytes) {
2582 MarkCompactCollector* collector = heap()->mark_compact_collector();
2583
2584 // If sweeper threads are still running, wait for them.
2585 if (collector->IsConcurrentSweepingInProgress()) {
2586 collector->WaitUntilSweepingCompleted();
2587
2588 // After waiting for the sweeper threads, there may be new free-list
2589 // entries.
2590 return free_list_.Allocate(size_in_bytes);
2591 }
2592 return NULL;
2593 }
2594
2595
2596 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2597 // Allocation in this space has failed.
2598
2599 // If sweeper threads are active, try to re-fill the free-lists.
2600 MarkCompactCollector* collector = heap()->mark_compact_collector();
2601 if (collector->IsConcurrentSweepingInProgress()) {
2602 collector->RefillFreeList(this);
2603
2604 // Retry the free list allocation.
2605 HeapObject* object = free_list_.Allocate(size_in_bytes);
2606 if (object != NULL) return object;
2607 }
2608
2609 // Free list allocation failed and there is no next page. Fail if we have
2610 // hit the old generation size limit that should cause a garbage
2611 // collection.
2612 if (!heap()->always_allocate()
2613 && heap()->OldGenerationAllocationLimitReached()) {
2614 // If sweeper threads are active, wait for them at that point and steal
2615 // elements from their free-lists.
2616 HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2617 if (object != NULL) return object;
2618 }
2619
2620 // Try to expand the space and allocate in the new next page.
2621 if (Expand()) {
2622 ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
2623 return free_list_.Allocate(size_in_bytes);
2624 }
2625
2626 // If sweeper threads are active, wait for them at that point and steal
2627 // elements from their free-lists. Allocation may still fail then, which
2628 // would indicate that there is not enough memory for the given allocation.
2629 return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2630 }
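
// Illustrative summary of the slow-path fallbacks above, in order:
//   1. Refill the free lists from concurrently swept pages and retry.
//   2. If the old-generation allocation limit has been reached, wait for the
//      sweeper threads instead of growing the space, then retry.
//   3. Expand the space by one page and allocate from the new page.
//   4. As a last resort, wait for sweeping once more; a NULL result here
//      typically forces the caller to trigger a full GC.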
2631
2632
2633 #ifdef DEBUG
2634 void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2635 CommentStatistic* comments_statistics =
2636 isolate->paged_space_comments_statistics();
2637 ReportCodeKindStatistics(isolate->code_kind_statistics());
2638 PrintF("Code comment statistics (\" [ comment-txt : size/ "
2639 "count (average)\"):\n");
2640 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2641 const CommentStatistic& cs = comments_statistics[i];
2642 if (cs.size > 0) {
2643 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2644 cs.size/cs.count);
2645 }
2646 }
2647 PrintF("\n");
2648 }
2649
2650
2651 void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2652 CommentStatistic* comments_statistics =
2653 isolate->paged_space_comments_statistics();
2654 ClearCodeKindStatistics(isolate->code_kind_statistics());
2655 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2656 comments_statistics[i].Clear();
2657 }
2658 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2659 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2660 comments_statistics[CommentStatistic::kMaxComments].count = 0;
2661 }
2662
2663
2664 // Adds a comment to the 'comments_statistics' table. Performance is OK as
2665 // long as 'kMaxComments' is small.
2666 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2667 CommentStatistic* comments_statistics =
2668 isolate->paged_space_comments_statistics();
2669 // Do not count empty comments
2670 if (delta <= 0) return;
2671 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2672 // Search for a free or matching entry in 'comments_statistics': 'cs'
2673 // points to result.
2674 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2675 if (comments_statistics[i].comment == NULL) {
2676 cs = &comments_statistics[i];
2677 cs->comment = comment;
2678 break;
2679 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2680 cs = &comments_statistics[i];
2681 break;
2682 }
2683 }
2684 // Update entry for 'comment'
2685 cs->size += delta;
2686 cs->count += 1;
2687 }
2688
2689
2690 // Call for each nested comment start (start marked with '[ xxx', end marked
2691 // with ']'). RelocIterator 'it' must point to a comment reloc info.
2692 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2693 ASSERT(!it->done());
2694 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2695 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2696 if (tmp[0] != '[') {
2697 // Not a nested comment; skip
2698 return;
2699 }
2700
2701 // Search for end of nested comment or a new nested comment
2702 const char* const comment_txt =
2703 reinterpret_cast<const char*>(it->rinfo()->data());
2704 const byte* prev_pc = it->rinfo()->pc();
2705 int flat_delta = 0;
2706 it->next();
2707 while (true) {
2708 // All nested comments must be terminated properly, and therefore we will
2709 // eventually exit this loop.
2710 ASSERT(!it->done());
2711 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2712 const char* const txt =
2713 reinterpret_cast<const char*>(it->rinfo()->data());
2714 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2715 if (txt[0] == ']') break; // End of nested comment
2716 // A new comment
2717 CollectCommentStatistics(isolate, it);
2718 // Skip code that was covered with previous comment
2719 prev_pc = it->rinfo()->pc();
2720 }
2721 it->next();
2722 }
2723 EnterComment(isolate, comment_txt, flat_delta);
2724 }
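
// Illustrative example of the nesting handled above.  For reloc comments
// emitted as
//
//   [ outer
//     ...code A...
//     [ inner
//       ...code B...
//     ]
//     ...code C...
//   ]
//
// the recursive call attributes the bytes of B to "inner", while only the
// bytes of A and C (the flat delta) are attributed to "outer".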
2725
2726
2727 // Collects code size statistics:
2728 // - by code kind
2729 // - by code comment
2730 void PagedSpace::CollectCodeStatistics() {
2731 Isolate* isolate = heap()->isolate();
2732 HeapObjectIterator obj_it(this);
2733 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2734 if (obj->IsCode()) {
2735 Code* code = Code::cast(obj);
2736 isolate->code_kind_statistics()[code->kind()] += code->Size();
2737 RelocIterator it(code);
2738 int delta = 0;
2739 const byte* prev_pc = code->instruction_start();
2740 while (!it.done()) {
2741 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2742 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2743 CollectCommentStatistics(isolate, &it);
2744 prev_pc = it.rinfo()->pc();
2745 }
2746 it.next();
2747 }
2748
2749 ASSERT(code->instruction_start() <= prev_pc &&
2750 prev_pc <= code->instruction_end());
2751 delta += static_cast<int>(code->instruction_end() - prev_pc);
2752 EnterComment(isolate, "NoComment", delta);
2753 }
2754 }
2755 }
2756
2757
2758 void PagedSpace::ReportStatistics() {
2759 int pct = static_cast<int>(Available() * 100 / Capacity());
2760 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2761 ", waste: %" V8_PTR_PREFIX "d"
2762 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2763 Capacity(), Waste(), Available(), pct);
2764
2765 if (was_swept_conservatively_) return;
2766 ClearHistograms(heap()->isolate());
2767 HeapObjectIterator obj_it(this);
2768 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2769 CollectHistogramInfo(obj);
2770 ReportHistogram(heap()->isolate(), true);
2771 }
2772 #endif
2773
2774
2775 // -----------------------------------------------------------------------------
2776 // MapSpace implementation
2777 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2778 // there is at least one non-inlined virtual function. I would prefer to hide
2779 // the VerifyObject definition behind VERIFY_HEAP.
2780
2781 void MapSpace::VerifyObject(HeapObject* object) {
2782 CHECK(object->IsMap());
2783 }
2784
2785
2786 // -----------------------------------------------------------------------------
2787 // CellSpace and PropertyCellSpace implementation
2788 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2789 // there is at least one non-inlined virtual function. I would prefer to hide
2790 // the VerifyObject definition behind VERIFY_HEAP.
2791
2792 void CellSpace::VerifyObject(HeapObject* object) {
2793 CHECK(object->IsCell());
2794 }
2795
2796
2797 void PropertyCellSpace::VerifyObject(HeapObject* object) {
2798 CHECK(object->IsPropertyCell());
2799 }
2800
2801
2802 // -----------------------------------------------------------------------------
2803 // LargeObjectIterator
2804
2805 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2806 current_ = space->first_page_;
2807 size_func_ = NULL;
2808 }
2809
2810
2811 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2812 HeapObjectCallback size_func) {
2813 current_ = space->first_page_;
2814 size_func_ = size_func;
2815 }
2816
2817
2818 HeapObject* LargeObjectIterator::Next() {
2819 if (current_ == NULL) return NULL;
2820
2821 HeapObject* object = current_->GetObject();
2822 current_ = current_->next_page();
2823 return object;
2824 }
2825
2826
2827 // -----------------------------------------------------------------------------
2828 // LargeObjectSpace
2829 static bool ComparePointers(void* key1, void* key2) {
2830 return key1 == key2;
2831 }
2832
2833
2834 LargeObjectSpace::LargeObjectSpace(Heap* heap,
2835 intptr_t max_capacity,
2836 AllocationSpace id)
2837 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2838 max_capacity_(max_capacity),
2839 first_page_(NULL),
2840 size_(0),
2841 page_count_(0),
2842 objects_size_(0),
2843 chunk_map_(ComparePointers, 1024) {}
2844
2845
2846 bool LargeObjectSpace::SetUp() {
2847 first_page_ = NULL;
2848 size_ = 0;
2849 maximum_committed_ = 0;
2850 page_count_ = 0;
2851 objects_size_ = 0;
2852 chunk_map_.Clear();
2853 return true;
2854 }
2855
2856
2857 void LargeObjectSpace::TearDown() {
2858 while (first_page_ != NULL) {
2859 LargePage* page = first_page_;
2860 first_page_ = first_page_->next_page();
2861 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2862
2863 ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2864 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2865 space, kAllocationActionFree, page->size());
2866 heap()->isolate()->memory_allocator()->Free(page);
2867 }
2868 SetUp();
2869 }
2870
2871
2872 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2873 Executability executable) {
2874 // Check if we want to force a GC before growing the old space further.
2875 // If so, fail the allocation.
2876 if (!heap()->always_allocate() &&
2877 heap()->OldGenerationAllocationLimitReached()) {
2878 return AllocationResult::Retry(identity());
2879 }
2880
2881 if (Size() + object_size > max_capacity_) {
2882 return AllocationResult::Retry(identity());
2883 }
2884
2885 LargePage* page = heap()->isolate()->memory_allocator()->
2886 AllocateLargePage(object_size, this, executable);
2887 if (page == NULL) return AllocationResult::Retry(identity());
2888 ASSERT(page->area_size() >= object_size);
2889
2890 size_ += static_cast<int>(page->size());
2891 objects_size_ += object_size;
2892 page_count_++;
2893 page->set_next_page(first_page_);
2894 first_page_ = page;
2895
2896 if (size_ > maximum_committed_) {
2897 maximum_committed_ = size_;
2898 }
2899
2900 // Register all MemoryChunk::kAlignment-aligned chunks covered by
2901 // this large page in the chunk map.
2902 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2903 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2904 for (uintptr_t key = base; key <= limit; key++) {
2905 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2906 static_cast<uint32_t>(key),
2907 true);
2908 ASSERT(entry != NULL);
2909 entry->value = page;
2910 }
2911
2912 HeapObject* object = page->GetObject();
2913
2914 if (Heap::ShouldZapGarbage()) {
2915 // Make the object consistent so the heap can be verified in OldSpaceStep.
2916 // We only need to do this in debug builds or if verify_heap is on.
2917 reinterpret_cast<Object**>(object->address())[0] =
2918 heap()->fixed_array_map();
2919 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2920 }
2921
2922 heap()->incremental_marking()->OldSpaceStep(object_size);
2923 return object;
2924 }
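
// Illustrative sketch of the chunk_map registration above: every
// MemoryChunk::kAlignment-aligned slot overlapped by the large page is keyed
// to the page itself, so FindPage() below can resolve any interior address
// with a single hash lookup:
//
//   uintptr_t base  = reinterpret_cast<uintptr_t>(page) / kAlignment;
//   uintptr_t limit = base + (page->size() - 1) / kAlignment;
//   // keys base..limit all map to `page`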
2925
2926
2927 size_t LargeObjectSpace::CommittedPhysicalMemory() {
2928 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
2929 size_t size = 0;
2930 LargePage* current = first_page_;
2931 while (current != NULL) {
2932 size += current->CommittedPhysicalMemory();
2933 current = current->next_page();
2934 }
2935 return size;
2936 }
2937
2938
2939 // GC support
2940 Object* LargeObjectSpace::FindObject(Address a) {
2941 LargePage* page = FindPage(a);
2942 if (page != NULL) {
2943 return page->GetObject();
2944 }
2945 return Smi::FromInt(0); // Signaling not found.
2946 }
2947
2948
2949 LargePage* LargeObjectSpace::FindPage(Address a) {
2950 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2951 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2952 static_cast<uint32_t>(key),
2953 false);
2954 if (e != NULL) {
2955 ASSERT(e->value != NULL);
2956 LargePage* page = reinterpret_cast<LargePage*>(e->value);
2957 ASSERT(page->is_valid());
2958 if (page->Contains(a)) {
2959 return page;
2960 }
2961 }
2962 return NULL;
2963 }
2964
2965
2966 void LargeObjectSpace::FreeUnmarkedObjects() {
2967 LargePage* previous = NULL;
2968 LargePage* current = first_page_;
2969 while (current != NULL) {
2970 HeapObject* object = current->GetObject();
2971 // Can this large page contain pointers to non-trivial objects? No other
2972 // pointer-containing object is this big.
2973 bool is_pointer_object = object->IsFixedArray();
2974 MarkBit mark_bit = Marking::MarkBitFrom(object);
2975 if (mark_bit.Get()) {
2976 mark_bit.Clear();
2977 Page::FromAddress(object->address())->ResetProgressBar();
2978 Page::FromAddress(object->address())->ResetLiveBytes();
2979 previous = current;
2980 current = current->next_page();
2981 } else {
2982 LargePage* page = current;
2983 // Cut the chunk out from the chunk list.
2984 current = current->next_page();
2985 if (previous == NULL) {
2986 first_page_ = current;
2987 } else {
2988 previous->set_next_page(current);
2989 }
2990
2991 // Free the chunk.
2992 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2993 object, heap()->isolate());
2994 size_ -= static_cast<int>(page->size());
2995 objects_size_ -= object->Size();
2996 page_count_--;
2997
2998 // Remove entries belonging to this page.
2999 // Use the local variable 'alignment' to keep each line within the
3000 // 80-character limit checked by tools/presubmit.py.
3001 const intptr_t alignment = MemoryChunk::kAlignment;
3002 uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
3003 uintptr_t limit = base + (page->size()-1)/alignment;
3004 for (uintptr_t key = base; key <= limit; key++) {
3005 chunk_map_.Remove(reinterpret_cast<void*>(key),
3006 static_cast<uint32_t>(key));
3007 }
3008
3009 if (is_pointer_object) {
3010 heap()->QueueMemoryChunkForFree(page);
3011 } else {
3012 heap()->isolate()->memory_allocator()->Free(page);
3013 }
3014 }
3015 }
3016 heap()->FreeQueuedChunks();
3017 }
3018
3019
3020 bool LargeObjectSpace::Contains(HeapObject* object) {
3021 Address address = object->address();
3022 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3023
3024 bool owned = (chunk->owner() == this);
3025
3026 SLOW_ASSERT(!owned || FindObject(address)->IsHeapObject());
3027
3028 return owned;
3029 }
3030
3031
3032 #ifdef VERIFY_HEAP
3033 // We do not assume that the large object iterator works, because it depends
3034 // on the invariants we are checking during verification.
3035 void LargeObjectSpace::Verify() {
3036 for (LargePage* chunk = first_page_;
3037 chunk != NULL;
3038 chunk = chunk->next_page()) {
3039 // Each chunk contains an object that starts at the large object page's
3040 // object area start.
3041 HeapObject* object = chunk->GetObject();
3042 Page* page = Page::FromAddress(object->address());
3043 CHECK(object->address() == page->area_start());
3044
3045 // The first word should be a map, and we expect all map pointers to be
3046 // in map space.
3047 Map* map = object->map();
3048 CHECK(map->IsMap());
3049 CHECK(heap()->map_space()->Contains(map));
3050
3051 // We have only code, sequential strings, external strings
3052 // (sequential strings that have been morphed into external
3053 // strings), fixed arrays, and byte arrays in large object space.
3054 CHECK(object->IsCode() || object->IsSeqString() ||
3055 object->IsExternalString() || object->IsFixedArray() ||
3056 object->IsFixedDoubleArray() || object->IsByteArray());
3057
3058 // The object itself should look OK.
3059 object->ObjectVerify();
3060
3061 // Byte arrays and strings don't have interior pointers.
3062 if (object->IsCode()) {
3063 VerifyPointersVisitor code_visitor;
3064 object->IterateBody(map->instance_type(),
3065 object->Size(),
3066 &code_visitor);
3067 } else if (object->IsFixedArray()) {
3068 FixedArray* array = FixedArray::cast(object);
3069 for (int j = 0; j < array->length(); j++) {
3070 Object* element = array->get(j);
3071 if (element->IsHeapObject()) {
3072 HeapObject* element_object = HeapObject::cast(element);
3073 CHECK(heap()->Contains(element_object));
3074 CHECK(element_object->map()->IsMap());
3075 }
3076 }
3077 }
3078 }
3079 }
3080 #endif
3081
3082
3083 #ifdef DEBUG
3084 void LargeObjectSpace::Print() {
3085 LargeObjectIterator it(this);
3086 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3087 obj->Print();
3088 }
3089 }
3090
3091
3092 void LargeObjectSpace::ReportStatistics() {
3093 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
3094 int num_objects = 0;
3095 ClearHistograms(heap()->isolate());
3096 LargeObjectIterator it(this);
3097 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3098 num_objects++;
3099 CollectHistogramInfo(obj);
3100 }
3101
3102 PrintF(" number of objects %d, "
3103 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
3104 if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3105 }
3106
3107
3108 void LargeObjectSpace::CollectCodeStatistics() {
3109 Isolate* isolate = heap()->isolate();
3110 LargeObjectIterator obj_it(this);
3111 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
3112 if (obj->IsCode()) {
3113 Code* code = Code::cast(obj);
3114 isolate->code_kind_statistics()[code->kind()] += code->Size();
3115 }
3116 }
3117 }
3118
3119
3120 void Page::Print() {
3121 // Make a best effort to print the objects in the page.
3122 PrintF("Page@%p in %s\n",
3123 this->address(),
3124 AllocationSpaceName(this->owner()->identity()));
3125 printf(" --------------------------------------\n");
3126 HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
3127 unsigned mark_size = 0;
3128 for (HeapObject* object = objects.Next();
3129 object != NULL;
3130 object = objects.Next()) {
3131 bool is_marked = Marking::MarkBitFrom(object).Get();
3132 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
3133 if (is_marked) {
3134 mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
3135 }
3136 object->ShortPrint();
3137 PrintF("\n");
3138 }
3139 printf(" --------------------------------------\n");
3140 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3141 }
3142
3143 #endif // DEBUG
3144
3145 } } // namespace v8::internal
3146