// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 {
namespace internal {

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space).low() <= (info).top \
         && (info).top <= (space).high() \
         && (info).limit == (space).high())
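// (The macro is used below whenever a semispace's allocation info changes,
// e.g. ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_).)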


// -----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}


void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}


bool HeapObjectIterator::HasNextInNextPage() {
  if (cur_addr_ == end_addr_) return false;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return true;
}


#ifdef DEBUG
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif


// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
  prev_page_ = NULL;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage();
      break;
    case ALL_PAGES:
#ifdef DEBUG
      // Verify that the cached last page in the space is actually the
      // last page.
      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
        if (!p->next_page()->is_valid()) {
          ASSERT(space->last_page_ == p);
        }
      }
#endif
      stop_page_ = space->last_page_;
      break;
  }
}


// -----------------------------------------------------------------------------
// Page

#ifdef DEBUG
Page::RSetState Page::rset_state_ = Page::IN_USE;
#endif

// -----------------------------------------------------------------------------
// MemoryAllocator
//
int MemoryAllocator::capacity_ = 0;
int MemoryAllocator::size_ = 0;

VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;
List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
    kEstimatedNumberOfChunks);
List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
int MemoryAllocator::max_nof_chunks_ = 0;
int MemoryAllocator::top_ = 0;


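// Free chunk ids are kept in free_chunk_ids_, which Push and Pop below treat
// as a stack, with top_ as its stack pointer.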
void MemoryAllocator::Push(int free_chunk_id) {
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}


int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}


bool MemoryAllocator::Setup(int capacity) {
  capacity_ = RoundUp(capacity, Page::kPageSize);

  // Over-estimate the size of the chunks_ array. It assumes the expansion of
  // old space is always in the unit of a chunk (kChunkSize) except the last
  // expansion.
  //
  // Due to alignment, allocated space might be one page less than the
  // required number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for semispaces, one for map space, one for old
  // space, and one for code space.
  max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
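  // A worked example with illustrative constants only: assuming 8 KB pages
  // grouped into 64-page (512 KB) chunks, each chunk is good for at least
  // 504 KB of pages, so a 32 MB capacity yields 32 MB / 504 KB + 5 = 70
  // chunk ids.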
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  ChunkInfo info;  // uninitialized element.
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}


void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();

  if (initial_chunk_ != NULL) {
    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}


void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<int>(requested) > capacity_) return NULL;

  void* mem = OS::Allocate(requested, allocated, executable == EXECUTABLE);
  int alloced = static_cast<int>(*allocated);
  size_ += alloced;
  Counters::memory_allocated.Increment(alloced);
  return mem;
}


void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
  OS::Free(mem, length);
  Counters::memory_allocated.Decrement(static_cast<int>(length));
  size_ -= static_cast<int>(length);
  ASSERT(size_ >= 0);
}


void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(initial_chunk_->size() == requested);
  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += static_cast<int>(requested);
  return initial_chunk_->address();
}


static int PagesInChunk(Address start, size_t size) {
  // The first page starts on the first page-aligned address from start onward
  // and the last page ends on the last page-aligned address before
  // start+size. Page::kPageSize is a power of two so we can divide by
  // shifting.
  return (RoundDown(start + size, Page::kPageSize)
          - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits;
}


Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  // There is not enough space to guarantee that the desired number of pages
  // can be allocated.
  if (size_ + static_cast<int>(chunk_size) > capacity_) {
    // Request as many pages as we can.
    chunk_size = capacity_ - size_;
    requested_pages = chunk_size >> Page::kPageSizeBits;

    if (requested_pages <= 0) return Page::FromAddress(NULL);
  }
  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
  if (*allocated_pages == 0) {
    FreeRawMemory(chunk, chunk_size);
    LOG(DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
}


Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
  Counters::memory_allocated.Increment(size);

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Commit(start, size, executable)) return false;
  Counters::memory_allocated.Increment(size);
  return true;
}

bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Uncommit(start, size)) return false;
  Counters::memory_allocated.Decrement(size);
  return true;
}

Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

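  // Each page's opaque_header packs the address of the next page together
  // with this chunk's id: page addresses are page-aligned, so their low
  // Page::kPageSizeBits bits are zero and can carry the id. Accessors such
  // as Page::next_page() and MemoryAllocator::GetChunkId() mask the two
  // parts back apart.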
  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->is_normal_page = 1;
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}


Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'.
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'p'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // Set the next_page of last_page to NULL.
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}


void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory. Instead we uncommit the virtual
  // memory.
  if (InInitialChunk(c.address())) {
    // TODO(1240712): VirtualMemory::Uncommit has a return value which
    // is ignored here.
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters::memory_allocated.Decrement(c.size());
  } else {
    LOG(DeleteEvent("PagedChunk", c.address()));
    FreeRawMemory(c.address(), c.size());
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}


Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
  return Page::FromAddress(low);
}


Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF(" capacity: %d, used: %d, available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(int max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(id, executable) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;
}


bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;
  // Try to use the virtual memory range passed to us. If it is too small to
  // contain at least one page, ignore it and allocate new pages instead.
  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
                                               Page::kPageSize * pages_in_chunk,
                                               this, &num_pages);
  } else {
    int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
                              max_capacity_ / Page::kObjectAreaSize);
    first_page_ =
        MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  // We are sure that the first page is valid and that we have at least one
  // page.
  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  // Sequentially initialize remembered sets in the newly allocated
  // pages and cache the current last page in the space.
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->ClearRSet();
    last_page_ = p;
  }

  // Use first_page_ for allocation.
  SetAllocationInfo(&allocation_info_, first_page_);

  return true;
}


bool PagedSpace::HasBeenSetup() {
  return (Capacity() > 0);
}


void PagedSpace::TearDown() {
  first_page_ = MemoryAllocator::FreePages(first_page_);
  ASSERT(!first_page_->is_valid());

  accounting_stats_.Clear();
}


#ifdef ENABLE_HEAP_PROTECTION

void PagedSpace::Protect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::ProtectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}


void PagedSpace::Unprotect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::UnprotectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}

#endif


void PagedSpace::ClearRSet() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->ClearRSet();
  }
}


Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called before or after mark-compact GC
  // because it accesses map pointers.
  ASSERT(!MarkCompactCollector::in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}


void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}


void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'. We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}


int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  // The Contains function considers an address at the beginning of a page
  // to be in that page; MCSpaceOffsetForAddress considers it to be in the
  // previous page.
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  // If addr is at the end of a page, it belongs to the previous page.
  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
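  // The result linearizes the pair (page index, offset within page) into a
  // single offset with a stride of Page::kPageSize per page.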
  return (index * Page::kPageSize) + p->Offset(addr);
}


// Slow case for reallocating and promoting objects during a compacting
// collection. This function is not space-specific.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  // There are surely more pages in the space now.
  ASSERT(current_page->next_page()->is_valid());
  // We do not add the top of page block for current page to the space's
  // free list---the block may contain live objects so we cannot write
  // bookkeeping information to it. Instead, we will recover top of page
  // blocks when we move objects to their new locations.
  //
  // We do however write the allocation pointer to the page. The encoding
  // of forwarding addresses is as an offset in terms of live bytes, so we
  // need quick access to the allocation top of each page to decode
  // forwarding addresses.
  current_page->mc_relocation_top = mc_forwarding_info_.top;
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}


bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);
  // Last page must be valid and its next page is invalid.
  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
  if (available_pages <= 0) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  MemoryAllocator::SetNextPage(last_page, p);

  // Sequentially clear the remembered set of new pages and cache the
  // new last page in the space.
  while (p->is_valid()) {
    p->ClearRSet();
    last_page_ = p;
    p = p->next_page();
  }

  return true;
}


#ifdef DEBUG
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif


void PagedSpace::Shrink() {
  // Release the free pages after the allocation top page.
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  // Count the number of pages we would like to free.
  int pages_to_free = 0;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free++;
  }

  // Free pages after top_page.
  Page* p = MemoryAllocator::FreePages(top_page->next_page());
  MemoryAllocator::SetNextPage(top_page, p);

  // Find out how many pages we failed to free and update last_page_.
  // Please note pages can only be freed in whole chunks.
  last_page_ = top_page;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free--;
    last_page_ = p;
  }

  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}


bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      // Unless this is the last page in the space containing allocated
      // objects, the allocation top should be at a constant offset from the
      // object area end.
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      } else {
        ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(Heap::map_space()->Contains(map));

        // Perform space-specific object verification.
        VerifyObject(object);

        // The object itself should look OK.
        object->Verify();

        // All the interior pointers should be contained in the heap and
        // have their remembered set bits set if required as determined
        // by the visitor.
        int size = object->Size();
        if (object->IsCode()) {
          Code::cast(object)->ConvertICTargetsFromAddressToObject();
          object->IterateBody(map->instance_type(), size, visitor);
          Code::cast(object)->ConvertICTargetsFromObjectToAddress();
        } else {
          object->IterateBody(map->instance_type(), size, visitor);
        }

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}
#endif


// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::Setup(Address start, int size) {
  // Setup new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
  int maximum_semispace_capacity = Heap::SemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * maximum_semispace_capacity);
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start,
                       initial_semispace_capacity,
                       maximum_semispace_capacity)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_semispace_capacity,
                         initial_semispace_capacity,
                         maximum_semispace_capacity)) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
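  // Because the block is a power of two in size and aligned to its size,
  // containment testing reduces to a mask compare, roughly:
  //   (reinterpret_cast<uintptr_t>(addr) & address_mask_)
  //       == reinterpret_cast<uintptr_t>(start_)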

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::TearDown() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }
#endif

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}


#ifdef ENABLE_HEAP_PROTECTION

void NewSpace::Protect() {
  MemoryAllocator::Protect(ToSpaceLow(), Capacity());
  MemoryAllocator::Protect(FromSpaceLow(), Capacity());
}


void NewSpace::Unprotect() {
  MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
                             to_space_.executable());
  MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
                             from_space_.executable());
}

#endif


void NewSpace::Flip() {
  SemiSpace tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}


void NewSpace::Grow() {
  ASSERT(Capacity() < MaximumCapacity());
  if (to_space_.Grow()) {
    // Only grow from space if we managed to grow to space.
    if (!from_space_.Grow()) {
      // If we managed to grow to space but couldn't grow from space,
      // attempt to shrink to space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * Size());
  int rounded_new_capacity = RoundUp(new_capacity, OS::AllocateAlignment());
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from space if we managed to shrink to space.
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to space but couldn't shrink from
      // space, attempt to grow to space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}


void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


#ifdef DEBUG
// We do not use the SemispaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif


bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
    return false;
  }
  committed_ = true;
  return true;
}


bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
    return false;
  }
  committed_ = false;
  return true;
}


// -----------------------------------------------------------------------------
// SemiSpace implementation

bool SemiSpace::Setup(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise. In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  initial_capacity_ = initial_capacity;
  capacity_ = initial_capacity;
  maximum_capacity_ = maximum_capacity;
  committed_ = false;

  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;

  return Commit();
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  int maximum_extra = maximum_capacity_ - capacity_;
  int extra = Min(RoundUp(capacity_, OS::AllocateAlignment()),
                  maximum_extra);
  if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
    return false;
  }
  capacity_ += extra;
  return true;
}


bool SemiSpace::GrowTo(int new_capacity) {
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);
  size_t delta = new_capacity - capacity_;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}


bool SemiSpace::ShrinkTo(int new_capacity) {
  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);
  size_t delta = capacity_ - new_capacity;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}


#ifdef DEBUG
void SemiSpace::Print() { }


void SemiSpace::Verify() { }
#endif


// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space, space->bottom(), space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space, space->bottom(), space->top(), size_func);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(space, start, space->top(), NULL);
}


void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  ASSERT(space->ToSpaceContains(start));
  ASSERT(space->ToSpaceLow() <= end
         && end <= space->ToSpaceHigh());
  space_ = &space->to_space_;
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}


#ifdef DEBUG
// A static array of histogram info for each type.
static HistogramInfo heap_histograms[LAST_TYPE+1];
static JSObject::SpillInformation js_spill_information;

// heap_histograms is shared, always clear it before using it.
static void ClearHistograms() {
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  js_spill_information.Clear();
}


static int code_kind_statistics[Code::NUMBER_OF_KINDS];


static void ClearCodeKindStatistics() {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}


static void ReportCodeKindStatistics() {
  const char* table[Code::NUMBER_OF_KINDS];

#define CASE(name) \
  case Code::name: table[Code::name] = #name; \
  break

  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    switch (static_cast<Code::Kind>(i)) {
      CASE(FUNCTION);
      CASE(STUB);
      CASE(BUILTIN);
      CASE(LOAD_IC);
      CASE(KEYED_LOAD_IC);
      CASE(STORE_IC);
      CASE(KEYED_STORE_IC);
      CASE(CALL_IC);
    }
  }

#undef CASE

  PrintF("\n Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
    }
  }
  PrintF("\n");
}


static int CollectHistogramInfo(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  ASSERT(heap_histograms[type].name() != NULL);
  heap_histograms[type].increment_number(1);
  heap_histograms[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
  }

  return obj->Size();
}


static void ReportHistogram(bool print_spill) {
  PrintF("\n Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (heap_histograms[i].number() > 0) {
      PrintF(" %-33s%10d (%10d bytes)\n",
             heap_histograms[i].name(),
             heap_histograms[i].number(),
             heap_histograms[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
  string_number += heap_histograms[type].number(); \
  string_bytes += heap_histograms[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF(" %-33s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    js_spill_information.Print();
  }
}
#endif  // DEBUG


// Support for statistics gathering for --heap-stats and --log-gc.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}

// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
// set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the
// --log-gc flag is set.
void NewSpace::CollectStatistics() {
  ClearHistograms();
  SemiSpaceIterator it(this);
  while (it.has_next()) RecordAllocation(it.next());
}


#ifdef ENABLE_LOGGING_AND_PROFILING
static void DoReportStatistics(HistogramInfo* info, const char* description) {
  LOG(HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
  string_number += info[type].number(); \
  string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(HeapSampleEndEvent("NewSpace", description));
}
#endif  // ENABLE_LOGGING_AND_PROFILING


void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF(" capacity: %d, available: %d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct*100));
    PrintF("\n Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF(" %-33s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) {
    DoReportStatistics(allocated_histogram_, "allocated");
    DoReportStatistics(promoted_histogram_, "promoted");
  }
#endif  // ENABLE_LOGGING_AND_PROFILING
}


void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}


void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation

void FreeListNode::set_size(int size_in_bytes) {
  ASSERT(size_in_bytes > 0);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  // We write a map and possibly size information to the block. If the block
  // is big enough to be a ByteArray with at least one extra word (the next
  // pointer), we set its map to be the byte array map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (e.g., one or two words) to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
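  // A sketch of the resulting layouts, assuming 32-bit (4-byte) words:
  //   size >= 12 bytes: [byte array map][length][next][unused...]
  //   size ==  8 bytes: [two-pointer filler map][next]
  //   size ==  4 bytes: [one-pointer filler map]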
  if (size_in_bytes > ByteArray::kAlignedSize) {
    set_map(Heap::raw_unchecked_byte_array_map());
    ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
  } else if (size_in_bytes == kPointerSize) {
    set_map(Heap::raw_unchecked_one_pointer_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map(Heap::raw_unchecked_two_pointer_filler_map());
  } else {
    UNREACHABLE();
  }
  ASSERT(Size() == size_in_bytes);
}


Address FreeListNode::next() {
  ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
         map() == Heap::raw_unchecked_two_pointer_filler_map());
  if (map() == Heap::raw_unchecked_byte_array_map()) {
    ASSERT(Size() >= kNextOffset + kPointerSize);
    return Memory::Address_at(address() + kNextOffset);
  } else {
    return Memory::Address_at(address() + kPointerSize);
  }
}


void FreeListNode::set_next(Address next) {
  ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
         map() == Heap::raw_unchecked_two_pointer_filler_map());
  if (map() == Heap::raw_unchecked_byte_array_map()) {
    ASSERT(Size() >= kNextOffset + kPointerSize);
    Memory::Address_at(address() + kNextOffset) = next;
  } else {
    Memory::Address_at(address() + kPointerSize) = next;
  }
}


OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
  Reset();
}


void OldSpaceFreeList::Reset() {
  available_ = 0;
  for (int i = 0; i < kFreeListsLength; i++) {
    free_[i].head_node_ = NULL;
  }
  needs_rebuild_ = false;
  finger_ = kHead;
  free_[kHead].next_size_ = kEnd;
}


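// The free lists are segregated by exact size: free_[i] heads a list of
// blocks of exactly i * kPointerSize bytes, and the next_size_ fields chain
// the non-empty size classes in ascending order so that Allocate below can
// find the smallest size that fits.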
void OldSpaceFreeList::RebuildSizeList() {
  ASSERT(needs_rebuild_);
  int cur = kHead;
  for (int i = cur + 1; i < kFreeListsLength; i++) {
    if (free_[i].head_node_ != NULL) {
      free_[cur].next_size_ = i;
      cur = i;
    }
  }
  free_[cur].next_size_ = kEnd;
  needs_rebuild_ = false;
}


int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
#ifdef DEBUG
  for (int i = 0; i < size_in_bytes; i += kPointerSize) {
    Memory::Address_at(start + i) = kZapValue;
  }
#endif
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(size_in_bytes);

  // We don't use the freelists in compacting mode. This makes it more like a
  // GC that only has mark-sweep-compact and doesn't have a mark-sweep
  // collector.
  if (FLAG_always_compact) {
    return size_in_bytes;
  }

  // Early return to drop too-small blocks on the floor (one or two word
  // blocks cannot hold a map pointer, a size field, and a pointer to the
  // next block in the free list).
  if (size_in_bytes < kMinBlockSize) {
    return size_in_bytes;
  }

  // Insert other blocks at the head of an exact free list.
  int index = size_in_bytes >> kPointerSizeLog2;
  node->set_next(free_[index].head_node_);
  free_[index].head_node_ = node->address();
  available_ += size_in_bytes;
  needs_rebuild_ = true;
  return 0;
}


Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  if (needs_rebuild_) RebuildSizeList();
  int index = size_in_bytes >> kPointerSizeLog2;
  // Check for a perfect fit.
  if (free_[index].head_node_ != NULL) {
    FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
    // If this was the last block of its size, remove the size.
    if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
    available_ -= size_in_bytes;
    *wasted_bytes = 0;
    ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
    return node;
  }
  // Search the size list for the best fit.
  int prev = finger_ < index ? finger_ : kHead;
  int cur = FindSize(index, &prev);
  ASSERT(index < cur);
  if (cur == kEnd) {
    // No large enough size in list.
    *wasted_bytes = 0;
    return Failure::RetryAfterGC(size_in_bytes, owner_);
  }
  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
  int rem = cur - index;
  int rem_bytes = rem << kPointerSizeLog2;
  FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
  ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
  FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
                                                     size_in_bytes);
  // Distinguish the cases prev < rem < cur and rem <= prev < cur
  // to avoid many redundant tests and calls to Insert/RemoveSize.
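  // E.g. carving a 3-word request out of a 10-word block (index == 3,
  // cur == 10) leaves rem == 7; if the size-7 list was empty, 7 must be
  // threaded into the size list between prev and cur.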
1540 if (prev < rem) {
1541 // Simple case: insert rem between prev and cur.
1542 finger_ = prev;
1543 free_[prev].next_size_ = rem;
1544 // If this was the last block of size cur, remove the size.
1545 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1546 free_[rem].next_size_ = free_[cur].next_size_;
1547 } else {
1548 free_[rem].next_size_ = cur;
1549 }
1550 // Add the remainder block.
1551 rem_node->set_size(rem_bytes);
1552 rem_node->set_next(free_[rem].head_node_);
1553 free_[rem].head_node_ = rem_node->address();
1554 } else {
1555 // If this was the last block of size cur, remove the size.
1556 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1557 finger_ = prev;
1558 free_[prev].next_size_ = free_[cur].next_size_;
1559 }
1560 if (rem_bytes < kMinBlockSize) {
1561 // Too-small remainder is wasted.
1562 rem_node->set_size(rem_bytes);
1563 available_ -= size_in_bytes + rem_bytes;
1564 *wasted_bytes = rem_bytes;
1565 return cur_node;
1566 }
1567 // Add the remainder block and, if needed, insert its size.
1568 rem_node->set_size(rem_bytes);
1569 rem_node->set_next(free_[rem].head_node_);
1570 free_[rem].head_node_ = rem_node->address();
1571 if (rem_node->next() == NULL) InsertSize(rem);
1572 }
1573 available_ -= size_in_bytes;
1574 *wasted_bytes = 0;
1575 return cur_node;
1576 }
1577
1578
1579 #ifdef DEBUG
Contains(FreeListNode * node)1580 bool OldSpaceFreeList::Contains(FreeListNode* node) {
1581 for (int i = 0; i < kFreeListsLength; i++) {
1582 Address cur_addr = free_[i].head_node_;
1583 while (cur_addr != NULL) {
1584 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1585 if (cur_node == node) return true;
1586 cur_addr = cur_node->next();
1587 }
1588 }
1589 return false;
1590 }
1591 #endif
1592
1593
FixedSizeFreeList(AllocationSpace owner,int object_size)1594 FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
1595 : owner_(owner), object_size_(object_size) {
1596 Reset();
1597 }
1598
1599
Reset()1600 void FixedSizeFreeList::Reset() {
1601 available_ = 0;
1602 head_ = NULL;
1603 }
1604
1605
Free(Address start)1606 void FixedSizeFreeList::Free(Address start) {
1607 #ifdef DEBUG
1608 for (int i = 0; i < object_size_; i += kPointerSize) {
1609 Memory::Address_at(start + i) = kZapValue;
1610 }
1611 #endif
1612 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1613 FreeListNode* node = FreeListNode::FromAddress(start);
1614 node->set_size(object_size_);
1615 node->set_next(head_);
1616 head_ = node->address();
1617 available_ += object_size_;
1618 }
1619
1620
Allocate()1621 Object* FixedSizeFreeList::Allocate() {
1622 if (head_ == NULL) {
1623 return Failure::RetryAfterGC(object_size_, owner_);
1624 }
1625
1626 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1627 FreeListNode* node = FreeListNode::FromAddress(head_);
1628 head_ = node->next();
1629 available_ -= object_size_;
1630 return node;
1631 }
1632
1633
1634 // -----------------------------------------------------------------------------
1635 // OldSpace implementation
1636
PrepareForMarkCompact(bool will_compact)1637 void OldSpace::PrepareForMarkCompact(bool will_compact) {
1638 if (will_compact) {
1639 // Reset relocation info. During a compacting collection, everything in
1640 // the space is considered 'available' and we will rediscover live data
1641 // and waste during the collection.
1642 MCResetRelocationInfo();
1643 ASSERT(Available() == Capacity());
1644 } else {
1645 // During a non-compacting collection, everything below the linear
1646 // allocation pointer is considered allocated (everything above is
1647 // available) and we will rediscover available and wasted bytes during
1648 // the collection.
1649 accounting_stats_.AllocateBytes(free_list_.available());
1650 accounting_stats_.FillWastedBytes(Waste());
1651 }
1652
1653 // Clear the free list before a full GC---it will be rebuilt afterward.
1654 free_list_.Reset();
1655 }
1656
1657
void OldSpace::MCCommitRelocationInfo() {
  // Update fast allocation info.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = mc_forwarding_info_.limit;
  ASSERT(allocation_info_.VerifyPagedAllocation());

  // The space is compacted and we haven't yet built free lists or
  // wasted any space.
  ASSERT(Waste() == 0);
  ASSERT(AvailableFree() == 0);

  // Build the free list for the space.
  int computed_size = 0;
  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
  while (it.has_next()) {
    Page* p = it.next();
    // Space below the relocation pointer is allocated.
    computed_size += p->mc_relocation_top - p->ObjectAreaStart();
    if (it.has_next()) {
      // Free the space at the top of the page. We cannot use
      // p->mc_relocation_top after the call to Free (because Free will clear
      // remembered set bits).
      int extra_size = p->ObjectAreaEnd() - p->mc_relocation_top;
      if (extra_size > 0) {
        int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
        // The bytes we have just "freed" to add to the free list were
        // already accounted as available.
        accounting_stats_.WasteBytes(wasted_bytes);
      }
    }
  }

  // Make sure the computed size - based on the used portion of the pages in
  // use - matches the size obtained while computing forwarding addresses.
  ASSERT(computed_size == Size());
}

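// A self-contained sketch of the per-page accounting loop above
// (hypothetical Example* names): after compaction, bytes below each page's
// relocation top are live, and the tail of every page except the last is
// handed back to the free list.
struct ExamplePage {
  int area_start;      // offset of the object area
  int relocation_top;  // offset one past the last relocated object
  int area_end;        // offset one past the object area
};

static int ExampleCommitPages(const ExamplePage* pages, int n,
                              void (*free_tail)(int page, int bytes)) {
  int computed_size = 0;
  for (int i = 0; i < n; i++) {
    computed_size += pages[i].relocation_top - pages[i].area_start;
    int tail = pages[i].area_end - pages[i].relocation_top;
    // The last page keeps its tail for linear allocation.
    if (i < n - 1 && tail > 0) free_tail(i, tail);
  }
  return computed_size;  // must match the size computed while forwarding
}
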
// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
  // Linear allocation in this space has failed. If there is another page
  // in the space, move to that page and allocate there. This allocation
  // should succeed (size_in_bytes should not be greater than a page's
  // object area size).
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space. Try free list allocation.
  int wasted_bytes;
  Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
  accounting_stats_.WasteBytes(wasted_bytes);
  if (!result->IsFailure()) {
    accounting_stats_.AllocateBytes(size_in_bytes);
    return HeapObject::cast(result);
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  ASSERT(!current_page->next_page()->is_valid());
  if (Expand(current_page)) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // Finally, fail.
  return NULL;
}

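// The fallback chain above can be read as a four-step policy. A schematic,
// self-contained sketch (hypothetical Example* callbacks standing in for
// AllocateInNextPage, the free list, the GC limit check, and Expand):
static void* ExampleSlowAllocate(int size,
                                 void* (*allocate_in_next_page)(int),
                                 void* (*allocate_from_free_list)(int),
                                 bool (*gc_limit_reached)(),
                                 bool (*expand)()) {
  void* result = allocate_in_next_page(size);        // (1) bump in next page
  if (result != NULL) return result;
  result = allocate_from_free_list(size);            // (2) free-list hit
  if (result != NULL) return result;
  if (gc_limit_reached()) return NULL;               // fail so a GC can run
  if (expand()) return allocate_in_next_page(size);  // (3) grow the space
  return NULL;                                       // (4) genuinely out
}
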
// Add the block at the top of the page to the space's free list, set the
// allocation info to the next page (there is assumed to be one), and
// allocate linearly there.
HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) {
  ASSERT(current_page->next_page()->is_valid());
  // Add the block at the top of this page to the free list.
  int free_size = current_page->ObjectAreaEnd() - allocation_info_.top;
  if (free_size > 0) {
    int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
    accounting_stats_.WasteBytes(wasted_bytes);
  }
  SetAllocationInfo(&allocation_info_, current_page->next_page());
  return AllocateLinearly(&allocation_info_, size_in_bytes);
}

#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
};


// Must be small, since an iteration is used for lookup.
const int kMaxComments = 64;
static CommentStatistic comments_statistics[kMaxComments+1];

void PagedSpace::ReportCodeStatistics() {
  ReportCodeKindStatistics();
  PrintF("Code comment statistics (\" [ comment-txt : size/ "
         "count (average)\"):\n");
  for (int i = 0; i <= kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
             cs.size/cs.count);
    }
  }
  PrintF("\n");
}


void PagedSpace::ResetCodeStatistics() {
  ClearCodeKindStatistics();
  for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
  comments_statistics[kMaxComments].comment = "Unknown";
  comments_statistics[kMaxComments].size = 0;
  comments_statistics[kMaxComments].count = 0;
}

// Adds a comment to the 'comments_statistics' table. Performance is OK as
// long as 'kMaxComments' is small.
static void EnterComment(const char* comment, int delta) {
  // Do not count empty comments.
  if (delta <= 0) return;
  CommentStatistic* cs = &comments_statistics[kMaxComments];
  // Search for a free or matching entry in 'comments_statistics': 'cs'
  // points to the result.
  for (int i = 0; i < kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
  // Update the entry for 'comment'.
  cs->size += delta;
  cs->count += 1;
}

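// A self-contained sketch of the find-or-insert policy above (hypothetical
// ExampleFindOrInsert; it compares keys by pointer identity for brevity,
// where the real code uses strcmp): the table is scanned linearly, the
// first free slot claims a new key, and a full table falls through to a
// shared overflow slot at index 'length'.
static int ExampleFindOrInsert(const char** table, int length,
                               const char* key) {
  for (int i = 0; i < length; i++) {
    if (table[i] == NULL) {
      table[i] = key;  // first free slot: insert the new key
      return i;
    }
    if (table[i] == key) return i;  // existing entry: update in place
  }
  return length;  // overflow slot ("Unknown"), shared by all surplus keys
}
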
// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']'). RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(RelocIterator* it) {
  ASSERT(!it->done());
  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip.
    return;
  }

  // Search for the end of the nested comment or a new nested comment.
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, and therefore exit
    // from the loop.
    ASSERT(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += it->rinfo()->pc() - prev_pc;
      if (txt[0] == ']') break;  // End of nested comment.
      // A new comment.
      CollectCommentStatistics(it);
      // Skip code that was covered by the previous comment.
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(comment_txt, flat_delta);
}

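// Worked example of the "flat" attribution above: for code annotated as
//   [ outer ... [ inner ... ] ... ]
// the bytes covered by "inner" are attributed to "inner" alone, while
// "outer" receives only the bytes not covered by any nested comment. This
// works because prev_pc is advanced past each nested region after the
// recursive call, so flat_delta never double-counts.
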
// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  HeapObjectIterator obj_it(this);
  while (obj_it.has_next()) {
    HeapObject* obj = obj_it.next();
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      code_kind_statistics[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += it.rinfo()->pc() - prev_pc;
          CollectCommentStatistics(&it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      ASSERT(code->instruction_start() <= prev_pc &&
             prev_pc <= code->relocation_start());
      delta += code->relocation_start() - prev_pc;
      EnterComment("NoComment", delta);
    }
  }
}

void OldSpace::ReportStatistics() {
  int pct = Available() * 100 / Capacity();
  PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  // Report remembered set statistics.
  int rset_marked_pointers = 0;
  int rset_marked_arrays = 0;
  int rset_marked_array_elements = 0;
  int cross_gen_pointers = 0;
  int cross_gen_array_elements = 0;

  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
  while (page_it.has_next()) {
    Page* p = page_it.next();

    for (Address rset_addr = p->RSetStart();
         rset_addr < p->RSetEnd();
         rset_addr += kIntSize) {
      int rset = Memory::int_at(rset_addr);
      if (rset != 0) {
        // Bits were set
        int intoff = rset_addr - p->address();
        int bitoff = 0;
        for (; bitoff < kBitsPerInt; ++bitoff) {
          if ((rset & (1 << bitoff)) != 0) {
            int bitpos = intoff*kBitsPerByte + bitoff;
            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
            Object** obj = reinterpret_cast<Object**>(slot);
            if (*obj == Heap::raw_unchecked_fixed_array_map()) {
              rset_marked_arrays++;
              FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));

              rset_marked_array_elements += fa->length();
              // Manually inline FixedArray::IterateBody
              Address elm_start = slot + FixedArray::kHeaderSize;
              Address elm_stop = elm_start + fa->length() * kPointerSize;
              for (Address elm_addr = elm_start;
                   elm_addr < elm_stop; elm_addr += kPointerSize) {
                // Filter non-heap-object pointers
                Object** elm_p = reinterpret_cast<Object**>(elm_addr);
                if (Heap::InNewSpace(*elm_p))
                  cross_gen_array_elements++;
              }
            } else {
              rset_marked_pointers++;
              if (Heap::InNewSpace(*obj))
                cross_gen_pointers++;
            }
          }
        }
      }
    }
  }

  pct = rset_marked_pointers == 0 ?
      0 : cross_gen_pointers * 100 / rset_marked_pointers;
  PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
         rset_marked_pointers, cross_gen_pointers, pct);
  PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
  PrintF(" elements %d, ", rset_marked_array_elements);
  pct = rset_marked_array_elements == 0 ? 0
      : cross_gen_array_elements * 100 / rset_marked_array_elements;
  PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
  PrintF(" total rset-marked bits %d\n",
         (rset_marked_pointers + rset_marked_arrays));
  pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
      : (cross_gen_pointers + cross_gen_array_elements) * 100 /
        (rset_marked_pointers + rset_marked_array_elements);
  PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
         (rset_marked_pointers + rset_marked_array_elements),
         (cross_gen_pointers + cross_gen_array_elements),
         pct);

  ClearHistograms();
  HeapObjectIterator obj_it(this);
  while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
  ReportHistogram(true);
}

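// Sketch of the bit-to-slot mapping used in the scan above (hypothetical
// ExampleSlotOffset; the parameters mirror kBitsPerByte and
// kObjectAlignmentBits): a remembered-set bit at byte offset 'intoff'
// within the page, bit index 'bitoff', covers the pointer slot at
// (intoff * bits_per_byte + bitoff) << alignment_bits bytes from the page
// start, because each bit stands for one object-aligned word.
static int ExampleSlotOffset(int intoff, int bitoff,
                             int bits_per_byte, int alignment_bits) {
  int bitpos = intoff * bits_per_byte + bitoff;  // linear bit index
  return bitpos << alignment_bits;               // byte offset of the slot
}
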
// Dump the range of remembered set words between [start, end) corresponding
// to the pointers starting at object_p. The allocation_top is an object
// pointer which should not be read past. This is important for large object
// pages, where some bits in the remembered set range do not correspond to
// allocated addresses.
static void PrintRSetRange(Address start, Address end, Object** object_p,
                           Address allocation_top) {
  Address rset_address = start;

  // If the range starts on an odd-numbered word (e.g., for large object
  // extra remembered set ranges), print some spaces.
  if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
    PrintF(" ");
  }

  // Loop over all the words in the range.
  while (rset_address < end) {
    uint32_t rset_word = Memory::uint32_at(rset_address);
    int bit_position = 0;

    // Loop over all the bits in the word.
    while (bit_position < kBitsPerInt) {
      if (object_p == reinterpret_cast<Object**>(allocation_top)) {
        // Print a bar at the allocation pointer.
        PrintF("|");
      } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
        // Do not dereference object_p past the allocation pointer.
        PrintF("#");
      } else if ((rset_word & (1 << bit_position)) == 0) {
        // Print a dot for zero bits.
        PrintF(".");
      } else if (Heap::InNewSpace(*object_p)) {
        // Print an X for one bits for pointers to new space.
        PrintF("X");
      } else {
        // Print a circle for one bits for pointers to old space.
        PrintF("o");
      }

      // Print a space after every 8th bit except the last.
      if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
        PrintF(" ");
      }

      // Advance to the next bit.
      bit_position++;
      object_p++;
    }

    // Print a newline after every odd-numbered word, otherwise a space.
    if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
      PrintF("\n");
    } else {
      PrintF(" ");
    }

    // Advance to the next remembered set word.
    rset_address += kIntSize;
  }
}

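// A minimal sketch of the per-word rendering above (hypothetical
// ExampleRenderWord): each bit of a remembered-set word becomes one
// character, '.' for clear and 'X' for set. The real printer above
// additionally distinguishes old-space targets ('o'), the allocation top
// ('|'), and slots past it ('#').
static void ExampleRenderWord(uint32_t word, char* out /* >= 33 bytes */) {
  for (int bit = 0; bit < 32; bit++) {
    out[bit] = ((word >> bit) & 1) != 0 ? 'X' : '.';
  }
  out[32] = '\0';
}
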
void PagedSpace::DoPrintRSet(const char* space_name) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    PrintF("%s page 0x%x:\n", space_name, p);
    PrintRSetRange(p->RSetStart(), p->RSetEnd(),
                   reinterpret_cast<Object**>(p->ObjectAreaStart()),
                   p->AllocationTop());
    PrintF("\n");
  }
}


void OldSpace::PrintRSet() { DoPrintRSet("old"); }
#endif

// -----------------------------------------------------------------------------
// FixedSpace implementation

void FixedSpace::PrepareForMarkCompact(bool will_compact) {
  if (will_compact) {
    // Reset relocation info.
    MCResetRelocationInfo();

    // During a compacting collection, everything in the space is considered
    // 'available' (set by the call to MCResetRelocationInfo) and we will
    // rediscover live and wasted bytes during the collection.
    ASSERT(Available() == Capacity());
  } else {
    // During a non-compacting collection, everything below the linear
    // allocation pointer except wasted top-of-page blocks is considered
    // allocated and we will rediscover available bytes during the
    // collection.
    accounting_stats_.AllocateBytes(free_list_.available());
  }

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}

void FixedSpace::MCCommitRelocationInfo() {
  // Update fast allocation info.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = mc_forwarding_info_.limit;
  ASSERT(allocation_info_.VerifyPagedAllocation());

  // The space is compacted and we haven't yet wasted any space.
  ASSERT(Waste() == 0);

  // Update allocation_top of each page in use and compute waste.
  int computed_size = 0;
  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
  while (it.has_next()) {
    Page* page = it.next();
    Address page_top = page->AllocationTop();
    computed_size += page_top - page->ObjectAreaStart();
    if (it.has_next()) {
      accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top);
    }
  }

  // Make sure the computed size - based on the used portion of the
  // pages in use - matches the size we adjust during allocation.
  ASSERT(computed_size == Size());
}

// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
  // Linear allocation in this space has failed. If there is another page
  // in the space, move to that page and allocate there. This allocation
  // should succeed.
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space. Try free list allocation.
  // The fixed space free list implicitly assumes that all free blocks
  // are of the fixed size.
  if (size_in_bytes == object_size_in_bytes_) {
    Object* result = free_list_.Allocate();
    if (!result->IsFailure()) {
      accounting_stats_.AllocateBytes(size_in_bytes);
      return HeapObject::cast(result);
    }
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  ASSERT(!current_page->next_page()->is_valid());
  if (Expand(current_page)) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // Finally, fail.
  return NULL;
}

// Move to the next page (there is assumed to be one) and allocate there.
// The top of page block is always wasted, because it is too small to hold a
// map.
HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
                                           int size_in_bytes) {
  ASSERT(current_page->next_page()->is_valid());
  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
  accounting_stats_.WasteBytes(page_extra_);
  SetAllocationInfo(&allocation_info_, current_page->next_page());
  return AllocateLinearly(&allocation_info_, size_in_bytes);
}

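// Worked example of page_extra_ above, assuming the object area is not an
// exact multiple of the fixed object size: the leftover page_extra_ bytes
// at the top of every page can never hold an object of this space's size,
// so they are charged to waste once as each full page is left behind.
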
#ifdef DEBUG
void FixedSpace::ReportStatistics() {
  int pct = Available() * 100 / Capacity();
  PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  // Report remembered set statistics.
  int rset_marked_pointers = 0;
  int cross_gen_pointers = 0;

  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
  while (page_it.has_next()) {
    Page* p = page_it.next();

    for (Address rset_addr = p->RSetStart();
         rset_addr < p->RSetEnd();
         rset_addr += kIntSize) {
      int rset = Memory::int_at(rset_addr);
      if (rset != 0) {
        // Bits were set
        int intoff = rset_addr - p->address();
        int bitoff = 0;
        for (; bitoff < kBitsPerInt; ++bitoff) {
          if ((rset & (1 << bitoff)) != 0) {
            int bitpos = intoff*kBitsPerByte + bitoff;
            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
            Object** obj = reinterpret_cast<Object**>(slot);
            rset_marked_pointers++;
            if (Heap::InNewSpace(*obj))
              cross_gen_pointers++;
          }
        }
      }
    }
  }

  pct = rset_marked_pointers == 0 ?
      0 : cross_gen_pointers * 100 / rset_marked_pointers;
  PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
         rset_marked_pointers, cross_gen_pointers, pct);

  ClearHistograms();
  HeapObjectIterator obj_it(this);
  while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
  ReportHistogram(false);
}


void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
#endif

// -----------------------------------------------------------------------------
// MapSpace implementation

void MapSpace::PrepareForMarkCompact(bool will_compact) {
  // Call prepare of the super class.
  FixedSpace::PrepareForMarkCompact(will_compact);

  if (will_compact) {
    // Initialize map index entry.
    int page_count = 0;
    PageIterator it(this, PageIterator::ALL_PAGES);
    while (it.has_next()) {
      ASSERT_MAP_PAGE_INDEX(page_count);

      Page* p = it.next();
      ASSERT(p->mc_page_index == page_count);

      page_addresses_[page_count++] = p->address();
    }
  }
}


#ifdef DEBUG
void MapSpace::VerifyObject(HeapObject* object) {
  // The object should be a map or a free-list node.
  ASSERT(object->IsMap() || object->IsByteArray());
}
#endif

// -----------------------------------------------------------------------------
// GlobalPropertyCellSpace implementation

#ifdef DEBUG
void CellSpace::VerifyObject(HeapObject* object) {
  // The object should be a global object property cell or a free-list node.
  ASSERT(object->IsJSGlobalPropertyCell() ||
         object->map() == Heap::two_pointer_filler_map());
}
#endif

// -----------------------------------------------------------------------------
// LargeObjectIterator

LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_chunk_;
  size_func_ = NULL;
}


LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
                                         HeapObjectCallback size_func) {
  current_ = space->first_chunk_;
  size_func_ = size_func;
}


HeapObject* LargeObjectIterator::next() {
  ASSERT(has_next());
  HeapObject* object = current_->GetObject();
  current_ = current_->next();
  return object;
}

// -----------------------------------------------------------------------------
// LargeObjectChunk

LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
                                        size_t* chunk_size,
                                        Executability executable) {
  size_t requested = ChunkSizeFor(size_in_bytes);
  void* mem = MemoryAllocator::AllocateRawMemory(requested,
                                                 chunk_size,
                                                 executable);
  if (mem == NULL) return NULL;
  LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
  if (*chunk_size < requested) {
    MemoryAllocator::FreeRawMemory(mem, *chunk_size);
    LOG(DeleteEvent("LargeObjectChunk", mem));
    return NULL;
  }
  return reinterpret_cast<LargeObjectChunk*>(mem);
}


int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
  int os_alignment = OS::AllocateAlignment();
  if (os_alignment < Page::kPageSize)
    size_in_bytes += (Page::kPageSize - os_alignment);
  return size_in_bytes + Page::kObjectStartOffset;
}

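// Worked example of the padding in ChunkSizeFor above: the OS may place a
// chunk at any multiple of its allocation alignment, so when that alignment
// is smaller than a page, up to (page size - alignment) extra bytes are
// requested to guarantee that a full page boundary falls inside the chunk.
// A self-contained restatement with explicit parameters (hypothetical
// ExampleChunkSizeFor; the real code reads the constants from OS and Page):
static int ExampleChunkSizeFor(int size_in_bytes, int os_alignment,
                               int page_size, int object_start_offset) {
  if (os_alignment < page_size) {
    size_in_bytes += (page_size - os_alignment);  // slack for rounding up
  }
  return size_in_bytes + object_start_offset;  // room for the page header
}
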
// -----------------------------------------------------------------------------
// LargeObjectSpace

LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
    : Space(id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      first_chunk_(NULL),
      size_(0),
      page_count_(0) {}


bool LargeObjectSpace::Setup() {
  first_chunk_ = NULL;
  size_ = 0;
  page_count_ = 0;
  return true;
}


void LargeObjectSpace::TearDown() {
  while (first_chunk_ != NULL) {
    LargeObjectChunk* chunk = first_chunk_;
    first_chunk_ = first_chunk_->next();
    LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
    MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
  }

  size_ = 0;
  page_count_ = 0;
}

#ifdef ENABLE_HEAP_PROTECTION

void LargeObjectSpace::Protect() {
  LargeObjectChunk* chunk = first_chunk_;
  while (chunk != NULL) {
    MemoryAllocator::Protect(chunk->address(), chunk->size());
    chunk = chunk->next();
  }
}


void LargeObjectSpace::Unprotect() {
  LargeObjectChunk* chunk = first_chunk_;
  while (chunk != NULL) {
    bool is_code = chunk->GetObject()->IsCode();
    MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
                               is_code ? EXECUTABLE : NOT_EXECUTABLE);
    chunk = chunk->next();
  }
}

#endif

Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
                                              int object_size,
                                              Executability executable) {
  ASSERT(0 < object_size && object_size <= requested_size);

  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
    return Failure::RetryAfterGC(requested_size, identity());
  }

  size_t chunk_size;
  LargeObjectChunk* chunk =
      LargeObjectChunk::New(requested_size, &chunk_size, executable);
  if (chunk == NULL) {
    return Failure::RetryAfterGC(requested_size, identity());
  }

  size_ += chunk_size;
  page_count_++;
  chunk->set_next(first_chunk_);
  chunk->set_size(chunk_size);
  first_chunk_ = chunk;

  // Set the object address and size in the page header and clear its
  // remembered set.
  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
  Address object_address = page->ObjectAreaStart();
  // Clear the low order bit of the second word in the page to flag it as a
  // large object page. If the chunk_size happened to be written there, its
  // low order bit should already be clear.
  ASSERT((chunk_size & 0x1) == 0);
  page->is_normal_page &= ~0x1;
  page->ClearRSet();
  int extra_bytes = requested_size - object_size;
  if (extra_bytes > 0) {
    // The extra memory for the remembered set should be cleared.
    memset(object_address + object_size, 0, extra_bytes);
  }

  return HeapObject::FromAddress(object_address);
}

Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             EXECUTABLE);
}


Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
  return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
                             size_in_bytes,
                             NOT_EXECUTABLE);
}


Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             NOT_EXECUTABLE);
}

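// Sketch of the extra remembered-set sizing used by AllocateRawFixedArray
// above (hypothetical ExampleExtraRSetBytes; the real formula lives in
// ExtraRSetBytesFor). The assumption, consistent with PrintRSet below, is
// that a fixed array overflowing the normal page area needs one rset bit
// per pointer-sized slot beyond it, rounded up to a whole number of
// int-sized words and expressed in bytes.
static int ExampleExtraRSetBytes(int extra_object_bytes, int pointer_size,
                                 int bits_per_int, int bits_per_byte) {
  int bits = extra_object_bytes / pointer_size;  // one bit per slot
  int rounded_bits =
      ((bits + bits_per_int - 1) / bits_per_int) * bits_per_int;
  return rounded_bits / bits_per_byte;
}
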
// GC support
Object* LargeObjectSpace::FindObject(Address a) {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    Address chunk_address = chunk->address();
    if (chunk_address <= a && a < chunk_address + chunk->size()) {
      return chunk->GetObject();
    }
  }
  return Failure::Exception();
}

void LargeObjectSpace::ClearRSet() {
  ASSERT(Page::is_rset_in_use());

  LargeObjectIterator it(this);
  while (it.has_next()) {
    HeapObject* object = it.next();
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays need remembered set support.
    if (object->IsFixedArray()) {
      // Clear the normal remembered set region of the page.
      Page* page = Page::FromAddress(object->address());
      page->ClearRSet();

      // Clear the extra remembered set.
      int size = object->Size();
      int extra_rset_bytes = ExtraRSetBytesFor(size);
      memset(object->address() + size, 0, extra_rset_bytes);
    }
  }
}

void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
  ASSERT(Page::is_rset_in_use());

  static void* lo_rset_histogram = StatsTable::CreateHistogram(
      "V8.RSetLO",
      0,
      // Keeping this histogram's buckets the same as the paged space
      // histogram.
      Page::kObjectAreaSize / kPointerSize,
      30);

  LargeObjectIterator it(this);
  while (it.has_next()) {
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays can possibly contain pointers to
    // the young generation.
    HeapObject* object = it.next();
    if (object->IsFixedArray()) {
      // Iterate the normal page remembered set range.
      Page* page = Page::FromAddress(object->address());
      Address object_end = object->address() + object->Size();
      int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
                                         Min(page->ObjectAreaEnd(), object_end),
                                         page->RSetStart(),
                                         copy_object_func);

      // Iterate the extra array elements.
      if (object_end > page->ObjectAreaEnd()) {
        count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
                                        object_end, copy_object_func);
      }
      if (lo_rset_histogram != NULL) {
        StatsTable::AddHistogramSample(lo_rset_histogram, count);
      }
    }
  }
}

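// Worked example of the two ranges visited above: a fixed array larger than
// one page keeps remembered-set bits for its first page in the page header
// (covering ObjectAreaStart up to ObjectAreaEnd), while bits for the
// overflowing elements live in the extra region placed directly after the
// object at object_end. Visiting both ranges covers every element slot
// exactly once.
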
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargeObjectChunk* previous = NULL;
  LargeObjectChunk* current = first_chunk_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      previous = current;
      current = current->next();
    } else {
      Address chunk_address = current->address();
      size_t chunk_size = current->size();

      // Cut the chunk out from the chunk list.
      current = current->next();
      if (previous == NULL) {
        first_chunk_ = current;
      } else {
        previous->set_next(current);
      }

      // Free the chunk.
      if (object->IsCode()) {
        LOG(CodeDeleteEvent(object->address()));
      }
      size_ -= chunk_size;
      page_count_--;
      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
      LOG(DeleteEvent("LargeObjectChunk", chunk_address));
    }
  }
}

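// The sweep above is the classic unlink-while-walking pattern on a singly
// linked list. A self-contained sketch with hypothetical Example* names:
// keep a trailing 'previous' pointer, and splice out (then release) every
// chunk whose object is unmarked.
struct ExampleChunk {
  bool marked;
  ExampleChunk* next;
};

static ExampleChunk* ExampleSweep(ExampleChunk* head,
                                  void (*release)(ExampleChunk*)) {
  ExampleChunk* previous = NULL;
  ExampleChunk* current = head;
  while (current != NULL) {
    ExampleChunk* next = current->next;
    if (current->marked) {
      current->marked = false;  // clear the mark for the next collection
      previous = current;
    } else {
      if (previous == NULL) {
        head = next;  // unlinking the list head
      } else {
        previous->next = next;  // unlinking an interior chunk
      }
      release(current);
    }
    current = next;
  }
  return head;
}
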
bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  Page* page = Page::FromAddress(address);

  SLOW_ASSERT(!page->IsLargeObjectPage()
              || !FindObject(address)->IsFailure());

  return page->IsLargeObjectPage();
}

#ifdef DEBUG
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    ASSERT(object->address() == page->ObjectAreaStart());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    ASSERT(object->IsCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
           object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      Code::cast(object)->ConvertICTargetsFromAddressToObject();
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
      Code::cast(object)->ConvertICTargetsFromObjectToAddress();
    } else if (object->IsFixedArray()) {
      // We loop over fixed arrays ourselves, rather than using the visitor,
      // because the visitor doesn't support the start/offset iteration
      // needed for IsRSetSet.
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          ASSERT(Heap::Contains(element_object));
          ASSERT(element_object->map()->IsMap());
          if (Heap::InNewSpace(element_object)) {
            ASSERT(Page::IsRSetSet(object->address(),
                                   FixedArray::kHeaderSize + j * kPointerSize));
          }
        }
      }
    }
  }
}

void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  while (it.has_next()) {
    it.next()->Print();
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF(" size: %d\n", size_);
  int num_objects = 0;
  ClearHistograms();
  LargeObjectIterator it(this);
  while (it.has_next()) {
    num_objects++;
    CollectHistogramInfo(it.next());
  }

  PrintF(" number of objects %d\n", num_objects);
  if (num_objects > 0) ReportHistogram(false);
}

void LargeObjectSpace::CollectCodeStatistics() {
  LargeObjectIterator obj_it(this);
  while (obj_it.has_next()) {
    HeapObject* obj = obj_it.next();
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      code_kind_statistics[code->kind()] += code->Size();
    }
  }
}

void LargeObjectSpace::PrintRSet() {
  LargeObjectIterator it(this);
  while (it.has_next()) {
    HeapObject* object = it.next();
    if (object->IsFixedArray()) {
      Page* page = Page::FromAddress(object->address());

      Address allocation_top = object->address() + object->Size();
      PrintF("large page 0x%x:\n", page);
      PrintRSetRange(page->RSetStart(), page->RSetEnd(),
                     reinterpret_cast<Object**>(object->address()),
                     allocation_top);
      int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
      int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
                                    kBitsPerInt);
      PrintF("------------------------------------------------------------"
             "-----------\n");
      PrintRSetRange(allocation_top,
                     allocation_top + extra_rset_bits / kBitsPerByte,
                     reinterpret_cast<Object**>(object->address()
                                                + Page::kObjectAreaSize),
                     allocation_top);
      PrintF("\n");
    }
  }
}
#endif  // DEBUG

} }  // namespace v8::internal