1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "macro-assembler.h"
31 #include "mark-compact.h"
32 #include "platform.h"
33 
34 namespace v8 {
35 namespace internal {
36 
37 // For contiguous spaces, top should be in the space (or at the end) and limit
38 // should be the end of the space.
39 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
40   ASSERT((space).low() <= (info).top                  \
41          && (info).top <= (space).high()              \
42          && (info).limit == (space).high())
43 
44 
45 // ----------------------------------------------------------------------------
46 // HeapObjectIterator
47 
48 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
49   Initialize(space->bottom(), space->top(), NULL);
50 }
51 
52 
53 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
54                                        HeapObjectCallback size_func) {
55   Initialize(space->bottom(), space->top(), size_func);
56 }
57 
58 
59 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
60   Initialize(start, space->top(), NULL);
61 }
62 
63 
64 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
65                                        HeapObjectCallback size_func) {
66   Initialize(start, space->top(), size_func);
67 }
68 
69 
70 void HeapObjectIterator::Initialize(Address cur, Address end,
71                                     HeapObjectCallback size_f) {
72   cur_addr_ = cur;
73   end_addr_ = end;
74   end_page_ = Page::FromAllocationTop(end);
75   size_func_ = size_f;
76   Page* p = Page::FromAllocationTop(cur_addr_);
77   cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
78 
79 #ifdef DEBUG
80   Verify();
81 #endif
82 }
83 
84 
85 HeapObject* HeapObjectIterator::FromNextPage() {
86   if (cur_addr_ == end_addr_) return NULL;
87 
88   Page* cur_page = Page::FromAllocationTop(cur_addr_);
89   cur_page = cur_page->next_page();
90   ASSERT(cur_page->is_valid());
91 
92   cur_addr_ = cur_page->ObjectAreaStart();
93   cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
94 
95   if (cur_addr_ == end_addr_) return NULL;
96   ASSERT(cur_addr_ < cur_limit_);
97 #ifdef DEBUG
98   Verify();
99 #endif
100   return FromCurrentPage();
101 }
102 
103 
104 #ifdef DEBUG
105 void HeapObjectIterator::Verify() {
106   Page* p = Page::FromAllocationTop(cur_addr_);
107   ASSERT(p == Page::FromAllocationTop(cur_limit_));
108   ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
109 }
110 #endif
111 
112 
113 // -----------------------------------------------------------------------------
114 // PageIterator
115 
116 PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
117   prev_page_ = NULL;
118   switch (mode) {
119     case PAGES_IN_USE:
120       stop_page_ = space->AllocationTopPage();
121       break;
122     case PAGES_USED_BY_MC:
123       stop_page_ = space->MCRelocationTopPage();
124       break;
125     case ALL_PAGES:
126 #ifdef DEBUG
127       // Verify that the cached last page in the space is actually the
128       // last page.
129       for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
130         if (!p->next_page()->is_valid()) {
131           ASSERT(space->last_page_ == p);
132         }
133       }
134 #endif
135       stop_page_ = space->last_page_;
136       break;
137   }
138 }
139 
140 
141 // -----------------------------------------------------------------------------
142 // Page
143 
144 #ifdef DEBUG
145 Page::RSetState Page::rset_state_ = Page::IN_USE;
146 #endif
147 
148 // -----------------------------------------------------------------------------
149 // CodeRange
150 
151 List<CodeRange::FreeBlock> CodeRange::free_list_(0);
152 List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
153 int CodeRange::current_allocation_block_index_ = 0;
154 VirtualMemory* CodeRange::code_range_ = NULL;
155 
156 
157 bool CodeRange::Setup(const size_t requested) {
158   ASSERT(code_range_ == NULL);
159 
160   code_range_ = new VirtualMemory(requested);
161   CHECK(code_range_ != NULL);
162   if (!code_range_->IsReserved()) {
163     delete code_range_;
164     code_range_ = NULL;
165     return false;
166   }
167 
168   // We are sure that we have mapped a block of requested addresses.
169   ASSERT(code_range_->size() == requested);
170   LOG(NewEvent("CodeRange", code_range_->address(), requested));
171   allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
172   current_allocation_block_index_ = 0;
173   return true;
174 }
175 
176 
177 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
178                                        const FreeBlock* right) {
179   // The entire point of CodeRange is that the difference between two
180   // addresses in the range can be represented as a signed 32-bit int,
181   // so the cast is semantically correct.
182   return static_cast<int>(left->start - right->start);
183 }
184 
185 
186 void CodeRange::GetNextAllocationBlock(size_t requested) {
187   for (current_allocation_block_index_++;
188        current_allocation_block_index_ < allocation_list_.length();
189        current_allocation_block_index_++) {
190     if (requested <= allocation_list_[current_allocation_block_index_].size) {
191       return;  // Found a large enough allocation block.
192     }
193   }
194 
195   // Sort and merge the free blocks on the free list and the allocation list.
196   free_list_.AddAll(allocation_list_);
197   allocation_list_.Clear();
198   free_list_.Sort(&CompareFreeBlockAddress);
199   for (int i = 0; i < free_list_.length();) {
200     FreeBlock merged = free_list_[i];
201     i++;
202     // Add adjacent free blocks to the current merged block.
203     while (i < free_list_.length() &&
204            free_list_[i].start == merged.start + merged.size) {
205       merged.size += free_list_[i].size;
206       i++;
207     }
208     if (merged.size > 0) {
209       allocation_list_.Add(merged);
210     }
211   }
212   free_list_.Clear();
213 
214   for (current_allocation_block_index_ = 0;
215        current_allocation_block_index_ < allocation_list_.length();
216        current_allocation_block_index_++) {
217     if (requested <= allocation_list_[current_allocation_block_index_].size) {
218       return;  // Found a large enough allocation block.
219     }
220   }
221 
222   // Code range is full or too fragmented.
223   V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
224 }
225 
226 
227 
228 void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
229   ASSERT(current_allocation_block_index_ < allocation_list_.length());
230   if (requested > allocation_list_[current_allocation_block_index_].size) {
231     // Find an allocation block large enough.  This function call may
232     // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
233     GetNextAllocationBlock(requested);
234   }
235   // Commit the requested memory at the start of the current allocation block.
236   *allocated = RoundUp(requested, Page::kPageSize);
237   FreeBlock current = allocation_list_[current_allocation_block_index_];
238   if (*allocated >= current.size - Page::kPageSize) {
239     // Don't leave a small free block, useless for a large object or chunk.
240     *allocated = current.size;
241   }
242   ASSERT(*allocated <= current.size);
243   if (!code_range_->Commit(current.start, *allocated, true)) {
244     *allocated = 0;
245     return NULL;
246   }
247   allocation_list_[current_allocation_block_index_].start += *allocated;
248   allocation_list_[current_allocation_block_index_].size -= *allocated;
249   if (*allocated == current.size) {
250     GetNextAllocationBlock(0);  // This block is used up, get the next one.
251   }
252   return current.start;
253 }
254 
255 
256 void CodeRange::FreeRawMemory(void* address, size_t length) {
257   free_list_.Add(FreeBlock(address, length));
258   code_range_->Uncommit(address, length);
259 }
260 
261 
262 void CodeRange::TearDown() {
263     delete code_range_;  // Frees all memory in the virtual memory range.
264     code_range_ = NULL;
265     free_list_.Free();
266     allocation_list_.Free();
267 }
268 
269 
270 // -----------------------------------------------------------------------------
271 // MemoryAllocator
272 //
273 int MemoryAllocator::capacity_   = 0;
274 int MemoryAllocator::size_       = 0;
275 
276 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;
277 
278 // 270 is an estimate based on the static default heap size of a pair of 256K
279 // semispaces and a 64M old generation.
280 const int kEstimatedNumberOfChunks = 270;
281 List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
282     kEstimatedNumberOfChunks);
283 List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
284 int MemoryAllocator::max_nof_chunks_ = 0;
285 int MemoryAllocator::top_ = 0;
286 
287 
288 void MemoryAllocator::Push(int free_chunk_id) {
289   ASSERT(max_nof_chunks_ > 0);
290   ASSERT(top_ < max_nof_chunks_);
291   free_chunk_ids_[top_++] = free_chunk_id;
292 }
293 
294 
295 int MemoryAllocator::Pop() {
296   ASSERT(top_ > 0);
297   return free_chunk_ids_[--top_];
298 }
299 
300 
301 bool MemoryAllocator::Setup(int capacity) {
302   capacity_ = RoundUp(capacity, Page::kPageSize);
303 
304   // Over-estimate the size of chunks_ array.  It assumes the expansion of old
305   // space is always in the unit of a chunk (kChunkSize) except the last
306   // expansion.
307   //
308   // Due to alignment, allocated space might be one page less than required
309   // number (kPagesPerChunk) of pages for old spaces.
310   //
311   // Reserve two chunk ids for semispaces, one for map space, one for old
312   // space, and one for code space.
313   max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
314   if (max_nof_chunks_ > kMaxNofChunks) return false;
315 
316   size_ = 0;
317   ChunkInfo info;  // uninitialized element.
318   for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
319     chunks_.Add(info);
320     free_chunk_ids_.Add(i);
321   }
322   top_ = max_nof_chunks_;
323   return true;
324 }
325 
326 
327 void MemoryAllocator::TearDown() {
328   for (int i = 0; i < max_nof_chunks_; i++) {
329     if (chunks_[i].address() != NULL) DeleteChunk(i);
330   }
331   chunks_.Clear();
332   free_chunk_ids_.Clear();
333 
334   if (initial_chunk_ != NULL) {
335     LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
336     delete initial_chunk_;
337     initial_chunk_ = NULL;
338   }
339 
340   ASSERT(top_ == max_nof_chunks_);  // all chunks are free
341   top_ = 0;
342   capacity_ = 0;
343   size_ = 0;
344   max_nof_chunks_ = 0;
345 }
346 
347 
348 void* MemoryAllocator::AllocateRawMemory(const size_t requested,
349                                          size_t* allocated,
350                                          Executability executable) {
351   if (size_ + static_cast<int>(requested) > capacity_) return NULL;
352   void* mem;
353   if (executable == EXECUTABLE  && CodeRange::exists()) {
354     mem = CodeRange::AllocateRawMemory(requested, allocated);
355   } else {
356     mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
357   }
358   int alloced = static_cast<int>(*allocated);
359   size_ += alloced;
360 #ifdef DEBUG
361   ZapBlock(reinterpret_cast<Address>(mem), alloced);
362 #endif
363   Counters::memory_allocated.Increment(alloced);
364   return mem;
365 }
366 
367 
368 void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
369 #ifdef DEBUG
370   ZapBlock(reinterpret_cast<Address>(mem), length);
371 #endif
372   if (CodeRange::contains(static_cast<Address>(mem))) {
373     CodeRange::FreeRawMemory(mem, length);
374   } else {
375     OS::Free(mem, length);
376   }
377   Counters::memory_allocated.Decrement(static_cast<int>(length));
378   size_ -= static_cast<int>(length);
379   ASSERT(size_ >= 0);
380 }
381 
382 
383 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
384   ASSERT(initial_chunk_ == NULL);
385 
386   initial_chunk_ = new VirtualMemory(requested);
387   CHECK(initial_chunk_ != NULL);
388   if (!initial_chunk_->IsReserved()) {
389     delete initial_chunk_;
390     initial_chunk_ = NULL;
391     return NULL;
392   }
393 
394   // We are sure that we have mapped a block of requested addresses.
395   ASSERT(initial_chunk_->size() == requested);
396   LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
397   size_ += static_cast<int>(requested);
398   return initial_chunk_->address();
399 }
400 
401 
402 static int PagesInChunk(Address start, size_t size) {
403   // The first page starts on the first page-aligned address from start onward
404   // and the last page ends on the last page-aligned address before
405   // start+size.  Page::kPageSize is a power of two so we can divide by
406   // shifting.
407   return static_cast<int>((RoundDown(start + size, Page::kPageSize)
408       - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
409 }
410 
411 
412 Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
413                                      PagedSpace* owner) {
414   if (requested_pages <= 0) return Page::FromAddress(NULL);
415   size_t chunk_size = requested_pages * Page::kPageSize;
416 
417   // There is not enough space to guarantee the desired number of pages can be
418   // allocated.
419   if (size_ + static_cast<int>(chunk_size) > capacity_) {
420     // Request as many pages as we can.
421     chunk_size = capacity_ - size_;
422     requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);
423 
424     if (requested_pages <= 0) return Page::FromAddress(NULL);
425   }
426   void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
427   if (chunk == NULL) return Page::FromAddress(NULL);
428   LOG(NewEvent("PagedChunk", chunk, chunk_size));
429 
430   *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
431   if (*allocated_pages == 0) {
432     FreeRawMemory(chunk, chunk_size);
433     LOG(DeleteEvent("PagedChunk", chunk));
434     return Page::FromAddress(NULL);
435   }
436 
437   int chunk_id = Pop();
438   chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
439 
440   return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
441 }
442 
443 
444 Page* MemoryAllocator::CommitPages(Address start, size_t size,
445                                    PagedSpace* owner, int* num_pages) {
446   ASSERT(start != NULL);
447   *num_pages = PagesInChunk(start, size);
448   ASSERT(*num_pages > 0);
449   ASSERT(initial_chunk_ != NULL);
450   ASSERT(InInitialChunk(start));
451   ASSERT(InInitialChunk(start + size - 1));
452   if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
453     return Page::FromAddress(NULL);
454   }
455 #ifdef DEBUG
456   ZapBlock(start, size);
457 #endif
458   Counters::memory_allocated.Increment(static_cast<int>(size));
459 
460   // So long as we correctly overestimated the number of chunks we should not
461   // run out of chunk ids.
462   CHECK(!OutOfChunkIds());
463   int chunk_id = Pop();
464   chunks_[chunk_id].init(start, size, owner);
465   return InitializePagesInChunk(chunk_id, *num_pages, owner);
466 }
467 
468 
469 bool MemoryAllocator::CommitBlock(Address start,
470                                   size_t size,
471                                   Executability executable) {
472   ASSERT(start != NULL);
473   ASSERT(size > 0);
474   ASSERT(initial_chunk_ != NULL);
475   ASSERT(InInitialChunk(start));
476   ASSERT(InInitialChunk(start + size - 1));
477 
478   if (!initial_chunk_->Commit(start, size, executable)) return false;
479 #ifdef DEBUG
480   ZapBlock(start, size);
481 #endif
482   Counters::memory_allocated.Increment(static_cast<int>(size));
483   return true;
484 }
485 
486 
487 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
488   ASSERT(start != NULL);
489   ASSERT(size > 0);
490   ASSERT(initial_chunk_ != NULL);
491   ASSERT(InInitialChunk(start));
492   ASSERT(InInitialChunk(start + size - 1));
493 
494   if (!initial_chunk_->Uncommit(start, size)) return false;
495   Counters::memory_allocated.Decrement(static_cast<int>(size));
496   return true;
497 }
498 
499 
500 void MemoryAllocator::ZapBlock(Address start, size_t size) {
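  // Editor's note (descriptive, based on the code below and its DEBUG-only call
  // sites in this file): the block is overwritten one pointer-sized slot at a
  // time with kZapValue, so stale references into freed or uncommitted memory
  // show up as a recognizable debug pattern rather than as plausible pointers.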
501   for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
502     Memory::Address_at(start + s) = kZapValue;
503   }
504 }
505 
506 
507 Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
508                                               PagedSpace* owner) {
509   ASSERT(IsValidChunk(chunk_id));
510   ASSERT(pages_in_chunk > 0);
511 
512   Address chunk_start = chunks_[chunk_id].address();
513 
514   Address low = RoundUp(chunk_start, Page::kPageSize);
515 
516 #ifdef DEBUG
517   size_t chunk_size = chunks_[chunk_id].size();
518   Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
519   ASSERT(pages_in_chunk <=
520         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
521 #endif
522 
523   Address page_addr = low;
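  // Editor's note (descriptive of the loop below): each page's opaque_header
  // packs two values, the page-aligned address of the next page in the chunk
  // (whose low bits are zero by alignment) and the owning chunk id stored in
  // those low bits.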
524   for (int i = 0; i < pages_in_chunk; i++) {
525     Page* p = Page::FromAddress(page_addr);
526     p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
527     p->is_normal_page = 1;
528     page_addr += Page::kPageSize;
529   }
530 
531   // Set the next page of the last page to 0.
532   Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
533   last_page->opaque_header = OffsetFrom(0) | chunk_id;
534 
535   return Page::FromAddress(low);
536 }
537 
538 
539 Page* MemoryAllocator::FreePages(Page* p) {
540   if (!p->is_valid()) return p;
541 
542   // Find the first page in the same chunk as 'p'
543   Page* first_page = FindFirstPageInSameChunk(p);
544   Page* page_to_return = Page::FromAddress(NULL);
545 
546   if (p != first_page) {
547     // Find the last page in the same chunk as 'p'.
548     Page* last_page = FindLastPageInSameChunk(p);
549     first_page = GetNextPage(last_page);  // first page in next chunk
550 
551     // set the next_page of last_page to NULL
552     SetNextPage(last_page, Page::FromAddress(NULL));
553     page_to_return = p;  // return 'p' when exiting
554   }
555 
556   while (first_page->is_valid()) {
557     int chunk_id = GetChunkId(first_page);
558     ASSERT(IsValidChunk(chunk_id));
559 
560     // Find the first page of the next chunk before deleting this chunk.
561     first_page = GetNextPage(FindLastPageInSameChunk(first_page));
562 
563     // Free the current chunk.
564     DeleteChunk(chunk_id);
565   }
566 
567   return page_to_return;
568 }
569 
570 
571 void MemoryAllocator::DeleteChunk(int chunk_id) {
572   ASSERT(IsValidChunk(chunk_id));
573 
574   ChunkInfo& c = chunks_[chunk_id];
575 
576   // We cannot free a chunk contained in the initial chunk because it was not
577   // allocated with AllocateRawMemory.  Instead we uncommit the virtual
578   // memory.
579   if (InInitialChunk(c.address())) {
580     // TODO(1240712): VirtualMemory::Uncommit has a return value which
581     // is ignored here.
582     initial_chunk_->Uncommit(c.address(), c.size());
583     Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
584   } else {
585     LOG(DeleteEvent("PagedChunk", c.address()));
586     FreeRawMemory(c.address(), c.size());
587   }
588   c.init(NULL, 0, NULL);
589   Push(chunk_id);
590 }
591 
592 
593 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
594   int chunk_id = GetChunkId(p);
595   ASSERT(IsValidChunk(chunk_id));
596 
597   Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
598   return Page::FromAddress(low);
599 }
600 
601 
602 Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
603   int chunk_id = GetChunkId(p);
604   ASSERT(IsValidChunk(chunk_id));
605 
606   Address chunk_start = chunks_[chunk_id].address();
607   size_t chunk_size = chunks_[chunk_id].size();
608 
609   Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
610   ASSERT(chunk_start <= p->address() && p->address() < high);
611 
612   return Page::FromAddress(high - Page::kPageSize);
613 }
614 
615 
616 #ifdef DEBUG
617 void MemoryAllocator::ReportStatistics() {
618   float pct = static_cast<float>(capacity_ - size_) / capacity_;
619   PrintF("  capacity: %d, used: %d, available: %%%d\n\n",
620          capacity_, size_, static_cast<int>(pct*100));
621 }
622 #endif
623 
624 
625 // -----------------------------------------------------------------------------
626 // PagedSpace implementation
627 
628 PagedSpace::PagedSpace(int max_capacity,
629                        AllocationSpace id,
630                        Executability executable)
631     : Space(id, executable) {
632   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
633                   * Page::kObjectAreaSize;
634   accounting_stats_.Clear();
635 
636   allocation_info_.top = NULL;
637   allocation_info_.limit = NULL;
638 
639   mc_forwarding_info_.top = NULL;
640   mc_forwarding_info_.limit = NULL;
641 }
642 
643 
644 bool PagedSpace::Setup(Address start, size_t size) {
645   if (HasBeenSetup()) return false;
646 
647   int num_pages = 0;
648   // Try to use the virtual memory range passed to us.  If it is too small to
649   // contain at least one page, ignore it and allocate instead.
650   int pages_in_chunk = PagesInChunk(start, size);
651   if (pages_in_chunk > 0) {
652     first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
653                                                Page::kPageSize * pages_in_chunk,
654                                                this, &num_pages);
655   } else {
656     int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
657                               max_capacity_ / Page::kObjectAreaSize);
658     first_page_ =
659         MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
660     if (!first_page_->is_valid()) return false;
661   }
662 
663   // We are sure that the first page is valid and that we have at least one
664   // page.
665   ASSERT(first_page_->is_valid());
666   ASSERT(num_pages > 0);
667   accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
668   ASSERT(Capacity() <= max_capacity_);
669 
670   // Sequentially initialize remembered sets in the newly allocated
671   // pages and cache the current last page in the space.
672   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
673     p->ClearRSet();
674     last_page_ = p;
675   }
676 
677   // Use first_page_ for allocation.
678   SetAllocationInfo(&allocation_info_, first_page_);
679 
680   return true;
681 }
682 
683 
684 bool PagedSpace::HasBeenSetup() {
685   return (Capacity() > 0);
686 }
687 
688 
689 void PagedSpace::TearDown() {
690   first_page_ = MemoryAllocator::FreePages(first_page_);
691   ASSERT(!first_page_->is_valid());
692 
693   accounting_stats_.Clear();
694 }
695 
696 
697 #ifdef ENABLE_HEAP_PROTECTION
698 
699 void PagedSpace::Protect() {
700   Page* page = first_page_;
701   while (page->is_valid()) {
702     MemoryAllocator::ProtectChunkFromPage(page);
703     page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
704   }
705 }
706 
707 
708 void PagedSpace::Unprotect() {
709   Page* page = first_page_;
710   while (page->is_valid()) {
711     MemoryAllocator::UnprotectChunkFromPage(page);
712     page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
713   }
714 }
715 
716 #endif
717 
718 
719 void PagedSpace::ClearRSet() {
720   PageIterator it(this, PageIterator::ALL_PAGES);
721   while (it.has_next()) {
722     it.next()->ClearRSet();
723   }
724 }
725 
726 
727 Object* PagedSpace::FindObject(Address addr) {
728   // Note: this function can only be called before or after mark-compact GC
729   // because it accesses map pointers.
730   ASSERT(!MarkCompactCollector::in_use());
731 
732   if (!Contains(addr)) return Failure::Exception();
733 
734   Page* p = Page::FromAddress(addr);
735   ASSERT(IsUsed(p));
736   Address cur = p->ObjectAreaStart();
737   Address end = p->AllocationTop();
738   while (cur < end) {
739     HeapObject* obj = HeapObject::FromAddress(cur);
740     Address next = cur + obj->Size();
741     if ((cur <= addr) && (addr < next)) return obj;
742     cur = next;
743   }
744 
745   UNREACHABLE();
746   return Failure::Exception();
747 }
748 
749 
750 bool PagedSpace::IsUsed(Page* page) {
751   PageIterator it(this, PageIterator::PAGES_IN_USE);
752   while (it.has_next()) {
753     if (page == it.next()) return true;
754   }
755   return false;
756 }
757 
758 
759 void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
760   alloc_info->top = p->ObjectAreaStart();
761   alloc_info->limit = p->ObjectAreaEnd();
762   ASSERT(alloc_info->VerifyPagedAllocation());
763 }
764 
765 
766 void PagedSpace::MCResetRelocationInfo() {
767   // Set page indexes.
768   int i = 0;
769   PageIterator it(this, PageIterator::ALL_PAGES);
770   while (it.has_next()) {
771     Page* p = it.next();
772     p->mc_page_index = i++;
773   }
774 
775   // Set mc_forwarding_info_ to the first page in the space.
776   SetAllocationInfo(&mc_forwarding_info_, first_page_);
777   // All the bytes in the space are 'available'.  We will rediscover
778   // allocated and wasted bytes during GC.
779   accounting_stats_.Reset();
780 }
781 
782 
783 int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
784 #ifdef DEBUG
785   // The Contains function considers the address at the beginning of a
786   // page in the page, MCSpaceOffsetForAddress considers it is in the
787   // previous page.
788   if (Page::IsAlignedToPageSize(addr)) {
789     ASSERT(Contains(addr - kPointerSize));
790   } else {
791     ASSERT(Contains(addr));
792   }
793 #endif
794 
795   // If addr is at the end of a page, it belongs to previous page
796   Page* p = Page::IsAlignedToPageSize(addr)
797             ? Page::FromAllocationTop(addr)
798             : Page::FromAddress(addr);
799   int index = p->mc_page_index;
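  // Editor's note (descriptive of the return below): the address is encoded as
  // a space-relative offset, the page's index within the space times the page
  // size plus the offset of addr within its page.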
800   return (index * Page::kPageSize) + p->Offset(addr);
801 }
802 
803 
804 // Slow case for reallocating and promoting objects during a compacting
805 // collection.  This function is not space-specific.
806 HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
807   Page* current_page = TopPageOf(mc_forwarding_info_);
808   if (!current_page->next_page()->is_valid()) {
809     if (!Expand(current_page)) {
810       return NULL;
811     }
812   }
813 
814   // There are surely more pages in the space now.
815   ASSERT(current_page->next_page()->is_valid());
816   // We do not add the top of page block for current page to the space's
817   // free list---the block may contain live objects so we cannot write
818   // bookkeeping information to it.  Instead, we will recover top of page
819   // blocks when we move objects to their new locations.
820   //
821   // We do however write the allocation pointer to the page.  The encoding
822   // of forwarding addresses is as an offset in terms of live bytes, so we
823   // need quick access to the allocation top of each page to decode
824   // forwarding addresses.
825   current_page->mc_relocation_top = mc_forwarding_info_.top;
826   SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
827   return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
828 }
829 
830 
831 bool PagedSpace::Expand(Page* last_page) {
832   ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
833   ASSERT(Capacity() % Page::kObjectAreaSize == 0);
834 
835   if (Capacity() == max_capacity_) return false;
836 
837   ASSERT(Capacity() < max_capacity_);
838   // Last page must be valid and its next page is invalid.
839   ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
840 
841   int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
842   if (available_pages <= 0) return false;
843 
844   int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
845   Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
846   if (!p->is_valid()) return false;
847 
848   accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
849   ASSERT(Capacity() <= max_capacity_);
850 
851   MemoryAllocator::SetNextPage(last_page, p);
852 
853   // Sequentially clear remembered set of new pages and cache the
854   // new last page in the space.
855   while (p->is_valid()) {
856     p->ClearRSet();
857     last_page_ = p;
858     p = p->next_page();
859   }
860 
861   return true;
862 }
863 
864 
865 #ifdef DEBUG
866 int PagedSpace::CountTotalPages() {
867   int count = 0;
868   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
869     count++;
870   }
871   return count;
872 }
873 #endif
874 
875 
876 void PagedSpace::Shrink() {
877   // Release half of free pages.
878   Page* top_page = AllocationTopPage();
879   ASSERT(top_page->is_valid());
880 
881   // Count the number of pages we would like to free.
882   int pages_to_free = 0;
883   for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
884     pages_to_free++;
885   }
886 
887   // Free pages after top_page.
888   Page* p = MemoryAllocator::FreePages(top_page->next_page());
889   MemoryAllocator::SetNextPage(top_page, p);
890 
891   // Find out how many pages we failed to free and update last_page_.
892   // Please note pages can only be freed in whole chunks.
893   last_page_ = top_page;
894   for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
895     pages_to_free--;
896     last_page_ = p;
897   }
898 
899   accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
900   ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
901 }
902 
903 
904 bool PagedSpace::EnsureCapacity(int capacity) {
905   if (Capacity() >= capacity) return true;
906 
907   // Start from the allocation top and loop to the last page in the space.
908   Page* last_page = AllocationTopPage();
909   Page* next_page = last_page->next_page();
910   while (next_page->is_valid()) {
911     last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
912     next_page = last_page->next_page();
913   }
914 
915   // Expand the space until it has the required capacity or expansion fails.
916   do {
917     if (!Expand(last_page)) return false;
918     ASSERT(last_page->next_page()->is_valid());
919     last_page =
920         MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
921   } while (Capacity() < capacity);
922 
923   return true;
924 }
925 
926 
927 #ifdef DEBUG
928 void PagedSpace::Print() { }
929 #endif
930 
931 
932 #ifdef DEBUG
933 // We do not assume that the PageIterator works, because it depends on the
934 // invariants we are checking during verification.
935 void PagedSpace::Verify(ObjectVisitor* visitor) {
936   // The allocation pointer should be valid, and it should be in a page in the
937   // space.
938   ASSERT(allocation_info_.VerifyPagedAllocation());
939   Page* top_page = Page::FromAllocationTop(allocation_info_.top);
940   ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
941 
942   // Loop over all the pages.
943   bool above_allocation_top = false;
944   Page* current_page = first_page_;
945   while (current_page->is_valid()) {
946     if (above_allocation_top) {
947       // We don't care what's above the allocation top.
948     } else {
949       // Unless this is the last page in the space containing allocated
950       // objects, the allocation top should be at a constant offset from the
951       // object area end.
952       Address top = current_page->AllocationTop();
953       if (current_page == top_page) {
954         ASSERT(top == allocation_info_.top);
955         // The next page will be above the allocation top.
956         above_allocation_top = true;
957       } else {
958         ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
959       }
960 
961       // It should be packed with objects from the bottom to the top.
962       Address current = current_page->ObjectAreaStart();
963       while (current < top) {
964         HeapObject* object = HeapObject::FromAddress(current);
965 
966         // The first word should be a map, and we expect all map pointers to
967         // be in map space.
968         Map* map = object->map();
969         ASSERT(map->IsMap());
970         ASSERT(Heap::map_space()->Contains(map));
971 
972         // Perform space-specific object verification.
973         VerifyObject(object);
974 
975         // The object itself should look OK.
976         object->Verify();
977 
978         // All the interior pointers should be contained in the heap and
979         // have their remembered set bits set if required as determined
980         // by the visitor.
981         int size = object->Size();
982         object->IterateBody(map->instance_type(), size, visitor);
983 
984         current += size;
985       }
986 
987       // The allocation pointer should not be in the middle of an object.
988       ASSERT(current == top);
989     }
990 
991     current_page = current_page->next_page();
992   }
993 }
994 #endif
995 
996 
997 // -----------------------------------------------------------------------------
998 // NewSpace implementation
999 
1000 
1001 bool NewSpace::Setup(Address start, int size) {
1002   // Setup new space based on the preallocated memory block defined by
1003   // start and size. The provided space is divided into two semi-spaces.
1004   // To support fast containment testing in the new space, the size of
1005   // this chunk must be a power of two and it must be aligned to its size.
1006   int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
1007   int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();
1008 
1009   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1010   ASSERT(IsPowerOf2(maximum_semispace_capacity));
1011 
1012   // Allocate and setup the histogram arrays if necessary.
1013 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1014   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1015   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1016 
1017 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1018                        promoted_histogram_[name].set_name(#name);
1019   INSTANCE_TYPE_LIST(SET_NAME)
1020 #undef SET_NAME
1021 #endif
1022 
1023   ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
1024   ASSERT(IsAddressAligned(start, size, 0));
1025 
1026   if (!to_space_.Setup(start,
1027                        initial_semispace_capacity,
1028                        maximum_semispace_capacity)) {
1029     return false;
1030   }
1031   if (!from_space_.Setup(start + maximum_semispace_capacity,
1032                          initial_semispace_capacity,
1033                          maximum_semispace_capacity)) {
1034     return false;
1035   }
1036 
1037   start_ = start;
1038   address_mask_ = ~(size - 1);
1039   object_mask_ = address_mask_ | kHeapObjectTag;
1040   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
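  // Editor's note (descriptive of the masks above): because the reserved chunk
  // is a power-of-two size and aligned to that size, masking an address with
  // address_mask_ and comparing against start_ gives a cheap containment test;
  // object_mask_/object_expected_ apply the same test to tagged object pointers.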
1041 
1042   allocation_info_.top = to_space_.low();
1043   allocation_info_.limit = to_space_.high();
1044   mc_forwarding_info_.top = NULL;
1045   mc_forwarding_info_.limit = NULL;
1046 
1047   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1048   return true;
1049 }
1050 
1051 
1052 void NewSpace::TearDown() {
1053 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1054   if (allocated_histogram_) {
1055     DeleteArray(allocated_histogram_);
1056     allocated_histogram_ = NULL;
1057   }
1058   if (promoted_histogram_) {
1059     DeleteArray(promoted_histogram_);
1060     promoted_histogram_ = NULL;
1061   }
1062 #endif
1063 
1064   start_ = NULL;
1065   allocation_info_.top = NULL;
1066   allocation_info_.limit = NULL;
1067   mc_forwarding_info_.top = NULL;
1068   mc_forwarding_info_.limit = NULL;
1069 
1070   to_space_.TearDown();
1071   from_space_.TearDown();
1072 }
1073 
1074 
1075 #ifdef ENABLE_HEAP_PROTECTION
1076 
1077 void NewSpace::Protect() {
1078   MemoryAllocator::Protect(ToSpaceLow(), Capacity());
1079   MemoryAllocator::Protect(FromSpaceLow(), Capacity());
1080 }
1081 
1082 
1083 void NewSpace::Unprotect() {
1084   MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
1085                              to_space_.executable());
1086   MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
1087                              from_space_.executable());
1088 }
1089 
1090 #endif
1091 
1092 
1093 void NewSpace::Flip() {
1094   SemiSpace tmp = from_space_;
1095   from_space_ = to_space_;
1096   to_space_ = tmp;
1097 }
1098 
1099 
1100 void NewSpace::Grow() {
1101   ASSERT(Capacity() < MaximumCapacity());
1102   if (to_space_.Grow()) {
1103     // Only grow from space if we managed to grow to space.
1104     if (!from_space_.Grow()) {
1105       // If we managed to grow to space but couldn't grow from space,
1106       // attempt to shrink to space.
1107       if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1108         // We are in an inconsistent state because we could not
1109         // commit/uncommit memory from new space.
1110         V8::FatalProcessOutOfMemory("Failed to grow new space.");
1111       }
1112     }
1113   }
1114   allocation_info_.limit = to_space_.high();
1115   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1116 }
1117 
1118 
1119 void NewSpace::Shrink() {
1120   int new_capacity = Max(InitialCapacity(), 2 * Size());
1121   int rounded_new_capacity =
1122       RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
1123   if (rounded_new_capacity < Capacity() &&
1124       to_space_.ShrinkTo(rounded_new_capacity))  {
1125     // Only shrink from space if we managed to shrink to space.
1126     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1127       // If we managed to shrink to space but couldn't shrink from
1128       // space, attempt to grow to space again.
1129       if (!to_space_.GrowTo(from_space_.Capacity())) {
1130         // We are in an inconsistent state because we could not
1131         // commit/uncommit memory from new space.
1132         V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1133       }
1134     }
1135   }
1136   allocation_info_.limit = to_space_.high();
1137   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1138 }
1139 
1140 
1141 void NewSpace::ResetAllocationInfo() {
1142   allocation_info_.top = to_space_.low();
1143   allocation_info_.limit = to_space_.high();
1144   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1145 }
1146 
1147 
1148 void NewSpace::MCResetRelocationInfo() {
1149   mc_forwarding_info_.top = from_space_.low();
1150   mc_forwarding_info_.limit = from_space_.high();
1151   ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
1152 }
1153 
1154 
1155 void NewSpace::MCCommitRelocationInfo() {
1156   // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
1157   // valid allocation info for the to space.
1158   allocation_info_.top = mc_forwarding_info_.top;
1159   allocation_info_.limit = to_space_.high();
1160   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1161 }
1162 
1163 
1164 #ifdef DEBUG
1165 // We do not use the SemispaceIterator because verification doesn't assume
1166 // that it works (it depends on the invariants we are checking).
1167 void NewSpace::Verify() {
1168   // The allocation pointer should be in the space or at the very end.
1169   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1170 
1171   // There should be objects packed in from the low address up to the
1172   // allocation pointer.
1173   Address current = to_space_.low();
1174   while (current < top()) {
1175     HeapObject* object = HeapObject::FromAddress(current);
1176 
1177     // The first word should be a map, and we expect all map pointers to
1178     // be in map space.
1179     Map* map = object->map();
1180     ASSERT(map->IsMap());
1181     ASSERT(Heap::map_space()->Contains(map));
1182 
1183     // The object should not be code or a map.
1184     ASSERT(!object->IsMap());
1185     ASSERT(!object->IsCode());
1186 
1187     // The object itself should look OK.
1188     object->Verify();
1189 
1190     // All the interior pointers should be contained in the heap.
1191     VerifyPointersVisitor visitor;
1192     int size = object->Size();
1193     object->IterateBody(map->instance_type(), size, &visitor);
1194 
1195     current += size;
1196   }
1197 
1198   // The allocation pointer should not be in the middle of an object.
1199   ASSERT(current == top());
1200 }
1201 #endif
1202 
1203 
1204 bool SemiSpace::Commit() {
1205   ASSERT(!is_committed());
1206   if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
1207     return false;
1208   }
1209   committed_ = true;
1210   return true;
1211 }
1212 
1213 
1214 bool SemiSpace::Uncommit() {
1215   ASSERT(is_committed());
1216   if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
1217     return false;
1218   }
1219   committed_ = false;
1220   return true;
1221 }
1222 
1223 
1224 // -----------------------------------------------------------------------------
1225 // SemiSpace implementation
1226 
1227 bool SemiSpace::Setup(Address start,
1228                       int initial_capacity,
1229                       int maximum_capacity) {
1230   // Creates a space in the young generation. The constructor does not
1231   // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
1232   // memory of size 'capacity' when set up, and does not grow or shrink
1233   // otherwise.  In the mark-compact collector, the memory region of the from
1234   // space is used as the marking stack. It requires contiguous memory
1235   // addresses.
1236   initial_capacity_ = initial_capacity;
1237   capacity_ = initial_capacity;
1238   maximum_capacity_ = maximum_capacity;
1239   committed_ = false;
1240 
1241   start_ = start;
1242   address_mask_ = ~(maximum_capacity - 1);
1243   object_mask_ = address_mask_ | kHeapObjectTag;
1244   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1245   age_mark_ = start_;
1246 
1247   return Commit();
1248 }
1249 
1250 
1251 void SemiSpace::TearDown() {
1252   start_ = NULL;
1253   capacity_ = 0;
1254 }
1255 
1256 
1257 bool SemiSpace::Grow() {
1258   // Double the semispace size but only up to maximum capacity.
1259   int maximum_extra = maximum_capacity_ - capacity_;
1260   int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
1261                   maximum_extra);
1262   if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
1263     return false;
1264   }
1265   capacity_ += extra;
1266   return true;
1267 }
1268 
1269 
1270 bool SemiSpace::GrowTo(int new_capacity) {
1271   ASSERT(new_capacity <= maximum_capacity_);
1272   ASSERT(new_capacity > capacity_);
1273   size_t delta = new_capacity - capacity_;
1274   ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1275   if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
1276     return false;
1277   }
1278   capacity_ = new_capacity;
1279   return true;
1280 }
1281 
1282 
1283 bool SemiSpace::ShrinkTo(int new_capacity) {
1284   ASSERT(new_capacity >= initial_capacity_);
1285   ASSERT(new_capacity < capacity_);
1286   size_t delta = capacity_ - new_capacity;
1287   ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1288   if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
1289     return false;
1290   }
1291   capacity_ = new_capacity;
1292   return true;
1293 }
1294 
1295 
1296 #ifdef DEBUG
1297 void SemiSpace::Print() { }
1298 
1299 
1300 void SemiSpace::Verify() { }
1301 #endif
1302 
1303 
1304 // -----------------------------------------------------------------------------
1305 // SemiSpaceIterator implementation.
1306 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1307   Initialize(space, space->bottom(), space->top(), NULL);
1308 }
1309 
1310 
1311 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1312                                      HeapObjectCallback size_func) {
1313   Initialize(space, space->bottom(), space->top(), size_func);
1314 }
1315 
1316 
1317 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1318   Initialize(space, start, space->top(), NULL);
1319 }
1320 
1321 
1322 void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
1323                                    Address end,
1324                                    HeapObjectCallback size_func) {
1325   ASSERT(space->ToSpaceContains(start));
1326   ASSERT(space->ToSpaceLow() <= end
1327          && end <= space->ToSpaceHigh());
1328   space_ = &space->to_space_;
1329   current_ = start;
1330   limit_ = end;
1331   size_func_ = size_func;
1332 }
1333 
1334 
1335 #ifdef DEBUG
1336 // A static array of histogram info for each type.
1337 static HistogramInfo heap_histograms[LAST_TYPE+1];
1338 static JSObject::SpillInformation js_spill_information;
1339 
1340 // heap_histograms is shared, always clear it before using it.
1341 static void ClearHistograms() {
1342   // We reset the name each time, though it hasn't changed.
1343 #define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
1344   INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1345 #undef DEF_TYPE_NAME
1346 
1347 #define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
1348   INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1349 #undef CLEAR_HISTOGRAM
1350 
1351   js_spill_information.Clear();
1352 }
1353 
1354 
1355 static int code_kind_statistics[Code::NUMBER_OF_KINDS];
1356 
1357 
1358 static void ClearCodeKindStatistics() {
1359   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1360     code_kind_statistics[i] = 0;
1361   }
1362 }
1363 
1364 
1365 static void ReportCodeKindStatistics() {
1366   const char* table[Code::NUMBER_OF_KINDS];
1367 
1368 #define CASE(name)                            \
1369   case Code::name: table[Code::name] = #name; \
1370   break
1371 
1372   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1373     switch (static_cast<Code::Kind>(i)) {
1374       CASE(FUNCTION);
1375       CASE(STUB);
1376       CASE(BUILTIN);
1377       CASE(LOAD_IC);
1378       CASE(KEYED_LOAD_IC);
1379       CASE(STORE_IC);
1380       CASE(KEYED_STORE_IC);
1381       CASE(CALL_IC);
1382     }
1383   }
1384 
1385 #undef CASE
1386 
1387   PrintF("\n   Code kind histograms: \n");
1388   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1389     if (code_kind_statistics[i] > 0) {
1390       PrintF("     %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
1391     }
1392   }
1393   PrintF("\n");
1394 }
1395 
1396 
1397 static int CollectHistogramInfo(HeapObject* obj) {
1398   InstanceType type = obj->map()->instance_type();
1399   ASSERT(0 <= type && type <= LAST_TYPE);
1400   ASSERT(heap_histograms[type].name() != NULL);
1401   heap_histograms[type].increment_number(1);
1402   heap_histograms[type].increment_bytes(obj->Size());
1403 
1404   if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1405     JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
1406   }
1407 
1408   return obj->Size();
1409 }
1410 
1411 
1412 static void ReportHistogram(bool print_spill) {
1413   PrintF("\n  Object Histogram:\n");
1414   for (int i = 0; i <= LAST_TYPE; i++) {
1415     if (heap_histograms[i].number() > 0) {
1416       PrintF("    %-33s%10d (%10d bytes)\n",
1417              heap_histograms[i].name(),
1418              heap_histograms[i].number(),
1419              heap_histograms[i].bytes());
1420     }
1421   }
1422   PrintF("\n");
1423 
1424   // Summarize string types.
1425   int string_number = 0;
1426   int string_bytes = 0;
1427 #define INCREMENT(type, size, name, camel_name)      \
1428     string_number += heap_histograms[type].number(); \
1429     string_bytes += heap_histograms[type].bytes();
1430   STRING_TYPE_LIST(INCREMENT)
1431 #undef INCREMENT
1432   if (string_number > 0) {
1433     PrintF("    %-33s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1434            string_bytes);
1435   }
1436 
1437   if (FLAG_collect_heap_spill_statistics && print_spill) {
1438     js_spill_information.Print();
1439   }
1440 }
1441 #endif  // DEBUG
1442 
1443 
1444 // Support for statistics gathering for --heap-stats and --log-gc.
1445 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1446 void NewSpace::ClearHistograms() {
1447   for (int i = 0; i <= LAST_TYPE; i++) {
1448     allocated_histogram_[i].clear();
1449     promoted_histogram_[i].clear();
1450   }
1451 }
1452 
1453 // Because the copying collector does not touch garbage objects, we iterate
1454 // the new space before a collection to get a histogram of allocated objects.
1455 // This only happens (1) when compiled with DEBUG and the --heap-stats flag is
1456 // set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
1457 // flag is set.
1458 void NewSpace::CollectStatistics() {
1459   ClearHistograms();
1460   SemiSpaceIterator it(this);
1461   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1462     RecordAllocation(obj);
1463 }
1464 
1465 
1466 #ifdef ENABLE_LOGGING_AND_PROFILING
1467 static void DoReportStatistics(HistogramInfo* info, const char* description) {
1468   LOG(HeapSampleBeginEvent("NewSpace", description));
1469   // Lump all the string types together.
1470   int string_number = 0;
1471   int string_bytes = 0;
1472 #define INCREMENT(type, size, name, camel_name)       \
1473     string_number += info[type].number();             \
1474     string_bytes += info[type].bytes();
1475   STRING_TYPE_LIST(INCREMENT)
1476 #undef INCREMENT
1477   if (string_number > 0) {
1478     LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1479   }
1480 
1481   // Then do the other types.
1482   for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1483     if (info[i].number() > 0) {
1484       LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
1485                               info[i].bytes()));
1486     }
1487   }
1488   LOG(HeapSampleEndEvent("NewSpace", description));
1489 }
1490 #endif  // ENABLE_LOGGING_AND_PROFILING
1491 
1492 
1493 void NewSpace::ReportStatistics() {
1494 #ifdef DEBUG
1495   if (FLAG_heap_stats) {
1496     float pct = static_cast<float>(Available()) / Capacity();
1497     PrintF("  capacity: %d, available: %d, %%%d\n",
1498            Capacity(), Available(), static_cast<int>(pct*100));
1499     PrintF("\n  Object Histogram:\n");
1500     for (int i = 0; i <= LAST_TYPE; i++) {
1501       if (allocated_histogram_[i].number() > 0) {
1502         PrintF("    %-33s%10d (%10d bytes)\n",
1503                allocated_histogram_[i].name(),
1504                allocated_histogram_[i].number(),
1505                allocated_histogram_[i].bytes());
1506       }
1507     }
1508     PrintF("\n");
1509   }
1510 #endif  // DEBUG
1511 
1512 #ifdef ENABLE_LOGGING_AND_PROFILING
1513   if (FLAG_log_gc) {
1514     DoReportStatistics(allocated_histogram_, "allocated");
1515     DoReportStatistics(promoted_histogram_, "promoted");
1516   }
1517 #endif  // ENABLE_LOGGING_AND_PROFILING
1518 }
1519 
1520 
1521 void NewSpace::RecordAllocation(HeapObject* obj) {
1522   InstanceType type = obj->map()->instance_type();
1523   ASSERT(0 <= type && type <= LAST_TYPE);
1524   allocated_histogram_[type].increment_number(1);
1525   allocated_histogram_[type].increment_bytes(obj->Size());
1526 }
1527 
1528 
1529 void NewSpace::RecordPromotion(HeapObject* obj) {
1530   InstanceType type = obj->map()->instance_type();
1531   ASSERT(0 <= type && type <= LAST_TYPE);
1532   promoted_histogram_[type].increment_number(1);
1533   promoted_histogram_[type].increment_bytes(obj->Size());
1534 }
1535 #endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1536 
1537 
1538 // -----------------------------------------------------------------------------
1539 // Free lists for old object spaces implementation
1540 
1541 void FreeListNode::set_size(int size_in_bytes) {
1542   ASSERT(size_in_bytes > 0);
1543   ASSERT(IsAligned(size_in_bytes, kPointerSize));
1544 
1545   // We write a map and possibly size information to the block.  If the block
1546   // is big enough to be a ByteArray with at least one extra word (the next
1547   // pointer), we set its map to be the byte array map and its size to an
1548   // appropriate array length for the desired size from HeapObject::Size().
1549   // If the block is too small (e.g., one or two words) to hold both a size
1550   // field and a next pointer, we give it a filler map that gives it the
1551   // correct size.
1552   if (size_in_bytes > ByteArray::kAlignedSize) {
1553     set_map(Heap::raw_unchecked_byte_array_map());
1554     // Can't use ByteArray::cast because it fails during deserialization.
1555     ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
1556     this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
1557   } else if (size_in_bytes == kPointerSize) {
1558     set_map(Heap::raw_unchecked_one_pointer_filler_map());
1559   } else if (size_in_bytes == 2 * kPointerSize) {
1560     set_map(Heap::raw_unchecked_two_pointer_filler_map());
1561   } else {
1562     UNREACHABLE();
1563   }
1564   // We would like to ASSERT(Size() == size_in_bytes), but this would fail
1565   // during deserialization because the byte array map is not set up yet.
1566 }
1567 
1568 
1569 Address FreeListNode::next() {
1570   ASSERT(IsFreeListNode(this));
1571   if (map() == Heap::raw_unchecked_byte_array_map()) {
1572     ASSERT(Size() >= kNextOffset + kPointerSize);
1573     return Memory::Address_at(address() + kNextOffset);
1574   } else {
1575     return Memory::Address_at(address() + kPointerSize);
1576   }
1577 }
1578 
1579 
1580 void FreeListNode::set_next(Address next) {
1581   ASSERT(IsFreeListNode(this));
1582   if (map() == Heap::raw_unchecked_byte_array_map()) {
1583     ASSERT(Size() >= kNextOffset + kPointerSize);
1584     Memory::Address_at(address() + kNextOffset) = next;
1585   } else {
1586     Memory::Address_at(address() + kPointerSize) = next;
1587   }
1588 }
1589 
1590 
1591 OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
1592   Reset();
1593 }
1594 
1595 
1596 void OldSpaceFreeList::Reset() {
1597   available_ = 0;
1598   for (int i = 0; i < kFreeListsLength; i++) {
1599     free_[i].head_node_ = NULL;
1600   }
1601   needs_rebuild_ = false;
1602   finger_ = kHead;
1603   free_[kHead].next_size_ = kEnd;
1604 }
1605 
1606 
1607 void OldSpaceFreeList::RebuildSizeList() {
1608   ASSERT(needs_rebuild_);
1609   int cur = kHead;
1610   for (int i = cur + 1; i < kFreeListsLength; i++) {
1611     if (free_[i].head_node_ != NULL) {
1612       free_[cur].next_size_ = i;
1613       cur = i;
1614     }
1615   }
1616   free_[cur].next_size_ = kEnd;
1617   needs_rebuild_ = false;
1618 }
1619 
1620 
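// Free() below indexes the exact-size lists by the block size in words:
// index = size_in_bytes >> kPointerSizeLog2.  For example (illustrative,
// assuming 4-byte pointers), a 24-byte block goes on free_[6], while a block
// smaller than kMinBlockSize is dropped and reported back as wasted bytes.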
1621 int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
1622 #ifdef DEBUG
1623   MemoryAllocator::ZapBlock(start, size_in_bytes);
1624 #endif
1625   FreeListNode* node = FreeListNode::FromAddress(start);
1626   node->set_size(size_in_bytes);
1627 
1628   // We don't use the freelists in compacting mode.  This makes it more like a
1629   // GC that only has mark-sweep-compact and doesn't have a mark-sweep
1630   // collector.
1631   if (FLAG_always_compact) {
1632     return size_in_bytes;
1633   }
1634 
1635   // Early return to drop too-small blocks on the floor (one or two word
1636   // blocks cannot hold a map pointer, a size field, and a pointer to the
1637   // next block in the free list).
1638   if (size_in_bytes < kMinBlockSize) {
1639     return size_in_bytes;
1640   }
1641 
1642   // Insert other blocks at the head of an exact free list.
1643   int index = size_in_bytes >> kPointerSizeLog2;
1644   node->set_next(free_[index].head_node_);
1645   free_[index].head_node_ = node->address();
1646   available_ += size_in_bytes;
1647   needs_rebuild_ = true;
1648   return 0;
1649 }
1650 
1651 
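// Allocate() below first tries the exact-size list for the request; failing
// that it walks the size list (starting from the finger_ hint) to the smallest
// block that is large enough, hands out the first size_in_bytes of it, and
// puts the remainder back on a free list (a too-small remainder is accounted
// as waste).  Worked example (illustrative): a 5-word request (index 5) served
// from a 9-word block (cur == 9) leaves a rem == 4 word remainder that is
// pushed onto free_[4].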
1652 Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
1653   ASSERT(0 < size_in_bytes);
1654   ASSERT(size_in_bytes <= kMaxBlockSize);
1655   ASSERT(IsAligned(size_in_bytes, kPointerSize));
1656 
1657   if (needs_rebuild_) RebuildSizeList();
1658   int index = size_in_bytes >> kPointerSizeLog2;
1659   // Check for a perfect fit.
1660   if (free_[index].head_node_ != NULL) {
1661     FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
1662     // If this was the last block of its size, remove the size.
1663     if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
1664     available_ -= size_in_bytes;
1665     *wasted_bytes = 0;
1666     ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
1667     return node;
1668   }
1669   // Search the size list for the best fit.
1670   int prev = finger_ < index ? finger_ : kHead;
1671   int cur = FindSize(index, &prev);
1672   ASSERT(index < cur);
1673   if (cur == kEnd) {
1674     // No large enough size in list.
1675     *wasted_bytes = 0;
1676     return Failure::RetryAfterGC(size_in_bytes, owner_);
1677   }
1678   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
1679   int rem = cur - index;
1680   int rem_bytes = rem << kPointerSizeLog2;
1681   FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
1682   ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
1683   FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
1684                                                      size_in_bytes);
1685   // Distinguish the cases prev < rem < cur and rem <= prev < cur
1686   // to avoid many redundant tests and calls to Insert/RemoveSize.
1687   if (prev < rem) {
1688     // Simple case: insert rem between prev and cur.
1689     finger_ = prev;
1690     free_[prev].next_size_ = rem;
1691     // If this was the last block of size cur, remove the size.
1692     if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1693       free_[rem].next_size_ = free_[cur].next_size_;
1694     } else {
1695       free_[rem].next_size_ = cur;
1696     }
1697     // Add the remainder block.
1698     rem_node->set_size(rem_bytes);
1699     rem_node->set_next(free_[rem].head_node_);
1700     free_[rem].head_node_ = rem_node->address();
1701   } else {
1702     // If this was the last block of size cur, remove the size.
1703     if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1704       finger_ = prev;
1705       free_[prev].next_size_ = free_[cur].next_size_;
1706     }
1707     if (rem_bytes < kMinBlockSize) {
1708       // Too-small remainder is wasted.
1709       rem_node->set_size(rem_bytes);
1710       available_ -= size_in_bytes + rem_bytes;
1711       *wasted_bytes = rem_bytes;
1712       return cur_node;
1713     }
1714     // Add the remainder block and, if needed, insert its size.
1715     rem_node->set_size(rem_bytes);
1716     rem_node->set_next(free_[rem].head_node_);
1717     free_[rem].head_node_ = rem_node->address();
1718     if (rem_node->next() == NULL) InsertSize(rem);
1719   }
1720   available_ -= size_in_bytes;
1721   *wasted_bytes = 0;
1722   return cur_node;
1723 }
1724 
1725 
1726 #ifdef DEBUG
1727 bool OldSpaceFreeList::Contains(FreeListNode* node) {
1728   for (int i = 0; i < kFreeListsLength; i++) {
1729     Address cur_addr = free_[i].head_node_;
1730     while (cur_addr != NULL) {
1731       FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1732       if (cur_node == node) return true;
1733       cur_addr = cur_node->next();
1734     }
1735   }
1736   return false;
1737 }
1738 #endif
1739 
1740 
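// FixedSizeFreeList is the simpler free list used by the fixed-size spaces:
// every block has the same size (object_size_), so a single singly-linked list
// rooted at head_ suffices.  Free() pushes a block at the head and Allocate()
// pops it; an empty list yields a retry-after-GC failure.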
1741 FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
1742     : owner_(owner), object_size_(object_size) {
1743   Reset();
1744 }
1745 
1746 
1747 void FixedSizeFreeList::Reset() {
1748   available_ = 0;
1749   head_ = NULL;
1750 }
1751 
1752 
1753 void FixedSizeFreeList::Free(Address start) {
1754 #ifdef DEBUG
1755   MemoryAllocator::ZapBlock(start, object_size_);
1756 #endif
1757   // We only use the freelists with mark-sweep.
1758   ASSERT(!MarkCompactCollector::IsCompacting());
1759   FreeListNode* node = FreeListNode::FromAddress(start);
1760   node->set_size(object_size_);
1761   node->set_next(head_);
1762   head_ = node->address();
1763   available_ += object_size_;
1764 }
1765 
1766 
1767 Object* FixedSizeFreeList::Allocate() {
1768   if (head_ == NULL) {
1769     return Failure::RetryAfterGC(object_size_, owner_);
1770   }
1771 
1772   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
1773   FreeListNode* node = FreeListNode::FromAddress(head_);
1774   head_ = node->next();
1775   available_ -= object_size_;
1776   return node;
1777 }
1778 
1779 
1780 // -----------------------------------------------------------------------------
1781 // OldSpace implementation
1782 
1783 void OldSpace::PrepareForMarkCompact(bool will_compact) {
1784   if (will_compact) {
1785     // Reset relocation info.  During a compacting collection, everything in
1786     // the space is considered 'available' and we will rediscover live data
1787     // and waste during the collection.
1788     MCResetRelocationInfo();
1789     ASSERT(Available() == Capacity());
1790   } else {
1791     // During a non-compacting collection, everything below the linear
1792     // allocation pointer is considered allocated (everything above is
1793     // available) and we will rediscover available and wasted bytes during
1794     // the collection.
1795     accounting_stats_.AllocateBytes(free_list_.available());
1796     accounting_stats_.FillWastedBytes(Waste());
1797   }
1798 
1799   // Clear the free list before a full GC---it will be rebuilt afterward.
1800   free_list_.Reset();
1801 }
1802 
1803 
1804 void OldSpace::MCCommitRelocationInfo() {
1805   // Update fast allocation info.
1806   allocation_info_.top = mc_forwarding_info_.top;
1807   allocation_info_.limit = mc_forwarding_info_.limit;
1808   ASSERT(allocation_info_.VerifyPagedAllocation());
1809 
1810   // The space is compacted and we haven't yet built free lists or
1811   // wasted any space.
1812   ASSERT(Waste() == 0);
1813   ASSERT(AvailableFree() == 0);
1814 
1815   // Build the free list for the space.
1816   int computed_size = 0;
1817   PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
1818   while (it.has_next()) {
1819     Page* p = it.next();
1820     // Space below the relocation pointer is allocated.
1821     computed_size +=
1822         static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
1823     if (it.has_next()) {
1824       // Free the space at the top of the page.  We cannot use
1825       // p->mc_relocation_top after the call to Free (because Free will clear
1826       // remembered set bits).
1827       int extra_size =
1828           static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
1829       if (extra_size > 0) {
1830         int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
1831         // The bytes we have just "freed" to add to the free list were
1832         // already accounted as available.
1833         accounting_stats_.WasteBytes(wasted_bytes);
1834       }
1835     }
1836   }
1837 
1838   // Make sure the computed size - based on the used portion of the pages in
1839   // use - matches the size obtained while computing forwarding addresses.
1840   ASSERT(computed_size == Size());
1841 }
1842 
1843 
1844 bool NewSpace::ReserveSpace(int bytes) {
1845   // We can't reliably unpack a partial snapshot that needs more new space
1846   // than the minimum NewSpace size.
1847   ASSERT(bytes <= InitialCapacity());
1848   Address limit = allocation_info_.limit;
1849   Address top = allocation_info_.top;
1850   return limit - top >= bytes;
1851 }
1852 
1853 
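// Reserving space in a paged space: if the current page cannot satisfy the
// request, the tail of that page is put on the free list and whole pages are
// reserved (expanding the space when needed and allowed) until 'bytes' bytes
// are covered; allocation then restarts at the start of the next page.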
1854 bool PagedSpace::ReserveSpace(int bytes) {
1855   Address limit = allocation_info_.limit;
1856   Address top = allocation_info_.top;
1857   if (limit - top >= bytes) return true;
1858 
1859   // There wasn't enough space in the current page.  Let's put the rest
1860   // of the page on the free list and start a fresh page.
1861   PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
1862 
1863   Page* reserved_page = TopPageOf(allocation_info_);
1864   int bytes_left_to_reserve = bytes;
1865   while (bytes_left_to_reserve > 0) {
1866     if (!reserved_page->next_page()->is_valid()) {
1867       if (Heap::OldGenerationAllocationLimitReached()) return false;
1868       Expand(reserved_page);
1869     }
1870     bytes_left_to_reserve -= Page::kPageSize;
1871     reserved_page = reserved_page->next_page();
1872     if (!reserved_page->is_valid()) return false;
1873   }
1874   ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
1875   SetAllocationInfo(&allocation_info_,
1876                     TopPageOf(allocation_info_)->next_page());
1877   return true;
1878 }
1879 
1880 
1881 // You have to call this last, since the implementation from PagedSpace
1882 // doesn't know that memory was 'promised' to large object space.
1883 bool LargeObjectSpace::ReserveSpace(int bytes) {
1884   return Heap::OldGenerationSpaceAvailable() >= bytes;
1885 }
1886 
1887 
1888 // Slow case for normal allocation.  Try in order: (1) allocate in the next
1889 // page in the space, (2) allocate off the space's free list, (3) expand the
1890 // space, (4) fail.
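// A NULL result tells the caller that no memory could be obtained without a
// garbage collection, e.g. (illustrative):
//   HeapObject* obj = old_space->SlowAllocateRaw(size_in_bytes);
//   if (obj == NULL) { /* collect garbage, then retry the allocation */ }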
1891 HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
1892   // Linear allocation in this space has failed.  If there is another page
1893   // in the space, move to that page and allocate there.  This allocation
1894   // should succeed (size_in_bytes should not be greater than a page's
1895   // object area size).
1896   Page* current_page = TopPageOf(allocation_info_);
1897   if (current_page->next_page()->is_valid()) {
1898     return AllocateInNextPage(current_page, size_in_bytes);
1899   }
1900 
1901   // There is no next page in this space.  Try free list allocation unless that
1902   // is currently forbidden.
1903   if (!Heap::linear_allocation()) {
1904     int wasted_bytes;
1905     Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
1906     accounting_stats_.WasteBytes(wasted_bytes);
1907     if (!result->IsFailure()) {
1908       accounting_stats_.AllocateBytes(size_in_bytes);
1909       return HeapObject::cast(result);
1910     }
1911   }
1912 
1913   // Free list allocation failed and there is no next page.  Fail if we have
1914   // hit the old generation size limit that should cause a garbage
1915   // collection.
1916   if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
1917     return NULL;
1918   }
1919 
1920   // Try to expand the space and allocate in the new next page.
1921   ASSERT(!current_page->next_page()->is_valid());
1922   if (Expand(current_page)) {
1923     return AllocateInNextPage(current_page, size_in_bytes);
1924   }
1925 
1926   // Finally, fail.
1927   return NULL;
1928 }
1929 
1930 
1931 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
1932   int free_size =
1933       static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
1934   if (free_size > 0) {
1935     int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
1936     accounting_stats_.WasteBytes(wasted_bytes);
1937   }
1938 }
1939 
1940 
1941 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
1942   int free_size =
1943       static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
1944   // In the fixed space free list all the free list items have the right size.
1945   // We use up the rest of the page while preserving this invariant.
1946   while (free_size >= object_size_in_bytes_) {
1947     free_list_.Free(allocation_info_.top);
1948     allocation_info_.top += object_size_in_bytes_;
1949     free_size -= object_size_in_bytes_;
1950     accounting_stats_.WasteBytes(object_size_in_bytes_);
1951   }
1952 }
1953 
1954 
1955 // Add the block at the top of the page to the space's free list, set the
1956 // allocation info to the next page (there is assumed to be one), and allocate
1957 // linearly there.
1958 HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
1959                                          int size_in_bytes) {
1960   ASSERT(current_page->next_page()->is_valid());
1961   PutRestOfCurrentPageOnFreeList(current_page);
1962   SetAllocationInfo(&allocation_info_, current_page->next_page());
1963   return AllocateLinearly(&allocation_info_, size_in_bytes);
1964 }
1965 
1966 
1967 #ifdef DEBUG
1968 struct CommentStatistic {
1969   const char* comment;
1970   int size;
1971   int count;
1972   void Clear() {
1973     comment = NULL;
1974     size = 0;
1975     count = 0;
1976   }
1977 };
1978 
1979 
1980 // Must be small, since a linear scan is used for lookup.
1981 const int kMaxComments = 64;
1982 static CommentStatistic comments_statistics[kMaxComments+1];
1983 
1984 
1985 void PagedSpace::ReportCodeStatistics() {
1986   ReportCodeKindStatistics();
1987   PrintF("Code comment statistics (\"   [ comment-txt   :    size/   "
1988          "count  (average)\"):\n");
1989   for (int i = 0; i <= kMaxComments; i++) {
1990     const CommentStatistic& cs = comments_statistics[i];
1991     if (cs.size > 0) {
1992       PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
1993              cs.size/cs.count);
1994     }
1995   }
1996   PrintF("\n");
1997 }
1998 
1999 
2000 void PagedSpace::ResetCodeStatistics() {
2001   ClearCodeKindStatistics();
2002   for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
2003   comments_statistics[kMaxComments].comment = "Unknown";
2004   comments_statistics[kMaxComments].size = 0;
2005   comments_statistics[kMaxComments].count = 0;
2006 }
2007 
2008 
2009 // Adds a comment to the 'comments_statistics' table.  Performance is OK as
2010 // long as 'kMaxComments' is small.
2011 static void EnterComment(const char* comment, int delta) {
2012   // Do not count empty comments
2013   if (delta <= 0) return;
2014   CommentStatistic* cs = &comments_statistics[kMaxComments];
2015   // Search for a free or matching entry in 'comments_statistics': 'cs'
2016   // points to result.
2017   for (int i = 0; i < kMaxComments; i++) {
2018     if (comments_statistics[i].comment == NULL) {
2019       cs = &comments_statistics[i];
2020       cs->comment = comment;
2021       break;
2022     } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2023       cs = &comments_statistics[i];
2024       break;
2025     }
2026   }
2027   // Update entry for 'comment'
2028   cs->size += delta;
2029   cs->count += 1;
2030 }
2031 
2032 
2033 // Call for each nested comment start (start marked with '[ xxx', end marked
2034 // with ']').  RelocIterator 'it' must point to a comment reloc info.
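// For example (illustrative), a nested comment layout such as
//   [ outer comment
//     [ inner comment
//     ]
//   ]
// attributes the code covered by the inner pair to "inner comment" and only
// the remaining (flat) bytes of the outer pair to "outer comment".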
2035 static void CollectCommentStatistics(RelocIterator* it) {
2036   ASSERT(!it->done());
2037   ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2038   const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2039   if (tmp[0] != '[') {
2040     // Not a nested comment; skip
2041     return;
2042   }
2043 
2044   // Search for end of nested comment or a new nested comment
2045   const char* const comment_txt =
2046       reinterpret_cast<const char*>(it->rinfo()->data());
2047   const byte* prev_pc = it->rinfo()->pc();
2048   int flat_delta = 0;
2049   it->next();
2050   while (true) {
2051     // All nested comments must be terminated properly, so the loop is
2052     // guaranteed to exit.
2053     ASSERT(!it->done());
2054     if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2055       const char* const txt =
2056           reinterpret_cast<const char*>(it->rinfo()->data());
2057       flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2058       if (txt[0] == ']') break;  // End of nested  comment
2059       // A new comment
2060       CollectCommentStatistics(it);
2061       // Skip code that was covered with previous comment
2062       prev_pc = it->rinfo()->pc();
2063     }
2064     it->next();
2065   }
2066   EnterComment(comment_txt, flat_delta);
2067 }
2068 
2069 
2070 // Collects code size statistics:
2071 // - by code kind
2072 // - by code comment
2073 void PagedSpace::CollectCodeStatistics() {
2074   HeapObjectIterator obj_it(this);
2075   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
2076     if (obj->IsCode()) {
2077       Code* code = Code::cast(obj);
2078       code_kind_statistics[code->kind()] += code->Size();
2079       RelocIterator it(code);
2080       int delta = 0;
2081       const byte* prev_pc = code->instruction_start();
2082       while (!it.done()) {
2083         if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2084           delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2085           CollectCommentStatistics(&it);
2086           prev_pc = it.rinfo()->pc();
2087         }
2088         it.next();
2089       }
2090 
2091       ASSERT(code->instruction_start() <= prev_pc &&
2092              prev_pc <= code->relocation_start());
2093       delta += static_cast<int>(code->relocation_start() - prev_pc);
2094       EnterComment("NoComment", delta);
2095     }
2096   }
2097 }
2098 
2099 
2100 void OldSpace::ReportStatistics() {
2101   int pct = Available() * 100 / Capacity();
2102   PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
2103          Capacity(), Waste(), Available(), pct);
2104 
2105   // Report remembered set statistics.
2106   int rset_marked_pointers = 0;
2107   int rset_marked_arrays = 0;
2108   int rset_marked_array_elements = 0;
2109   int cross_gen_pointers = 0;
2110   int cross_gen_array_elements = 0;
2111 
2112   PageIterator page_it(this, PageIterator::PAGES_IN_USE);
2113   while (page_it.has_next()) {
2114     Page* p = page_it.next();
2115 
2116     for (Address rset_addr = p->RSetStart();
2117          rset_addr < p->RSetEnd();
2118          rset_addr += kIntSize) {
2119       int rset = Memory::int_at(rset_addr);
2120       if (rset != 0) {
2121         // Bits were set
2122         int intoff =
2123             static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
2124         int bitoff = 0;
2125         for (; bitoff < kBitsPerInt; ++bitoff) {
2126           if ((rset & (1 << bitoff)) != 0) {
2127             int bitpos = intoff*kBitsPerByte + bitoff;
2128             Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
2129             Object** obj = reinterpret_cast<Object**>(slot);
2130             if (*obj == Heap::raw_unchecked_fixed_array_map()) {
2131               rset_marked_arrays++;
2132               FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
2133 
2134               rset_marked_array_elements += fa->length();
2135               // Manually inline FixedArray::IterateBody
2136               Address elm_start = slot + FixedArray::kHeaderSize;
2137               Address elm_stop = elm_start + fa->length() * kPointerSize;
2138               for (Address elm_addr = elm_start;
2139                    elm_addr < elm_stop; elm_addr += kPointerSize) {
2140                 // Filter non-heap-object pointers
2141                 Object** elm_p = reinterpret_cast<Object**>(elm_addr);
2142                 if (Heap::InNewSpace(*elm_p))
2143                   cross_gen_array_elements++;
2144               }
2145             } else {
2146               rset_marked_pointers++;
2147               if (Heap::InNewSpace(*obj))
2148                 cross_gen_pointers++;
2149             }
2150           }
2151         }
2152       }
2153     }
2154   }
2155 
2156   pct = rset_marked_pointers == 0 ?
2157         0 : cross_gen_pointers * 100 / rset_marked_pointers;
2158   PrintF("  rset-marked pointers %d, to-new-space %d (%%%d)\n",
2159             rset_marked_pointers, cross_gen_pointers, pct);
2160   PrintF("  rset_marked arrays %d, ", rset_marked_arrays);
2161   PrintF("  elements %d, ", rset_marked_array_elements);
2162   pct = rset_marked_array_elements == 0 ? 0
2163            : cross_gen_array_elements * 100 / rset_marked_array_elements;
2164   PrintF("  pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
2165   PrintF("  total rset-marked bits %d\n",
2166             (rset_marked_pointers + rset_marked_arrays));
2167   pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
2168         : (cross_gen_pointers + cross_gen_array_elements) * 100 /
2169           (rset_marked_pointers + rset_marked_array_elements);
2170   PrintF("  total rset pointers %d, true cross generation ones %d (%%%d)\n",
2171          (rset_marked_pointers + rset_marked_array_elements),
2172          (cross_gen_pointers + cross_gen_array_elements),
2173          pct);
2174 
2175   ClearHistograms();
2176   HeapObjectIterator obj_it(this);
2177   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2178     CollectHistogramInfo(obj);
2179   ReportHistogram(true);
2180 }
2181 
2182 
2183 // Dump the range of remembered set words between [start, end) corresponding
2184 // to the pointers starting at object_p.  The allocation_top is an object
2185 // pointer which should not be read past.  This is important for large object
2186 // pages, where some bits in the remembered set range do not correspond to
2187 // allocated addresses.
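// Legend of the dump produced below: '.' is a clear bit, 'o' a set bit whose
// slot points to old space, 'X' a set bit whose slot points to new space, '|'
// marks the allocation top, and '#' covers slots past the allocation top.
// Illustrative line of output:
//   ..X..... o....... | ######## ########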
2188 static void PrintRSetRange(Address start, Address end, Object** object_p,
2189                            Address allocation_top) {
2190   Address rset_address = start;
2191 
2192   // If the range starts on an odd-numbered word (e.g., for large object extra
2193   // remembered set ranges), print some spaces.
2194   if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
2195     PrintF("                                    ");
2196   }
2197 
2198   // Loop over all the words in the range.
2199   while (rset_address < end) {
2200     uint32_t rset_word = Memory::uint32_at(rset_address);
2201     int bit_position = 0;
2202 
2203     // Loop over all the bits in the word.
2204     while (bit_position < kBitsPerInt) {
2205       if (object_p == reinterpret_cast<Object**>(allocation_top)) {
2206         // Print a bar at the allocation pointer.
2207         PrintF("|");
2208       } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
2209         // Do not dereference object_p past the allocation pointer.
2210         PrintF("#");
2211       } else if ((rset_word & (1 << bit_position)) == 0) {
2212         // Print a dot for zero bits.
2213         PrintF(".");
2214       } else if (Heap::InNewSpace(*object_p)) {
2215         // Print an X for one bits for pointers to new space.
2216         PrintF("X");
2217       } else {
2218         // Print a circle for one bits for pointers to old space.
2219         PrintF("o");
2220       }
2221 
2222       // Print a space after every 8th bit except the last.
2223       if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
2224         PrintF(" ");
2225       }
2226 
2227       // Advance to next bit.
2228       bit_position++;
2229       object_p++;
2230     }
2231 
2232     // Print a newline after every odd numbered word, otherwise a space.
2233     if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
2234       PrintF("\n");
2235     } else {
2236       PrintF(" ");
2237     }
2238 
2239     // Advance to next remembered set word.
2240     rset_address += kIntSize;
2241   }
2242 }
2243 
2244 
2245 void PagedSpace::DoPrintRSet(const char* space_name) {
2246   PageIterator it(this, PageIterator::PAGES_IN_USE);
2247   while (it.has_next()) {
2248     Page* p = it.next();
2249     PrintF("%s page 0x%x:\n", space_name, p);
2250     PrintRSetRange(p->RSetStart(), p->RSetEnd(),
2251                    reinterpret_cast<Object**>(p->ObjectAreaStart()),
2252                    p->AllocationTop());
2253     PrintF("\n");
2254   }
2255 }
2256 
2257 
2258 void OldSpace::PrintRSet() { DoPrintRSet("old"); }
2259 #endif
2260 
2261 // -----------------------------------------------------------------------------
2262 // FixedSpace implementation
2263 
2264 void FixedSpace::PrepareForMarkCompact(bool will_compact) {
2265   if (will_compact) {
2266     // Reset relocation info.
2267     MCResetRelocationInfo();
2268 
2269     // During a compacting collection, everything in the space is considered
2270     // 'available' (set by the call to MCResetRelocationInfo) and we will
2271     // rediscover live and wasted bytes during the collection.
2272     ASSERT(Available() == Capacity());
2273   } else {
2274     // During a non-compacting collection, everything below the linear
2275     // allocation pointer except wasted top-of-page blocks is considered
2276     // allocated and we will rediscover available bytes during the
2277     // collection.
2278     accounting_stats_.AllocateBytes(free_list_.available());
2279   }
2280 
2281   // Clear the free list before a full GC---it will be rebuilt afterward.
2282   free_list_.Reset();
2283 }
2284 
2285 
2286 void FixedSpace::MCCommitRelocationInfo() {
2287   // Update fast allocation info.
2288   allocation_info_.top = mc_forwarding_info_.top;
2289   allocation_info_.limit = mc_forwarding_info_.limit;
2290   ASSERT(allocation_info_.VerifyPagedAllocation());
2291 
2292   // The space is compacted and we haven't yet wasted any space.
2293   ASSERT(Waste() == 0);
2294 
2295   // Update allocation_top of each page in use and compute waste.
2296   int computed_size = 0;
2297   PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2298   while (it.has_next()) {
2299     Page* page = it.next();
2300     Address page_top = page->AllocationTop();
2301     computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
2302     if (it.has_next()) {
2303       accounting_stats_.WasteBytes(
2304           static_cast<int>(page->ObjectAreaEnd() - page_top));
2305     }
2306   }
2307 
2308   // Make sure the computed size - based on the used portion of the
2309   // pages in use - matches the size we adjust during allocation.
2310   ASSERT(computed_size == Size());
2311 }
2312 
2313 
2314 // Slow case for normal allocation. Try in order: (1) allocate in the next
2315 // page in the space, (2) allocate off the space's free list, (3) expand the
2316 // space, (4) fail.
2317 HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2318   ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2319   // Linear allocation in this space has failed.  If there is another page
2320   // in the space, move to that page and allocate there.  This allocation
2321   // should succeed.
2322   Page* current_page = TopPageOf(allocation_info_);
2323   if (current_page->next_page()->is_valid()) {
2324     return AllocateInNextPage(current_page, size_in_bytes);
2325   }
2326 
2327   // There is no next page in this space.  Try free list allocation unless
2328   // that is currently forbidden.  The fixed space free list implicitly assumes
2329   // that all free blocks are of the fixed size.
2330   if (!Heap::linear_allocation()) {
2331     Object* result = free_list_.Allocate();
2332     if (!result->IsFailure()) {
2333       accounting_stats_.AllocateBytes(size_in_bytes);
2334       return HeapObject::cast(result);
2335     }
2336   }
2337 
2338   // Free list allocation failed and there is no next page.  Fail if we have
2339   // hit the old generation size limit that should cause a garbage
2340   // collection.
2341   if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2342     return NULL;
2343   }
2344 
2345   // Try to expand the space and allocate in the new next page.
2346   ASSERT(!current_page->next_page()->is_valid());
2347   if (Expand(current_page)) {
2348     return AllocateInNextPage(current_page, size_in_bytes);
2349   }
2350 
2351   // Finally, fail.
2352   return NULL;
2353 }
2354 
2355 
2356 // Move to the next page (there is assumed to be one) and allocate there.
2357 // The top of page block is always wasted, because it is too small to hold a
2358 // map.
2359 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2360                                            int size_in_bytes) {
2361   ASSERT(current_page->next_page()->is_valid());
2362   ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
2363   ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2364   accounting_stats_.WasteBytes(page_extra_);
2365   SetAllocationInfo(&allocation_info_, current_page->next_page());
2366   return AllocateLinearly(&allocation_info_, size_in_bytes);
2367 }
2368 
2369 
2370 #ifdef DEBUG
2371 void FixedSpace::ReportStatistics() {
2372   int pct = Available() * 100 / Capacity();
2373   PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
2374          Capacity(), Waste(), Available(), pct);
2375 
2376   // Report remembered set statistics.
2377   int rset_marked_pointers = 0;
2378   int cross_gen_pointers = 0;
2379 
2380   PageIterator page_it(this, PageIterator::PAGES_IN_USE);
2381   while (page_it.has_next()) {
2382     Page* p = page_it.next();
2383 
2384     for (Address rset_addr = p->RSetStart();
2385          rset_addr < p->RSetEnd();
2386          rset_addr += kIntSize) {
2387       int rset = Memory::int_at(rset_addr);
2388       if (rset != 0) {
2389         // Bits were set
2390         int intoff =
2391             static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
2392         int bitoff = 0;
2393         for (; bitoff < kBitsPerInt; ++bitoff) {
2394           if ((rset & (1 << bitoff)) != 0) {
2395             int bitpos = intoff*kBitsPerByte + bitoff;
2396             Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
2397             Object** obj = reinterpret_cast<Object**>(slot);
2398             rset_marked_pointers++;
2399             if (Heap::InNewSpace(*obj))
2400               cross_gen_pointers++;
2401           }
2402         }
2403       }
2404     }
2405   }
2406 
2407   pct = rset_marked_pointers == 0 ?
2408           0 : cross_gen_pointers * 100 / rset_marked_pointers;
2409   PrintF("  rset-marked pointers %d, to-new-space %d (%%%d)\n",
2410             rset_marked_pointers, cross_gen_pointers, pct);
2411 
2412   ClearHistograms();
2413   HeapObjectIterator obj_it(this);
2414   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2415     CollectHistogramInfo(obj);
2416   ReportHistogram(false);
2417 }
2418 
2419 
2420 void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
2421 #endif
2422 
2423 
2424 // -----------------------------------------------------------------------------
2425 // MapSpace implementation
2426 
2427 void MapSpace::PrepareForMarkCompact(bool will_compact) {
2428   // Call prepare of the super class.
2429   FixedSpace::PrepareForMarkCompact(will_compact);
2430 
2431   if (will_compact) {
2432     // Initialize map index entry.
2433     int page_count = 0;
2434     PageIterator it(this, PageIterator::ALL_PAGES);
2435     while (it.has_next()) {
2436       ASSERT_MAP_PAGE_INDEX(page_count);
2437 
2438       Page* p = it.next();
2439       ASSERT(p->mc_page_index == page_count);
2440 
2441       page_addresses_[page_count++] = p->address();
2442     }
2443   }
2444 }
2445 
2446 
2447 #ifdef DEBUG
2448 void MapSpace::VerifyObject(HeapObject* object) {
2449   // The object should be a map or a free-list node.
2450   ASSERT(object->IsMap() || object->IsByteArray());
2451 }
2452 #endif
2453 
2454 
2455 // -----------------------------------------------------------------------------
2456 // GlobalPropertyCellSpace implementation
2457 
2458 #ifdef DEBUG
2459 void CellSpace::VerifyObject(HeapObject* object) {
2460   // The object should be a global object property cell or a free-list node.
2461   ASSERT(object->IsJSGlobalPropertyCell() ||
2462          object->map() == Heap::two_pointer_filler_map());
2463 }
2464 #endif
2465 
2466 
2467 // -----------------------------------------------------------------------------
2468 // LargeObjectIterator
2469 
2470 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2471   current_ = space->first_chunk_;
2472   size_func_ = NULL;
2473 }
2474 
2475 
2476 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2477                                          HeapObjectCallback size_func) {
2478   current_ = space->first_chunk_;
2479   size_func_ = size_func;
2480 }
2481 
2482 
2483 HeapObject* LargeObjectIterator::next() {
2484   if (current_ == NULL) return NULL;
2485 
2486   HeapObject* object = current_->GetObject();
2487   current_ = current_->next();
2488   return object;
2489 }
2490 
2491 
2492 // -----------------------------------------------------------------------------
2493 // LargeObjectChunk
2494 
2495 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
2496                                         size_t* chunk_size,
2497                                         Executability executable) {
2498   size_t requested = ChunkSizeFor(size_in_bytes);
2499   void* mem = MemoryAllocator::AllocateRawMemory(requested,
2500                                                  chunk_size,
2501                                                  executable);
2502   if (mem == NULL) return NULL;
2503   LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
2504   if (*chunk_size < requested) {
2505     MemoryAllocator::FreeRawMemory(mem, *chunk_size);
2506     LOG(DeleteEvent("LargeObjectChunk", mem));
2507     return NULL;
2508   }
2509   return reinterpret_cast<LargeObjectChunk*>(mem);
2510 }
2511 
2512 
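// Worked example (illustrative): with 8K pages and a 4K OS allocation
// alignment, ChunkSizeFor(size) returns
//   size + (8K - 4K) + Page::kObjectStartOffset
// i.e. enough slack for the page-aligned object area inside the raw chunk.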
2513 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
2514   int os_alignment = static_cast<int>(OS::AllocateAlignment());
2515   if (os_alignment < Page::kPageSize)
2516     size_in_bytes += (Page::kPageSize - os_alignment);
2517   return size_in_bytes + Page::kObjectStartOffset;
2518 }
2519 
2520 // -----------------------------------------------------------------------------
2521 // LargeObjectSpace
2522 
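// Each large object gets a chunk of its own; the chunks form a singly-linked
// list rooted at first_chunk_, and size_ / page_count_ track the total bytes
// and number of chunks committed to this space.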
2523 LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
2524     : Space(id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
2525       first_chunk_(NULL),
2526       size_(0),
2527       page_count_(0) {}
2528 
2529 
2530 bool LargeObjectSpace::Setup() {
2531   first_chunk_ = NULL;
2532   size_ = 0;
2533   page_count_ = 0;
2534   return true;
2535 }
2536 
2537 
2538 void LargeObjectSpace::TearDown() {
2539   while (first_chunk_ != NULL) {
2540     LargeObjectChunk* chunk = first_chunk_;
2541     first_chunk_ = first_chunk_->next();
2542     LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
2543     MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
2544   }
2545 
2546   size_ = 0;
2547   page_count_ = 0;
2548 }
2549 
2550 
2551 #ifdef ENABLE_HEAP_PROTECTION
2552 
2553 void LargeObjectSpace::Protect() {
2554   LargeObjectChunk* chunk = first_chunk_;
2555   while (chunk != NULL) {
2556     MemoryAllocator::Protect(chunk->address(), chunk->size());
2557     chunk = chunk->next();
2558   }
2559 }
2560 
2561 
2562 void LargeObjectSpace::Unprotect() {
2563   LargeObjectChunk* chunk = first_chunk_;
2564   while (chunk != NULL) {
2565     bool is_code = chunk->GetObject()->IsCode();
2566     MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
2567                                is_code ? EXECUTABLE : NOT_EXECUTABLE);
2568     chunk = chunk->next();
2569   }
2570 }
2571 
2572 #endif
2573 
2574 
2575 Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
2576                                               int object_size,
2577                                               Executability executable) {
2578   ASSERT(0 < object_size && object_size <= requested_size);
2579 
2580   // Check if we want to force a GC before growing the old space further.
2581   // If so, fail the allocation.
2582   if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2583     return Failure::RetryAfterGC(requested_size, identity());
2584   }
2585 
2586   size_t chunk_size;
2587   LargeObjectChunk* chunk =
2588       LargeObjectChunk::New(requested_size, &chunk_size, executable);
2589   if (chunk == NULL) {
2590     return Failure::RetryAfterGC(requested_size, identity());
2591   }
2592 
2593   size_ += static_cast<int>(chunk_size);
2594   page_count_++;
2595   chunk->set_next(first_chunk_);
2596   chunk->set_size(chunk_size);
2597   first_chunk_ = chunk;
2598 
2599   // Set the object address and size in the page header and clear its
2600   // remembered set.
2601   Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2602   Address object_address = page->ObjectAreaStart();
2603   // Clear the low order bit of the second word in the page to flag it as a
2604   // large object page.  If the chunk_size happened to be written there, its
2605   // low order bit should already be clear.
2606   ASSERT((chunk_size & 0x1) == 0);
2607   page->is_normal_page &= ~0x1;
2608   page->ClearRSet();
2609   int extra_bytes = requested_size - object_size;
2610   if (extra_bytes > 0) {
2611     // The extra memory for the remembered set should be cleared.
2612     memset(object_address + object_size, 0, extra_bytes);
2613   }
2614 
2615   return HeapObject::FromAddress(object_address);
2616 }
2617 
2618 
2619 Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
2620   ASSERT(0 < size_in_bytes);
2621   return AllocateRawInternal(size_in_bytes,
2622                              size_in_bytes,
2623                              EXECUTABLE);
2624 }
2625 
2626 
2627 Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
2628   ASSERT(0 < size_in_bytes);
2629   int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
2630   return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
2631                              size_in_bytes,
2632                              NOT_EXECUTABLE);
2633 }
2634 
2635 
2636 Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
2637   ASSERT(0 < size_in_bytes);
2638   return AllocateRawInternal(size_in_bytes,
2639                              size_in_bytes,
2640                              NOT_EXECUTABLE);
2641 }
2642 
2643 
2644 // GC support
2645 Object* LargeObjectSpace::FindObject(Address a) {
2646   for (LargeObjectChunk* chunk = first_chunk_;
2647        chunk != NULL;
2648        chunk = chunk->next()) {
2649     Address chunk_address = chunk->address();
2650     if (chunk_address <= a && a < chunk_address + chunk->size()) {
2651       return chunk->GetObject();
2652     }
2653   }
2654   return Failure::Exception();
2655 }
2656 
2657 
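// Fixed arrays in this space can be larger than Page::kObjectAreaSize, so the
// remembered-set bits that do not fit in the page header live in an "extra"
// region placed directly after the object (allocated by AllocateRawFixedArray
// above).  ClearRSet() and IterateRSet() below must clear/scan both regions.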
2658 void LargeObjectSpace::ClearRSet() {
2659   ASSERT(Page::is_rset_in_use());
2660 
2661   LargeObjectIterator it(this);
2662   for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
2663     // We only have code, sequential strings, or fixed arrays in large
2664     // object space, and only fixed arrays need remembered set support.
2665     if (object->IsFixedArray()) {
2666       // Clear the normal remembered set region of the page;
2667       Page* page = Page::FromAddress(object->address());
2668       page->ClearRSet();
2669 
2670       // Clear the extra remembered set.
2671       int size = object->Size();
2672       int extra_rset_bytes = ExtraRSetBytesFor(size);
2673       memset(object->address() + size, 0, extra_rset_bytes);
2674     }
2675   }
2676 }
2677 
2678 
2679 void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
2680   ASSERT(Page::is_rset_in_use());
2681 
2682   static void* lo_rset_histogram = StatsTable::CreateHistogram(
2683       "V8.RSetLO",
2684       0,
2685       // Keeping this histogram's buckets the same as the paged space histogram.
2686       Page::kObjectAreaSize / kPointerSize,
2687       30);
2688 
2689   LargeObjectIterator it(this);
2690   for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
2691     // We only have code, sequential strings, or fixed arrays in large
2692     // object space, and only fixed arrays can possibly contain pointers to
2693     // the young generation.
2694     if (object->IsFixedArray()) {
2695       // Iterate the normal page remembered set range.
2696       Page* page = Page::FromAddress(object->address());
2697       Address object_end = object->address() + object->Size();
2698       int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
2699                                          Min(page->ObjectAreaEnd(), object_end),
2700                                          page->RSetStart(),
2701                                          copy_object_func);
2702 
2703       // Iterate the extra array elements.
2704       if (object_end > page->ObjectAreaEnd()) {
2705         count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
2706                                         object_end, copy_object_func);
2707       }
2708       if (lo_rset_histogram != NULL) {
2709         StatsTable::AddHistogramSample(lo_rset_histogram, count);
2710       }
2711     }
2712   }
2713 }
2714 
2715 
2716 void LargeObjectSpace::FreeUnmarkedObjects() {
2717   LargeObjectChunk* previous = NULL;
2718   LargeObjectChunk* current = first_chunk_;
2719   while (current != NULL) {
2720     HeapObject* object = current->GetObject();
2721     if (object->IsMarked()) {
2722       object->ClearMark();
2723       MarkCompactCollector::tracer()->decrement_marked_count();
2724       previous = current;
2725       current = current->next();
2726     } else {
2727       Address chunk_address = current->address();
2728       size_t chunk_size = current->size();
2729 
2730       // Cut the chunk out from the chunk list.
2731       current = current->next();
2732       if (previous == NULL) {
2733         first_chunk_ = current;
2734       } else {
2735         previous->set_next(current);
2736       }
2737 
2738       // Free the chunk.
2739       MarkCompactCollector::ReportDeleteIfNeeded(object);
2740       size_ -= static_cast<int>(chunk_size);
2741       page_count_--;
2742       MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
2743       LOG(DeleteEvent("LargeObjectChunk", chunk_address));
2744     }
2745   }
2746 }
2747 
2748 
2749 bool LargeObjectSpace::Contains(HeapObject* object) {
2750   Address address = object->address();
2751   Page* page = Page::FromAddress(address);
2752 
2753   SLOW_ASSERT(!page->IsLargeObjectPage()
2754               || !FindObject(address)->IsFailure());
2755 
2756   return page->IsLargeObjectPage();
2757 }
2758 
2759 
2760 #ifdef DEBUG
2761 // We do not assume that the large object iterator works, because it depends
2762 // on the invariants we are checking during verification.
2763 void LargeObjectSpace::Verify() {
2764   for (LargeObjectChunk* chunk = first_chunk_;
2765        chunk != NULL;
2766        chunk = chunk->next()) {
2767     // Each chunk contains an object that starts at the large object page's
2768     // object area start.
2769     HeapObject* object = chunk->GetObject();
2770     Page* page = Page::FromAddress(object->address());
2771     ASSERT(object->address() == page->ObjectAreaStart());
2772 
2773     // The first word should be a map, and we expect all map pointers to be
2774     // in map space.
2775     Map* map = object->map();
2776     ASSERT(map->IsMap());
2777     ASSERT(Heap::map_space()->Contains(map));
2778 
2779     // We have only code, sequential strings, external strings
2780     // (sequential strings that have been morphed into external
2781     // strings), fixed arrays, and byte arrays in large object space.
2782     ASSERT(object->IsCode() || object->IsSeqString() ||
2783            object->IsExternalString() || object->IsFixedArray() ||
2784            object->IsByteArray());
2785 
2786     // The object itself should look OK.
2787     object->Verify();
2788 
2789     // Byte arrays and strings don't have interior pointers.
2790     if (object->IsCode()) {
2791       VerifyPointersVisitor code_visitor;
2792       object->IterateBody(map->instance_type(),
2793                           object->Size(),
2794                           &code_visitor);
2795     } else if (object->IsFixedArray()) {
2796       // We loop over fixed arrays ourselves, rather than using the visitor,
2797       // because the visitor doesn't support the start/offset iteration
2798       // needed for IsRSetSet.
2799       FixedArray* array = FixedArray::cast(object);
2800       for (int j = 0; j < array->length(); j++) {
2801         Object* element = array->get(j);
2802         if (element->IsHeapObject()) {
2803           HeapObject* element_object = HeapObject::cast(element);
2804           ASSERT(Heap::Contains(element_object));
2805           ASSERT(element_object->map()->IsMap());
2806           if (Heap::InNewSpace(element_object)) {
2807             ASSERT(Page::IsRSetSet(object->address(),
2808                                    FixedArray::kHeaderSize + j * kPointerSize));
2809           }
2810         }
2811       }
2812     }
2813   }
2814 }
2815 
2816 
2817 void LargeObjectSpace::Print() {
2818   LargeObjectIterator it(this);
2819   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
2820     obj->Print();
2821   }
2822 }
2823 
2824 
2825 void LargeObjectSpace::ReportStatistics() {
2826   PrintF("  size: %d\n", size_);
2827   int num_objects = 0;
2828   ClearHistograms();
2829   LargeObjectIterator it(this);
2830   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
2831     num_objects++;
2832     CollectHistogramInfo(obj);
2833   }
2834 
2835   PrintF("  number of objects %d\n", num_objects);
2836   if (num_objects > 0) ReportHistogram(false);
2837 }
2838 
2839 
2840 void LargeObjectSpace::CollectCodeStatistics() {
2841   LargeObjectIterator obj_it(this);
2842   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
2843     if (obj->IsCode()) {
2844       Code* code = Code::cast(obj);
2845       code_kind_statistics[code->kind()] += code->Size();
2846     }
2847   }
2848 }
2849 
2850 
2851 void LargeObjectSpace::PrintRSet() {
2852   LargeObjectIterator it(this);
2853   for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
2854     if (object->IsFixedArray()) {
2855       Page* page = Page::FromAddress(object->address());
2856 
2857       Address allocation_top = object->address() + object->Size();
2858       PrintF("large page 0x%x:\n", page);
2859       PrintRSetRange(page->RSetStart(), page->RSetEnd(),
2860                      reinterpret_cast<Object**>(object->address()),
2861                      allocation_top);
2862       int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
2863       int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
2864                                     kBitsPerInt);
2865       PrintF("------------------------------------------------------------"
2866              "-----------\n");
2867       PrintRSetRange(allocation_top,
2868                      allocation_top + extra_rset_bits / kBitsPerByte,
2869                      reinterpret_cast<Object**>(object->address()
2870                                                 + Page::kObjectAreaSize),
2871                      allocation_top);
2872       PrintF("\n");
2873     }
2874   }
2875 }
2876 #endif  // DEBUG
2877 
2878 } }  // namespace v8::internal
2879