1 // Copyright 2006-2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "liveobjectlist-inl.h"
31 #include "macro-assembler.h"
32 #include "mark-compact.h"
33 #include "platform.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 // For contiguous spaces, top should be in the space (or at the end) and limit
39 // should be the end of the space.
40 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
41   ASSERT((space).low() <= (info).top                  \
42          && (info).top <= (space).high()              \
43          && (info).limit == (space).high())
44 
45 // ----------------------------------------------------------------------------
46 // HeapObjectIterator
47 
48 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
49   Initialize(space->bottom(), space->top(), NULL);
50 }
51 
52 
53 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
54                                        HeapObjectCallback size_func) {
55   Initialize(space->bottom(), space->top(), size_func);
56 }
57 
58 
59 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
60   Initialize(start, space->top(), NULL);
61 }
62 
63 
64 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
65                                        HeapObjectCallback size_func) {
66   Initialize(start, space->top(), size_func);
67 }
68 
69 
70 HeapObjectIterator::HeapObjectIterator(Page* page,
71                                        HeapObjectCallback size_func) {
72   Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
73 }
74 
75 
76 void HeapObjectIterator::Initialize(Address cur, Address end,
77                                     HeapObjectCallback size_f) {
78   cur_addr_ = cur;
79   end_addr_ = end;
80   end_page_ = Page::FromAllocationTop(end);
81   size_func_ = size_f;
82   Page* p = Page::FromAllocationTop(cur_addr_);
83   cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
84 
85 #ifdef DEBUG
86   Verify();
87 #endif
88 }
89 
90 
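// Advance the iterator to the first object on the next page, or return NULL
// if the iteration has reached end_addr_.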
91 HeapObject* HeapObjectIterator::FromNextPage() {
92   if (cur_addr_ == end_addr_) return NULL;
93 
94   Page* cur_page = Page::FromAllocationTop(cur_addr_);
95   cur_page = cur_page->next_page();
96   ASSERT(cur_page->is_valid());
97 
98   cur_addr_ = cur_page->ObjectAreaStart();
99   cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
100 
101   if (cur_addr_ == end_addr_) return NULL;
102   ASSERT(cur_addr_ < cur_limit_);
103 #ifdef DEBUG
104   Verify();
105 #endif
106   return FromCurrentPage();
107 }
108 
109 
110 #ifdef DEBUG
111 void HeapObjectIterator::Verify() {
112   Page* p = Page::FromAllocationTop(cur_addr_);
113   ASSERT(p == Page::FromAllocationTop(cur_limit_));
114   ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
115 }
116 #endif
117 
118 
119 // -----------------------------------------------------------------------------
120 // PageIterator
121 
122 PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
123   prev_page_ = NULL;
124   switch (mode) {
125     case PAGES_IN_USE:
126       stop_page_ = space->AllocationTopPage();
127       break;
128     case PAGES_USED_BY_MC:
129       stop_page_ = space->MCRelocationTopPage();
130       break;
131     case ALL_PAGES:
132 #ifdef DEBUG
133       // Verify that the cached last page in the space is actually the
134       // last page.
135       for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
136         if (!p->next_page()->is_valid()) {
137           ASSERT(space->last_page_ == p);
138         }
139       }
140 #endif
141       stop_page_ = space->last_page_;
142       break;
143   }
144 }
145 
146 
147 // -----------------------------------------------------------------------------
148 // CodeRange
149 
150 
151 CodeRange::CodeRange(Isolate* isolate)
152     : isolate_(isolate),
153       code_range_(NULL),
154       free_list_(0),
155       allocation_list_(0),
156       current_allocation_block_index_(0) {
157 }
158 
159 
160 bool CodeRange::Setup(const size_t requested) {
161   ASSERT(code_range_ == NULL);
162 
163   code_range_ = new VirtualMemory(requested);
164   CHECK(code_range_ != NULL);
165   if (!code_range_->IsReserved()) {
166     delete code_range_;
167     code_range_ = NULL;
168     return false;
169   }
170 
171   // We are sure that we have mapped a block of requested addresses.
172   ASSERT(code_range_->size() == requested);
173   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
174   allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
175   current_allocation_block_index_ = 0;
176   return true;
177 }
178 
179 
180 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
181                                        const FreeBlock* right) {
182   // The entire point of CodeRange is that the difference between two
183   // addresses in the range can be represented as a signed 32-bit int,
184   // so the cast is semantically correct.
185   return static_cast<int>(left->start - right->start);
186 }
187 
188 
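// Advance current_allocation_block_index_ to an allocation block that can
// hold 'requested' bytes, merging the free list back into the allocation
// list if necessary.  Fails fatally if no such block can be found.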
189 void CodeRange::GetNextAllocationBlock(size_t requested) {
190   for (current_allocation_block_index_++;
191        current_allocation_block_index_ < allocation_list_.length();
192        current_allocation_block_index_++) {
193     if (requested <= allocation_list_[current_allocation_block_index_].size) {
194       return;  // Found a large enough allocation block.
195     }
196   }
197 
198   // Sort and merge the free blocks on the free list and the allocation list.
199   free_list_.AddAll(allocation_list_);
200   allocation_list_.Clear();
201   free_list_.Sort(&CompareFreeBlockAddress);
202   for (int i = 0; i < free_list_.length();) {
203     FreeBlock merged = free_list_[i];
204     i++;
205     // Add adjacent free blocks to the current merged block.
206     while (i < free_list_.length() &&
207            free_list_[i].start == merged.start + merged.size) {
208       merged.size += free_list_[i].size;
209       i++;
210     }
211     if (merged.size > 0) {
212       allocation_list_.Add(merged);
213     }
214   }
215   free_list_.Clear();
216 
217   for (current_allocation_block_index_ = 0;
218        current_allocation_block_index_ < allocation_list_.length();
219        current_allocation_block_index_++) {
220     if (requested <= allocation_list_[current_allocation_block_index_].size) {
221       return;  // Found a large enough allocation block.
222     }
223   }
224 
225   // Code range is full or too fragmented.
226   V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
227 }
228 
229 
230 
231 void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
232   ASSERT(current_allocation_block_index_ < allocation_list_.length());
233   if (requested > allocation_list_[current_allocation_block_index_].size) {
234     // Find an allocation block large enough.  This function call may
235     // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
236     GetNextAllocationBlock(requested);
237   }
238   // Commit the requested memory at the start of the current allocation block.
239   *allocated = RoundUp(requested, Page::kPageSize);
240   FreeBlock current = allocation_list_[current_allocation_block_index_];
241   if (*allocated >= current.size - Page::kPageSize) {
242     // Don't leave a small free block, useless for a large object or chunk.
243     *allocated = current.size;
244   }
245   ASSERT(*allocated <= current.size);
246   if (!code_range_->Commit(current.start, *allocated, true)) {
247     *allocated = 0;
248     return NULL;
249   }
250   allocation_list_[current_allocation_block_index_].start += *allocated;
251   allocation_list_[current_allocation_block_index_].size -= *allocated;
252   if (*allocated == current.size) {
253     GetNextAllocationBlock(0);  // This block is used up, get the next one.
254   }
255   return current.start;
256 }
257 
258 
259 void CodeRange::FreeRawMemory(void* address, size_t length) {
260   free_list_.Add(FreeBlock(address, length));
261   code_range_->Uncommit(address, length);
262 }
263 
264 
265 void CodeRange::TearDown() {
266   delete code_range_;  // Frees all memory in the virtual memory range.
267   code_range_ = NULL;
268   free_list_.Free();
269   allocation_list_.Free();
270 }
271 
272 
273 // -----------------------------------------------------------------------------
274 // MemoryAllocator
275 //
276 
277 // 270 is an estimate based on the static default heap size of a pair of 256K
278 // semispaces and a 64M old generation.
279 const int kEstimatedNumberOfChunks = 270;
280 
281 
282 MemoryAllocator::MemoryAllocator(Isolate* isolate)
283     : isolate_(isolate),
284       capacity_(0),
285       capacity_executable_(0),
286       size_(0),
287       size_executable_(0),
288       initial_chunk_(NULL),
289       chunks_(kEstimatedNumberOfChunks),
290       free_chunk_ids_(kEstimatedNumberOfChunks),
291       max_nof_chunks_(0),
292       top_(0) {
293 }
294 
295 
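// free_chunk_ids_ is used as a stack of unused chunk ids, with top_ pointing
// just past its topmost element; Push and Pop maintain that stack.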
296 void MemoryAllocator::Push(int free_chunk_id) {
297   ASSERT(max_nof_chunks_ > 0);
298   ASSERT(top_ < max_nof_chunks_);
299   free_chunk_ids_[top_++] = free_chunk_id;
300 }
301 
302 
303 int MemoryAllocator::Pop() {
304   ASSERT(top_ > 0);
305   return free_chunk_ids_[--top_];
306 }
307 
308 
309 bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
310   capacity_ = RoundUp(capacity, Page::kPageSize);
311   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
312   ASSERT_GE(capacity_, capacity_executable_);
313 
314   // Over-estimate the size of chunks_ array.  It assumes the expansion of old
315   // space is always in the unit of a chunk (kChunkSize) except the last
316   // expansion.
317   //
318   // Due to alignment, allocated space might be one page less than required
319   // number (kPagesPerChunk) of pages for old spaces.
320   //
321   // Reserve two chunk ids for semispaces, one for map space, one for old
322   // space, and one for code space.
323   max_nof_chunks_ =
324       static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
325   if (max_nof_chunks_ > kMaxNofChunks) return false;
326 
327   size_ = 0;
328   size_executable_ = 0;
329   ChunkInfo info;  // uninitialized element.
330   for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
331     chunks_.Add(info);
332     free_chunk_ids_.Add(i);
333   }
334   top_ = max_nof_chunks_;
335   return true;
336 }
337 
338 
339 void MemoryAllocator::TearDown() {
340   for (int i = 0; i < max_nof_chunks_; i++) {
341     if (chunks_[i].address() != NULL) DeleteChunk(i);
342   }
343   chunks_.Clear();
344   free_chunk_ids_.Clear();
345 
346   if (initial_chunk_ != NULL) {
347     LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
348     delete initial_chunk_;
349     initial_chunk_ = NULL;
350   }
351 
352   ASSERT(top_ == max_nof_chunks_);  // all chunks are free
353   top_ = 0;
354   capacity_ = 0;
355   capacity_executable_ = 0;
356   size_ = 0;
357   max_nof_chunks_ = 0;
358 }
359 
360 
361 void* MemoryAllocator::AllocateRawMemory(const size_t requested,
362                                          size_t* allocated,
363                                          Executability executable) {
364   if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
365     return NULL;
366   }
367 
368   void* mem;
369   if (executable == EXECUTABLE) {
370     // Check executable memory limit.
371     if (size_executable_ + requested >
372         static_cast<size_t>(capacity_executable_)) {
373       LOG(isolate_,
374           StringEvent("MemoryAllocator::AllocateRawMemory",
375                       "V8 Executable Allocation capacity exceeded"));
376       return NULL;
377     }
378     // Allocate executable memory either from code range or from the
379     // OS.
380     if (isolate_->code_range()->exists()) {
381       mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
382     } else {
383       mem = OS::Allocate(requested, allocated, true);
384     }
385     // Update executable memory size.
386     size_executable_ += static_cast<int>(*allocated);
387   } else {
388     mem = OS::Allocate(requested, allocated, false);
389   }
390   int alloced = static_cast<int>(*allocated);
391   size_ += alloced;
392 
393 #ifdef DEBUG
394   ZapBlock(reinterpret_cast<Address>(mem), alloced);
395 #endif
396   isolate_->counters()->memory_allocated()->Increment(alloced);
397   return mem;
398 }
399 
400 
401 void MemoryAllocator::FreeRawMemory(void* mem,
402                                     size_t length,
403                                     Executability executable) {
404 #ifdef DEBUG
405   ZapBlock(reinterpret_cast<Address>(mem), length);
406 #endif
407   if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
408     isolate_->code_range()->FreeRawMemory(mem, length);
409   } else {
410     OS::Free(mem, length);
411   }
412   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
413   size_ -= static_cast<int>(length);
414   if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
415 
416   ASSERT(size_ >= 0);
417   ASSERT(size_executable_ >= 0);
418 }
419 
420 
421 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
422                                                 AllocationAction action,
423                                                 size_t size) {
424   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
425     MemoryAllocationCallbackRegistration registration =
426       memory_allocation_callbacks_[i];
427     if ((registration.space & space) == space &&
428         (registration.action & action) == action)
429       registration.callback(space, action, static_cast<int>(size));
430   }
431 }
432 
433 
434 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
435     MemoryAllocationCallback callback) {
436   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
437     if (memory_allocation_callbacks_[i].callback == callback) return true;
438   }
439   return false;
440 }
441 
442 
443 void MemoryAllocator::AddMemoryAllocationCallback(
444     MemoryAllocationCallback callback,
445     ObjectSpace space,
446     AllocationAction action) {
447   ASSERT(callback != NULL);
448   MemoryAllocationCallbackRegistration registration(callback, space, action);
449   ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
450   return memory_allocation_callbacks_.Add(registration);
451 }
452 
453 
454 void MemoryAllocator::RemoveMemoryAllocationCallback(
455      MemoryAllocationCallback callback) {
456   ASSERT(callback != NULL);
457   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
458     if (memory_allocation_callbacks_[i].callback == callback) {
459       memory_allocation_callbacks_.Remove(i);
460       return;
461     }
462   }
463   UNREACHABLE();
464 }
465 
466 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
467   ASSERT(initial_chunk_ == NULL);
468 
469   initial_chunk_ = new VirtualMemory(requested);
470   CHECK(initial_chunk_ != NULL);
471   if (!initial_chunk_->IsReserved()) {
472     delete initial_chunk_;
473     initial_chunk_ = NULL;
474     return NULL;
475   }
476 
477   // We are sure that we have mapped a block of requested addresses.
478   ASSERT(initial_chunk_->size() == requested);
479   LOG(isolate_,
480       NewEvent("InitialChunk", initial_chunk_->address(), requested));
481   size_ += static_cast<int>(requested);
482   return initial_chunk_->address();
483 }
484 
485 
486 static int PagesInChunk(Address start, size_t size) {
487   // The first page starts on the first page-aligned address from start onward
488   // and the last page ends on the last page-aligned address before
489   // start+size.  Page::kPageSize is a power of two so we can divide by
490   // shifting.
491   return static_cast<int>((RoundDown(start + size, Page::kPageSize)
492       - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
493 }
494 
495 
496 Page* MemoryAllocator::AllocatePages(int requested_pages,
497                                      int* allocated_pages,
498                                      PagedSpace* owner) {
499   if (requested_pages <= 0) return Page::FromAddress(NULL);
500   size_t chunk_size = requested_pages * Page::kPageSize;
501 
502   void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
503   if (chunk == NULL) return Page::FromAddress(NULL);
504   LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
505 
506   *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
507   // We may 'lose' a page due to alignment.
508   ASSERT(*allocated_pages >= kPagesPerChunk - 1);
509   if (*allocated_pages == 0) {
510     FreeRawMemory(chunk, chunk_size, owner->executable());
511     LOG(isolate_, DeleteEvent("PagedChunk", chunk));
512     return Page::FromAddress(NULL);
513   }
514 
515   int chunk_id = Pop();
516   chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
517 
518   ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
519   PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
520   Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
521 
522   return new_pages;
523 }
524 
525 
526 Page* MemoryAllocator::CommitPages(Address start, size_t size,
527                                    PagedSpace* owner, int* num_pages) {
528   ASSERT(start != NULL);
529   *num_pages = PagesInChunk(start, size);
530   ASSERT(*num_pages > 0);
531   ASSERT(initial_chunk_ != NULL);
532   ASSERT(InInitialChunk(start));
533   ASSERT(InInitialChunk(start + size - 1));
534   if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
535     return Page::FromAddress(NULL);
536   }
537 #ifdef DEBUG
538   ZapBlock(start, size);
539 #endif
540   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
541 
542   // So long as we correctly overestimated the number of chunks we should not
543   // run out of chunk ids.
544   CHECK(!OutOfChunkIds());
545   int chunk_id = Pop();
546   chunks_[chunk_id].init(start, size, owner);
547   return InitializePagesInChunk(chunk_id, *num_pages, owner);
548 }
549 
550 
551 bool MemoryAllocator::CommitBlock(Address start,
552                                   size_t size,
553                                   Executability executable) {
554   ASSERT(start != NULL);
555   ASSERT(size > 0);
556   ASSERT(initial_chunk_ != NULL);
557   ASSERT(InInitialChunk(start));
558   ASSERT(InInitialChunk(start + size - 1));
559 
560   if (!initial_chunk_->Commit(start, size, executable)) return false;
561 #ifdef DEBUG
562   ZapBlock(start, size);
563 #endif
564   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
565   return true;
566 }
567 
568 
569 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
570   ASSERT(start != NULL);
571   ASSERT(size > 0);
572   ASSERT(initial_chunk_ != NULL);
573   ASSERT(InInitialChunk(start));
574   ASSERT(InInitialChunk(start + size - 1));
575 
576   if (!initial_chunk_->Uncommit(start, size)) return false;
577   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
578   return true;
579 }
580 
581 
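// Overwrite a block with kZapValue so that stale pointers into freed or
// uncommitted memory are easier to spot in debug builds.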
582 void MemoryAllocator::ZapBlock(Address start, size_t size) {
583   for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
584     Memory::Address_at(start + s) = kZapValue;
585   }
586 }
587 
588 
589 Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
590                                               PagedSpace* owner) {
591   ASSERT(IsValidChunk(chunk_id));
592   ASSERT(pages_in_chunk > 0);
593 
594   Address chunk_start = chunks_[chunk_id].address();
595 
596   Address low = RoundUp(chunk_start, Page::kPageSize);
597 
598 #ifdef DEBUG
599   size_t chunk_size = chunks_[chunk_id].size();
600   Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
601   ASSERT(pages_in_chunk <=
602         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
603 #endif
604 
605   Address page_addr = low;
606   for (int i = 0; i < pages_in_chunk; i++) {
607     Page* p = Page::FromAddress(page_addr);
608     p->heap_ = owner->heap();
609     p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
610     p->InvalidateWatermark(true);
611     p->SetIsLargeObjectPage(false);
612     p->SetAllocationWatermark(p->ObjectAreaStart());
613     p->SetCachedAllocationWatermark(p->ObjectAreaStart());
614     page_addr += Page::kPageSize;
615   }
616 
617   // Set the next page of the last page to 0.
618   Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
619   last_page->opaque_header = OffsetFrom(0) | chunk_id;
620 
621   return Page::FromAddress(low);
622 }
623 
624 
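// Release, in whole chunks, the pages from 'p' to the end of the page list.
// Pages that share a chunk with pages preceding 'p' cannot be released; in
// that case 'p' is returned, otherwise an invalid page.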
625 Page* MemoryAllocator::FreePages(Page* p) {
626   if (!p->is_valid()) return p;
627 
628   // Find the first page in the same chunk as 'p'
629   Page* first_page = FindFirstPageInSameChunk(p);
630   Page* page_to_return = Page::FromAddress(NULL);
631 
632   if (p != first_page) {
633     // Find the last page in the same chunk as 'p'.
634     Page* last_page = FindLastPageInSameChunk(p);
635     first_page = GetNextPage(last_page);  // first page in next chunk
636 
637     // set the next_page of last_page to NULL
638     SetNextPage(last_page, Page::FromAddress(NULL));
639     page_to_return = p;  // return 'p' when exiting
640   }
641 
642   while (first_page->is_valid()) {
643     int chunk_id = GetChunkId(first_page);
644     ASSERT(IsValidChunk(chunk_id));
645 
646     // Find the first page of the next chunk before deleting this chunk.
647     first_page = GetNextPage(FindLastPageInSameChunk(first_page));
648 
649     // Free the current chunk.
650     DeleteChunk(chunk_id);
651   }
652 
653   return page_to_return;
654 }
655 
656 
657 void MemoryAllocator::FreeAllPages(PagedSpace* space) {
658   for (int i = 0, length = chunks_.length(); i < length; i++) {
659     if (chunks_[i].owner() == space) {
660       DeleteChunk(i);
661     }
662   }
663 }
664 
665 
666 void MemoryAllocator::DeleteChunk(int chunk_id) {
667   ASSERT(IsValidChunk(chunk_id));
668 
669   ChunkInfo& c = chunks_[chunk_id];
670 
671   // We cannot free a chunk contained in the initial chunk because it was not
672   // allocated with AllocateRawMemory.  Instead we uncommit the virtual
673   // memory.
674   if (InInitialChunk(c.address())) {
675     // TODO(1240712): VirtualMemory::Uncommit has a return value which
676     // is ignored here.
677     initial_chunk_->Uncommit(c.address(), c.size());
678     Counters* counters = isolate_->counters();
679     counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
680   } else {
681     LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
682     ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
683     size_t size = c.size();
684     FreeRawMemory(c.address(), size, c.executable());
685     PerformAllocationCallback(space, kAllocationActionFree, size);
686   }
687   c.init(NULL, 0, NULL);
688   Push(chunk_id);
689 }
690 
691 
692 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
693   int chunk_id = GetChunkId(p);
694   ASSERT(IsValidChunk(chunk_id));
695 
696   Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
697   return Page::FromAddress(low);
698 }
699 
700 
701 Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
702   int chunk_id = GetChunkId(p);
703   ASSERT(IsValidChunk(chunk_id));
704 
705   Address chunk_start = chunks_[chunk_id].address();
706   size_t chunk_size = chunks_[chunk_id].size();
707 
708   Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
709   ASSERT(chunk_start <= p->address() && p->address() < high);
710 
711   return Page::FromAddress(high - Page::kPageSize);
712 }
713 
714 
715 #ifdef DEBUG
716 void MemoryAllocator::ReportStatistics() {
717   float pct = static_cast<float>(capacity_ - size_) / capacity_;
718   PrintF("  capacity: %" V8_PTR_PREFIX "d"
719              ", used: %" V8_PTR_PREFIX "d"
720              ", available: %%%d\n\n",
721          capacity_, size_, static_cast<int>(pct*100));
722 }
723 #endif
724 
725 
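// Rebuild the page list of 'space' so that pages appear in the order of the
// chunks backing them, reporting the first page, the last page and the last
// page that was in use before the mark-compact collection.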
726 void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
727                                                  Page** first_page,
728                                                  Page** last_page,
729                                                  Page** last_page_in_use) {
730   Page* first = NULL;
731   Page* last = NULL;
732 
733   for (int i = 0, length = chunks_.length(); i < length; i++) {
734     ChunkInfo& chunk = chunks_[i];
735 
736     if (chunk.owner() == space) {
737       if (first == NULL) {
738         Address low = RoundUp(chunk.address(), Page::kPageSize);
739         first = Page::FromAddress(low);
740       }
741       last = RelinkPagesInChunk(i,
742                                 chunk.address(),
743                                 chunk.size(),
744                                 last,
745                                 last_page_in_use);
746     }
747   }
748 
749   if (first_page != NULL) {
750     *first_page = first;
751   }
752 
753   if (last_page != NULL) {
754     *last_page = last;
755   }
756 }
757 
758 
759 Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
760                                           Address chunk_start,
761                                           size_t chunk_size,
762                                           Page* prev,
763                                           Page** last_page_in_use) {
764   Address page_addr = RoundUp(chunk_start, Page::kPageSize);
765   int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
766 
767   if (prev->is_valid()) {
768     SetNextPage(prev, Page::FromAddress(page_addr));
769   }
770 
771   for (int i = 0; i < pages_in_chunk; i++) {
772     Page* p = Page::FromAddress(page_addr);
773     p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
774     page_addr += Page::kPageSize;
775 
776     p->InvalidateWatermark(true);
777     if (p->WasInUseBeforeMC()) {
778       *last_page_in_use = p;
779     }
780   }
781 
782   // Set the next page of the last page to 0.
783   Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
784   last_page->opaque_header = OffsetFrom(0) | chunk_id;
785 
786   if (last_page->WasInUseBeforeMC()) {
787     *last_page_in_use = last_page;
788   }
789 
790   return last_page;
791 }
792 
793 
794 // -----------------------------------------------------------------------------
795 // PagedSpace implementation
796 
797 PagedSpace::PagedSpace(Heap* heap,
798                        intptr_t max_capacity,
799                        AllocationSpace id,
800                        Executability executable)
801     : Space(heap, id, executable) {
802   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
803                   * Page::kObjectAreaSize;
804   accounting_stats_.Clear();
805 
806   allocation_info_.top = NULL;
807   allocation_info_.limit = NULL;
808 
809   mc_forwarding_info_.top = NULL;
810   mc_forwarding_info_.limit = NULL;
811 }
812 
813 
814 bool PagedSpace::Setup(Address start, size_t size) {
815   if (HasBeenSetup()) return false;
816 
817   int num_pages = 0;
818   // Try to use the virtual memory range passed to us.  If it is too small to
819   // contain at least one page, ignore it and allocate instead.
820   int pages_in_chunk = PagesInChunk(start, size);
821   if (pages_in_chunk > 0) {
822     first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
823         RoundUp(start, Page::kPageSize),
824         Page::kPageSize * pages_in_chunk,
825         this, &num_pages);
826   } else {
827     int requested_pages =
828         Min(MemoryAllocator::kPagesPerChunk,
829             static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
830     first_page_ =
831         Isolate::Current()->memory_allocator()->AllocatePages(
832             requested_pages, &num_pages, this);
833     if (!first_page_->is_valid()) return false;
834   }
835 
836   // We are sure that the first page is valid and that we have at least one
837   // page.
838   ASSERT(first_page_->is_valid());
839   ASSERT(num_pages > 0);
840   accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
841   ASSERT(Capacity() <= max_capacity_);
842 
843   // Sequentially clear region marks in the newly allocated
844   // pages and cache the current last page in the space.
845   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
846     p->SetRegionMarks(Page::kAllRegionsCleanMarks);
847     last_page_ = p;
848   }
849 
850   // Use first_page_ for allocation.
851   SetAllocationInfo(&allocation_info_, first_page_);
852 
853   page_list_is_chunk_ordered_ = true;
854 
855   return true;
856 }
857 
858 
859 bool PagedSpace::HasBeenSetup() {
860   return (Capacity() > 0);
861 }
862 
863 
864 void PagedSpace::TearDown() {
865   Isolate::Current()->memory_allocator()->FreeAllPages(this);
866   first_page_ = NULL;
867   accounting_stats_.Clear();
868 }
869 
870 
871 #ifdef ENABLE_HEAP_PROTECTION
872 
873 void PagedSpace::Protect() {
874   Page* page = first_page_;
875   while (page->is_valid()) {
876     Isolate::Current()->memory_allocator()->ProtectChunkFromPage(page);
877     page = Isolate::Current()->memory_allocator()->
878         FindLastPageInSameChunk(page)->next_page();
879   }
880 }
881 
882 
883 void PagedSpace::Unprotect() {
884   Page* page = first_page_;
885   while (page->is_valid()) {
886     Isolate::Current()->memory_allocator()->UnprotectChunkFromPage(page);
887     page = Isolate::Current()->memory_allocator()->
888         FindLastPageInSameChunk(page)->next_page();
889   }
890 }
891 
892 #endif
893 
894 
895 void PagedSpace::MarkAllPagesClean() {
896   PageIterator it(this, PageIterator::ALL_PAGES);
897   while (it.has_next()) {
898     it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
899   }
900 }
901 
902 
903 MaybeObject* PagedSpace::FindObject(Address addr) {
904   // Note: this function can only be called before or after mark-compact GC
905   // because it accesses map pointers.
906   ASSERT(!heap()->mark_compact_collector()->in_use());
907 
908   if (!Contains(addr)) return Failure::Exception();
909 
910   Page* p = Page::FromAddress(addr);
911   ASSERT(IsUsed(p));
912   Address cur = p->ObjectAreaStart();
913   Address end = p->AllocationTop();
914   while (cur < end) {
915     HeapObject* obj = HeapObject::FromAddress(cur);
916     Address next = cur + obj->Size();
917     if ((cur <= addr) && (addr < next)) return obj;
918     cur = next;
919   }
920 
921   UNREACHABLE();
922   return Failure::Exception();
923 }
924 
925 
926 bool PagedSpace::IsUsed(Page* page) {
927   PageIterator it(this, PageIterator::PAGES_IN_USE);
928   while (it.has_next()) {
929     if (page == it.next()) return true;
930   }
931   return false;
932 }
933 
934 
935 void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
936   alloc_info->top = p->ObjectAreaStart();
937   alloc_info->limit = p->ObjectAreaEnd();
938   ASSERT(alloc_info->VerifyPagedAllocation());
939 }
940 
941 
942 void PagedSpace::MCResetRelocationInfo() {
943   // Set page indexes.
944   int i = 0;
945   PageIterator it(this, PageIterator::ALL_PAGES);
946   while (it.has_next()) {
947     Page* p = it.next();
948     p->mc_page_index = i++;
949   }
950 
951   // Set mc_forwarding_info_ to the first page in the space.
952   SetAllocationInfo(&mc_forwarding_info_, first_page_);
953   // All the bytes in the space are 'available'.  We will rediscover
954   // allocated and wasted bytes during GC.
955   accounting_stats_.Reset();
956 }
957 
958 
959 int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
960 #ifdef DEBUG
961   // The Contains function considers an address at the beginning of a
962   // page to be in that page; MCSpaceOffsetForAddress considers it to be
963   // in the previous page.
964   if (Page::IsAlignedToPageSize(addr)) {
965     ASSERT(Contains(addr - kPointerSize));
966   } else {
967     ASSERT(Contains(addr));
968   }
969 #endif
970 
971   // If addr is at the end of a page, it belongs to the previous page.
972   Page* p = Page::IsAlignedToPageSize(addr)
973             ? Page::FromAllocationTop(addr)
974             : Page::FromAddress(addr);
975   int index = p->mc_page_index;
976   return (index * Page::kPageSize) + p->Offset(addr);
977 }
978 
979 
980 // Slow case for reallocating and promoting objects during a compacting
981 // collection.  This function is not space-specific.
982 HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
983   Page* current_page = TopPageOf(mc_forwarding_info_);
984   if (!current_page->next_page()->is_valid()) {
985     if (!Expand(current_page)) {
986       return NULL;
987     }
988   }
989 
990   // There are surely more pages in the space now.
991   ASSERT(current_page->next_page()->is_valid());
992   // We do not add the top of page block for current page to the space's
993   // free list---the block may contain live objects so we cannot write
994   // bookkeeping information to it.  Instead, we will recover top of page
995   // blocks when we move objects to their new locations.
996   //
997   // We do however write the allocation pointer to the page.  The encoding
998   // of forwarding addresses is as an offset in terms of live bytes, so we
999   // need quick access to the allocation top of each page to decode
1000   // forwarding addresses.
1001   current_page->SetAllocationWatermark(mc_forwarding_info_.top);
1002   current_page->next_page()->InvalidateWatermark(true);
1003   SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
1004   return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
1005 }
1006 
1007 
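// Grow the space by one chunk of pages linked in after 'last_page'.  Returns
// false if the space is already at max_capacity_, if fewer than a chunk's
// worth of pages would fit, or if the chunk allocation fails.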
1008 bool PagedSpace::Expand(Page* last_page) {
1009   ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
1010   ASSERT(Capacity() % Page::kObjectAreaSize == 0);
1011 
1012   if (Capacity() == max_capacity_) return false;
1013 
1014   ASSERT(Capacity() < max_capacity_);
1015   // Last page must be valid and its next page is invalid.
1016   ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
1017 
1018   int available_pages =
1019       static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
1020   // We don't want to have to handle small chunks near the end so if there are
1021   // not kPagesPerChunk pages available without exceeding the max capacity then
1022   // act as if memory has run out.
1023   if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
1024 
1025   int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
1026   Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
1027       desired_pages, &desired_pages, this);
1028   if (!p->is_valid()) return false;
1029 
1030   accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
1031   ASSERT(Capacity() <= max_capacity_);
1032 
1033   heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
1034 
1035   // Sequentially clear region marks of new pages and cache the
1036   // new last page in the space.
1037   while (p->is_valid()) {
1038     p->SetRegionMarks(Page::kAllRegionsCleanMarks);
1039     last_page_ = p;
1040     p = p->next_page();
1041   }
1042 
1043   return true;
1044 }
1045 
1046 
1047 #ifdef DEBUG
1048 int PagedSpace::CountTotalPages() {
1049   int count = 0;
1050   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
1051     count++;
1052   }
1053   return count;
1054 }
1055 #endif
1056 
1057 
1058 void PagedSpace::Shrink() {
1059   if (!page_list_is_chunk_ordered_) {
1060     // We can't shrink the space if the page list is not chunk-ordered
1061     // (see comment for class MemoryAllocator for definition).
1062     return;
1063   }
1064 
1065   // Release half of free pages.
1066   Page* top_page = AllocationTopPage();
1067   ASSERT(top_page->is_valid());
1068 
1069   // Count the number of pages we would like to free.
1070   int pages_to_free = 0;
1071   for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
1072     pages_to_free++;
1073   }
1074 
1075   // Free pages after top_page.
1076   Page* p = heap()->isolate()->memory_allocator()->
1077       FreePages(top_page->next_page());
1078   heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
1079 
1080   // Find out how many pages we failed to free and update last_page_.
1081   // Please note pages can only be freed in whole chunks.
1082   last_page_ = top_page;
1083   for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
1084     pages_to_free--;
1085     last_page_ = p;
1086   }
1087 
1088   accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
1089   ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
1090 }
1091 
1092 
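// Expand the space, one chunk at a time, until its capacity is at least
// 'capacity'.  Returns false if the space cannot be grown far enough.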
1093 bool PagedSpace::EnsureCapacity(int capacity) {
1094   if (Capacity() >= capacity) return true;
1095 
1096   // Start from the allocation top and loop to the last page in the space.
1097   Page* last_page = AllocationTopPage();
1098   Page* next_page = last_page->next_page();
1099   while (next_page->is_valid()) {
1100     last_page = heap()->isolate()->memory_allocator()->
1101         FindLastPageInSameChunk(next_page);
1102     next_page = last_page->next_page();
1103   }
1104 
1105   // Expand the space until it has the required capacity or expansion fails.
1106   do {
1107     if (!Expand(last_page)) return false;
1108     ASSERT(last_page->next_page()->is_valid());
1109     last_page =
1110         heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
1111             last_page->next_page());
1112   } while (Capacity() < capacity);
1113 
1114   return true;
1115 }
1116 
1117 
1118 #ifdef DEBUG
1119 void PagedSpace::Print() { }
1120 #endif
1121 
1122 
1123 #ifdef DEBUG
1124 // We do not assume that the PageIterator works, because it depends on the
1125 // invariants we are checking during verification.
1126 void PagedSpace::Verify(ObjectVisitor* visitor) {
1127   // The allocation pointer should be valid, and it should be in a page in the
1128   // space.
1129   ASSERT(allocation_info_.VerifyPagedAllocation());
1130   Page* top_page = Page::FromAllocationTop(allocation_info_.top);
1131   ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
1132 
1133   // Loop over all the pages.
1134   bool above_allocation_top = false;
1135   Page* current_page = first_page_;
1136   while (current_page->is_valid()) {
1137     if (above_allocation_top) {
1138       // We don't care what's above the allocation top.
1139     } else {
1140       Address top = current_page->AllocationTop();
1141       if (current_page == top_page) {
1142         ASSERT(top == allocation_info_.top);
1143         // The next page will be above the allocation top.
1144         above_allocation_top = true;
1145       }
1146 
1147       // It should be packed with objects from the bottom to the top.
1148       Address current = current_page->ObjectAreaStart();
1149       while (current < top) {
1150         HeapObject* object = HeapObject::FromAddress(current);
1151 
1152         // The first word should be a map, and we expect all map pointers to
1153         // be in map space.
1154         Map* map = object->map();
1155         ASSERT(map->IsMap());
1156         ASSERT(heap()->map_space()->Contains(map));
1157 
1158         // Perform space-specific object verification.
1159         VerifyObject(object);
1160 
1161         // The object itself should look OK.
1162         object->Verify();
1163 
1164         // All the interior pointers should be contained in the heap and
1165         // have page regions covering intergenerational references should be
1166         // marked dirty.
1167         int size = object->Size();
1168         object->IterateBody(map->instance_type(), size, visitor);
1169 
1170         current += size;
1171       }
1172 
1173       // The allocation pointer should not be in the middle of an object.
1174       ASSERT(current == top);
1175     }
1176 
1177     current_page = current_page->next_page();
1178   }
1179 }
1180 #endif
1181 
1182 
1183 // -----------------------------------------------------------------------------
1184 // NewSpace implementation
1185 
1186 
1187 bool NewSpace::Setup(Address start, int size) {
1188   // Setup new space based on the preallocated memory block defined by
1189   // start and size. The provided space is divided into two semi-spaces.
1190   // To support fast containment testing in the new space, the size of
1191   // this chunk must be a power of two and it must be aligned to its size.
1192   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1193   int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();
1194 
1195   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1196   ASSERT(IsPowerOf2(maximum_semispace_capacity));
1197 
1198   // Allocate and setup the histogram arrays if necessary.
1199 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1200   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1201   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1202 
1203 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1204                        promoted_histogram_[name].set_name(#name);
1205   INSTANCE_TYPE_LIST(SET_NAME)
1206 #undef SET_NAME
1207 #endif
1208 
1209   ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
1210   ASSERT(IsAddressAligned(start, size, 0));
1211 
1212   if (!to_space_.Setup(start,
1213                        initial_semispace_capacity,
1214                        maximum_semispace_capacity)) {
1215     return false;
1216   }
1217   if (!from_space_.Setup(start + maximum_semispace_capacity,
1218                          initial_semispace_capacity,
1219                          maximum_semispace_capacity)) {
1220     return false;
1221   }
1222 
1223   start_ = start;
1224   address_mask_ = ~(size - 1);
1225   object_mask_ = address_mask_ | kHeapObjectTagMask;
1226   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1227 
1228   allocation_info_.top = to_space_.low();
1229   allocation_info_.limit = to_space_.high();
1230   mc_forwarding_info_.top = NULL;
1231   mc_forwarding_info_.limit = NULL;
1232 
1233   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1234   return true;
1235 }
1236 
1237 
1238 void NewSpace::TearDown() {
1239 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1240   if (allocated_histogram_) {
1241     DeleteArray(allocated_histogram_);
1242     allocated_histogram_ = NULL;
1243   }
1244   if (promoted_histogram_) {
1245     DeleteArray(promoted_histogram_);
1246     promoted_histogram_ = NULL;
1247   }
1248 #endif
1249 
1250   start_ = NULL;
1251   allocation_info_.top = NULL;
1252   allocation_info_.limit = NULL;
1253   mc_forwarding_info_.top = NULL;
1254   mc_forwarding_info_.limit = NULL;
1255 
1256   to_space_.TearDown();
1257   from_space_.TearDown();
1258 }
1259 
1260 
1261 #ifdef ENABLE_HEAP_PROTECTION
1262 
1263 void NewSpace::Protect() {
1264   heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
1265   heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
1266 }
1267 
1268 
1269 void NewSpace::Unprotect() {
1270   heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
1271                                                    to_space_.executable());
1272   heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
1273                                                    from_space_.executable());
1274 }
1275 
1276 #endif
1277 
1278 
1279 void NewSpace::Flip() {
1280   SemiSpace tmp = from_space_;
1281   from_space_ = to_space_;
1282   to_space_ = tmp;
1283 }
1284 
1285 
1286 void NewSpace::Grow() {
1287   ASSERT(Capacity() < MaximumCapacity());
1288   if (to_space_.Grow()) {
1289     // Only grow from space if we managed to grow to space.
1290     if (!from_space_.Grow()) {
1291       // If we managed to grow to space but couldn't grow from space,
1292       // attempt to shrink to space.
1293       if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1294         // We are in an inconsistent state because we could not
1295         // commit/uncommit memory from new space.
1296         V8::FatalProcessOutOfMemory("Failed to grow new space.");
1297       }
1298     }
1299   }
1300   allocation_info_.limit = to_space_.high();
1301   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1302 }
1303 
1304 
1305 void NewSpace::Shrink() {
1306   int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
1307   int rounded_new_capacity =
1308       RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
1309   if (rounded_new_capacity < Capacity() &&
1310       to_space_.ShrinkTo(rounded_new_capacity))  {
1311     // Only shrink from space if we managed to shrink to space.
1312     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1313       // If we managed to shrink to space but couldn't shrink from
1314       // space, attempt to grow to space again.
1315       if (!to_space_.GrowTo(from_space_.Capacity())) {
1316         // We are in an inconsistent state because we could not
1317         // commit/uncommit memory from new space.
1318         V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1319       }
1320     }
1321   }
1322   allocation_info_.limit = to_space_.high();
1323   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1324 }
1325 
1326 
1327 void NewSpace::ResetAllocationInfo() {
1328   allocation_info_.top = to_space_.low();
1329   allocation_info_.limit = to_space_.high();
1330   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1331 }
1332 
1333 
1334 void NewSpace::MCResetRelocationInfo() {
1335   mc_forwarding_info_.top = from_space_.low();
1336   mc_forwarding_info_.limit = from_space_.high();
1337   ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
1338 }
1339 
1340 
1341 void NewSpace::MCCommitRelocationInfo() {
1342   // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
1343   // valid allocation info for the to space.
1344   allocation_info_.top = mc_forwarding_info_.top;
1345   allocation_info_.limit = to_space_.high();
1346   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1347 }
1348 
1349 
1350 #ifdef DEBUG
1351 // We do not use the SemispaceIterator because verification doesn't assume
1352 // that it works (it depends on the invariants we are checking).
1353 void NewSpace::Verify() {
1354   // The allocation pointer should be in the space or at the very end.
1355   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1356 
1357   // There should be objects packed in from the low address up to the
1358   // allocation pointer.
1359   Address current = to_space_.low();
1360   while (current < top()) {
1361     HeapObject* object = HeapObject::FromAddress(current);
1362 
1363     // The first word should be a map, and we expect all map pointers to
1364     // be in map space.
1365     Map* map = object->map();
1366     ASSERT(map->IsMap());
1367     ASSERT(heap()->map_space()->Contains(map));
1368 
1369     // The object should not be code or a map.
1370     ASSERT(!object->IsMap());
1371     ASSERT(!object->IsCode());
1372 
1373     // The object itself should look OK.
1374     object->Verify();
1375 
1376     // All the interior pointers should be contained in the heap.
1377     VerifyPointersVisitor visitor;
1378     int size = object->Size();
1379     object->IterateBody(map->instance_type(), size, &visitor);
1380 
1381     current += size;
1382   }
1383 
1384   // The allocation pointer should not be in the middle of an object.
1385   ASSERT(current == top());
1386 }
1387 #endif
1388 
1389 
1390 bool SemiSpace::Commit() {
1391   ASSERT(!is_committed());
1392   if (!heap()->isolate()->memory_allocator()->CommitBlock(
1393       start_, capacity_, executable())) {
1394     return false;
1395   }
1396   committed_ = true;
1397   return true;
1398 }
1399 
1400 
1401 bool SemiSpace::Uncommit() {
1402   ASSERT(is_committed());
1403   if (!heap()->isolate()->memory_allocator()->UncommitBlock(
1404       start_, capacity_)) {
1405     return false;
1406   }
1407   committed_ = false;
1408   return true;
1409 }
1410 
1411 
1412 // -----------------------------------------------------------------------------
1413 // SemiSpace implementation
1414 
1415 bool SemiSpace::Setup(Address start,
1416                       int initial_capacity,
1417                       int maximum_capacity) {
1418   // Creates a space in the young generation. The constructor does not
1419   // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
1420   // memory of size 'capacity' when set up, and does not grow or shrink
1421   // otherwise.  In the mark-compact collector, the memory region of the from
1422   // space is used as the marking stack. It requires contiguous memory
1423   // addresses.
1424   initial_capacity_ = initial_capacity;
1425   capacity_ = initial_capacity;
1426   maximum_capacity_ = maximum_capacity;
1427   committed_ = false;
1428 
1429   start_ = start;
1430   address_mask_ = ~(maximum_capacity - 1);
1431   object_mask_ = address_mask_ | kHeapObjectTagMask;
1432   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1433   age_mark_ = start_;
1434 
1435   return Commit();
1436 }
1437 
1438 
1439 void SemiSpace::TearDown() {
1440   start_ = NULL;
1441   capacity_ = 0;
1442 }
1443 
1444 
1445 bool SemiSpace::Grow() {
1446   // Double the semispace size but only up to maximum capacity.
1447   int maximum_extra = maximum_capacity_ - capacity_;
1448   int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
1449                   maximum_extra);
1450   if (!heap()->isolate()->memory_allocator()->CommitBlock(
1451       high(), extra, executable())) {
1452     return false;
1453   }
1454   capacity_ += extra;
1455   return true;
1456 }
1457 
1458 
1459 bool SemiSpace::GrowTo(int new_capacity) {
1460   ASSERT(new_capacity <= maximum_capacity_);
1461   ASSERT(new_capacity > capacity_);
1462   size_t delta = new_capacity - capacity_;
1463   ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1464   if (!heap()->isolate()->memory_allocator()->CommitBlock(
1465       high(), delta, executable())) {
1466     return false;
1467   }
1468   capacity_ = new_capacity;
1469   return true;
1470 }
1471 
1472 
1473 bool SemiSpace::ShrinkTo(int new_capacity) {
1474   ASSERT(new_capacity >= initial_capacity_);
1475   ASSERT(new_capacity < capacity_);
1476   size_t delta = capacity_ - new_capacity;
1477   ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1478   if (!heap()->isolate()->memory_allocator()->UncommitBlock(
1479       high() - delta, delta)) {
1480     return false;
1481   }
1482   capacity_ = new_capacity;
1483   return true;
1484 }
1485 
1486 
1487 #ifdef DEBUG
1488 void SemiSpace::Print() { }
1489 
1490 
1491 void SemiSpace::Verify() { }
1492 #endif
1493 
1494 
1495 // -----------------------------------------------------------------------------
1496 // SemiSpaceIterator implementation.
1497 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1498   Initialize(space, space->bottom(), space->top(), NULL);
1499 }
1500 
1501 
1502 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1503                                      HeapObjectCallback size_func) {
1504   Initialize(space, space->bottom(), space->top(), size_func);
1505 }
1506 
1507 
1508 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1509   Initialize(space, start, space->top(), NULL);
1510 }
1511 
1512 
1513 void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
1514                                    Address end,
1515                                    HeapObjectCallback size_func) {
1516   ASSERT(space->ToSpaceContains(start));
1517   ASSERT(space->ToSpaceLow() <= end
1518          && end <= space->ToSpaceHigh());
1519   space_ = &space->to_space_;
1520   current_ = start;
1521   limit_ = end;
1522   size_func_ = size_func;
1523 }
1524 
1525 
1526 #ifdef DEBUG
1527 // heap_histograms is shared, always clear it before using it.
1528 static void ClearHistograms() {
1529   Isolate* isolate = Isolate::Current();
1530   // We reset the name each time, though it hasn't changed.
1531 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1532   INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1533 #undef DEF_TYPE_NAME
1534 
1535 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1536   INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1537 #undef CLEAR_HISTOGRAM
1538 
1539   isolate->js_spill_information()->Clear();
1540 }
1541 
1542 
1543 static void ClearCodeKindStatistics() {
1544   Isolate* isolate = Isolate::Current();
1545   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1546     isolate->code_kind_statistics()[i] = 0;
1547   }
1548 }
1549 
1550 
1551 static void ReportCodeKindStatistics() {
1552   Isolate* isolate = Isolate::Current();
1553   const char* table[Code::NUMBER_OF_KINDS] = { NULL };
1554 
1555 #define CASE(name)                            \
1556   case Code::name: table[Code::name] = #name; \
1557   break
1558 
1559   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1560     switch (static_cast<Code::Kind>(i)) {
1561       CASE(FUNCTION);
1562       CASE(OPTIMIZED_FUNCTION);
1563       CASE(STUB);
1564       CASE(BUILTIN);
1565       CASE(LOAD_IC);
1566       CASE(KEYED_LOAD_IC);
1567       CASE(KEYED_EXTERNAL_ARRAY_LOAD_IC);
1568       CASE(STORE_IC);
1569       CASE(KEYED_STORE_IC);
1570       CASE(KEYED_EXTERNAL_ARRAY_STORE_IC);
1571       CASE(CALL_IC);
1572       CASE(KEYED_CALL_IC);
1573       CASE(TYPE_RECORDING_BINARY_OP_IC);
1574       CASE(COMPARE_IC);
1575     }
1576   }
1577 
1578 #undef CASE
1579 
1580   PrintF("\n   Code kind histograms: \n");
1581   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1582     if (isolate->code_kind_statistics()[i] > 0) {
1583       PrintF("     %-20s: %10d bytes\n", table[i],
1584           isolate->code_kind_statistics()[i]);
1585     }
1586   }
1587   PrintF("\n");
1588 }
1589 
1590 
1591 static int CollectHistogramInfo(HeapObject* obj) {
1592   Isolate* isolate = Isolate::Current();
1593   InstanceType type = obj->map()->instance_type();
1594   ASSERT(0 <= type && type <= LAST_TYPE);
1595   ASSERT(isolate->heap_histograms()[type].name() != NULL);
1596   isolate->heap_histograms()[type].increment_number(1);
1597   isolate->heap_histograms()[type].increment_bytes(obj->Size());
1598 
1599   if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1600     JSObject::cast(obj)->IncrementSpillStatistics(
1601         isolate->js_spill_information());
1602   }
1603 
1604   return obj->Size();
1605 }
1606 
1607 
1608 static void ReportHistogram(bool print_spill) {
1609   Isolate* isolate = Isolate::Current();
1610   PrintF("\n  Object Histogram:\n");
1611   for (int i = 0; i <= LAST_TYPE; i++) {
1612     if (isolate->heap_histograms()[i].number() > 0) {
1613       PrintF("    %-34s%10d (%10d bytes)\n",
1614              isolate->heap_histograms()[i].name(),
1615              isolate->heap_histograms()[i].number(),
1616              isolate->heap_histograms()[i].bytes());
1617     }
1618   }
1619   PrintF("\n");
1620 
1621   // Summarize string types.
1622   int string_number = 0;
1623   int string_bytes = 0;
1624 #define INCREMENT(type, size, name, camel_name)      \
1625     string_number += isolate->heap_histograms()[type].number(); \
1626     string_bytes += isolate->heap_histograms()[type].bytes();
1627   STRING_TYPE_LIST(INCREMENT)
1628 #undef INCREMENT
1629   if (string_number > 0) {
1630     PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1631            string_bytes);
1632   }
1633 
1634   if (FLAG_collect_heap_spill_statistics && print_spill) {
1635     isolate->js_spill_information()->Print();
1636   }
1637 }
1638 #endif  // DEBUG
1639 
1640 
1641 // Support for statistics gathering for --heap-stats and --log-gc.
1642 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1643 void NewSpace::ClearHistograms() {
1644   for (int i = 0; i <= LAST_TYPE; i++) {
1645     allocated_histogram_[i].clear();
1646     promoted_histogram_[i].clear();
1647   }
1648 }
1649 
1650 // Because the copying collector does not touch garbage objects, we iterate
1651 // the new space before a collection to get a histogram of allocated objects.
1652 // This only happens (1) when compiled with DEBUG and the --heap-stats flag is
1653 // set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
1654 // flag is set.
1655 void NewSpace::CollectStatistics() {
1656   ClearHistograms();
1657   SemiSpaceIterator it(this);
1658   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1659     RecordAllocation(obj);
1660 }
1661 
1662 
1663 #ifdef ENABLE_LOGGING_AND_PROFILING
1664 static void DoReportStatistics(Isolate* isolate,
1665                                HistogramInfo* info, const char* description) {
1666   LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1667   // Lump all the string types together.
1668   int string_number = 0;
1669   int string_bytes = 0;
1670 #define INCREMENT(type, size, name, camel_name)       \
1671     string_number += info[type].number();             \
1672     string_bytes += info[type].bytes();
1673   STRING_TYPE_LIST(INCREMENT)
1674 #undef INCREMENT
1675   if (string_number > 0) {
1676     LOG(isolate,
1677         HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1678   }
1679 
1680   // Then do the other types.
1681   for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1682     if (info[i].number() > 0) {
1683       LOG(isolate,
1684           HeapSampleItemEvent(info[i].name(), info[i].number(),
1685                               info[i].bytes()));
1686     }
1687   }
1688   LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1689 }
1690 #endif  // ENABLE_LOGGING_AND_PROFILING
1691 
1692 
1693 void NewSpace::ReportStatistics() {
1694 #ifdef DEBUG
1695   if (FLAG_heap_stats) {
1696     float pct = static_cast<float>(Available()) / Capacity();
1697     PrintF("  capacity: %" V8_PTR_PREFIX "d"
1698                ", available: %" V8_PTR_PREFIX "d, %%%d\n",
1699            Capacity(), Available(), static_cast<int>(pct*100));
1700     PrintF("\n  Object Histogram:\n");
1701     for (int i = 0; i <= LAST_TYPE; i++) {
1702       if (allocated_histogram_[i].number() > 0) {
1703         PrintF("    %-34s%10d (%10d bytes)\n",
1704                allocated_histogram_[i].name(),
1705                allocated_histogram_[i].number(),
1706                allocated_histogram_[i].bytes());
1707       }
1708     }
1709     PrintF("\n");
1710   }
1711 #endif  // DEBUG
1712 
1713 #ifdef ENABLE_LOGGING_AND_PROFILING
1714   if (FLAG_log_gc) {
1715     Isolate* isolate = ISOLATE;
1716     DoReportStatistics(isolate, allocated_histogram_, "allocated");
1717     DoReportStatistics(isolate, promoted_histogram_, "promoted");
1718   }
1719 #endif  // ENABLE_LOGGING_AND_PROFILING
1720 }
1721 
1722 
1723 void NewSpace::RecordAllocation(HeapObject* obj) {
1724   InstanceType type = obj->map()->instance_type();
1725   ASSERT(0 <= type && type <= LAST_TYPE);
1726   allocated_histogram_[type].increment_number(1);
1727   allocated_histogram_[type].increment_bytes(obj->Size());
1728 }
1729 
1730 
1731 void NewSpace::RecordPromotion(HeapObject* obj) {
1732   InstanceType type = obj->map()->instance_type();
1733   ASSERT(0 <= type && type <= LAST_TYPE);
1734   promoted_histogram_[type].increment_number(1);
1735   promoted_histogram_[type].increment_bytes(obj->Size());
1736 }
1737 #endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1738 
1739 
1740 // -----------------------------------------------------------------------------
1741 // Free lists for old object spaces implementation
1742 
1743 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
1744   ASSERT(size_in_bytes > 0);
1745   ASSERT(IsAligned(size_in_bytes, kPointerSize));
1746 
1747   // We write a map and possibly size information to the block.  If the block
1748   // is big enough to be a ByteArray with at least one extra word (the next
1749   // pointer), we set its map to be the byte array map and its size to an
1750   // appropriate array length for the desired size from HeapObject::Size().
1751   // If the block is too small (e.g., one or two words) to hold both a size
1752   // field and a next pointer, we give it a filler map that gives it the
1753   // correct size.
1754   if (size_in_bytes > ByteArray::kHeaderSize) {
1755     set_map(heap->raw_unchecked_byte_array_map());
1756     // Can't use ByteArray::cast because it fails during deserialization.
1757     ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
1758     this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
1759   } else if (size_in_bytes == kPointerSize) {
1760     set_map(heap->raw_unchecked_one_pointer_filler_map());
1761   } else if (size_in_bytes == 2 * kPointerSize) {
1762     set_map(heap->raw_unchecked_two_pointer_filler_map());
1763   } else {
1764     UNREACHABLE();
1765   }
1766   // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1767   // deserialization because the byte array map is not done yet.
1768 }
1769 
1770 
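// A free-list node reuses the layout of a real heap object so the sweeper can
// treat it like any other object:
//   blocks of 3+ words: [ byte array map | length | next | unused... ]
//   2-word blocks:      [ two-pointer filler map | next ]
//   1-word blocks:      [ one-pointer filler map ] (too small to carry a next
//                         pointer, so they are never linked into a list)
// Hence the next pointer lives at kNextOffset for byte-array-shaped nodes and
// directly after the map word otherwise.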
1771 Address FreeListNode::next(Heap* heap) {
1772   ASSERT(IsFreeListNode(this));
1773   if (map() == heap->raw_unchecked_byte_array_map()) {
1774     ASSERT(Size() >= kNextOffset + kPointerSize);
1775     return Memory::Address_at(address() + kNextOffset);
1776   } else {
1777     return Memory::Address_at(address() + kPointerSize);
1778   }
1779 }
1780 
1781 
1782 void FreeListNode::set_next(Heap* heap, Address next) {
1783   ASSERT(IsFreeListNode(this));
1784   if (map() == heap->raw_unchecked_byte_array_map()) {
1785     ASSERT(Size() >= kNextOffset + kPointerSize);
1786     Memory::Address_at(address() + kNextOffset) = next;
1787   } else {
1788     Memory::Address_at(address() + kPointerSize) = next;
1789   }
1790 }
1791 
1792 
1793 OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner)
1794   : heap_(heap),
1795     owner_(owner) {
1796   Reset();
1797 }
1798 
1799 
1800 void OldSpaceFreeList::Reset() {
1801   available_ = 0;
1802   for (int i = 0; i < kFreeListsLength; i++) {
1803     free_[i].head_node_ = NULL;
1804   }
1805   needs_rebuild_ = false;
1806   finger_ = kHead;
1807   free_[kHead].next_size_ = kEnd;
1808 }
1809 
1810 
1811 void OldSpaceFreeList::RebuildSizeList() {
1812   ASSERT(needs_rebuild_);
1813   int cur = kHead;
1814   for (int i = cur + 1; i < kFreeListsLength; i++) {
1815     if (free_[i].head_node_ != NULL) {
1816       free_[cur].next_size_ = i;
1817       cur = i;
1818     }
1819   }
1820   free_[cur].next_size_ = kEnd;
1821   needs_rebuild_ = false;
1822 }
1823 
1824 
1825 int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
1826 #ifdef DEBUG
1827   Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
1828 #endif
1829   FreeListNode* node = FreeListNode::FromAddress(start);
1830   node->set_size(heap_, size_in_bytes);
1831 
1832   // We don't use the freelists in compacting mode.  This makes it more like a
1833   // GC that only has mark-sweep-compact and doesn't have a mark-sweep
1834   // collector.
1835   if (FLAG_always_compact) {
1836     return size_in_bytes;
1837   }
1838 
1839   // Early return to drop too-small blocks on the floor (one or two word
1840   // blocks cannot hold a map pointer, a size field, and a pointer to the
1841   // next block in the free list).
1842   if (size_in_bytes < kMinBlockSize) {
1843     return size_in_bytes;
1844   }
1845 
1846   // Insert other blocks at the head of an exact free list.
1847   int index = size_in_bytes >> kPointerSizeLog2;
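  // The free lists are indexed by block size in words: on a 32-bit build
  // (kPointerSizeLog2 == 2) a 40 byte block, for example, goes on free_[10],
  // the list of 10-word blocks.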
1848   node->set_next(heap_, free_[index].head_node_);
1849   free_[index].head_node_ = node->address();
1850   available_ += size_in_bytes;
1851   needs_rebuild_ = true;
1852   return 0;
1853 }
1854 
1855 
1856 MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
1857   ASSERT(0 < size_in_bytes);
1858   ASSERT(size_in_bytes <= kMaxBlockSize);
1859   ASSERT(IsAligned(size_in_bytes, kPointerSize));
1860 
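  // free_[i] holds blocks of exactly i words; next_size_ threads the
  // non-empty indices in increasing order and finger_ remembers where the
  // previous search stopped.  Try an exact fit first, otherwise take the
  // smallest sufficiently large block and either re-list the remainder or,
  // if it is smaller than kMinBlockSize, count it as wasted.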
1861   if (needs_rebuild_) RebuildSizeList();
1862   int index = size_in_bytes >> kPointerSizeLog2;
1863   // Check for a perfect fit.
1864   if (free_[index].head_node_ != NULL) {
1865     FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
1866     // If this was the last block of its size, remove the size.
1867     if ((free_[index].head_node_ = node->next(heap_)) == NULL)
1868       RemoveSize(index);
1869     available_ -= size_in_bytes;
1870     *wasted_bytes = 0;
1871     ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
1872     return node;
1873   }
1874   // Search the size list for the best fit.
1875   int prev = finger_ < index ? finger_ : kHead;
1876   int cur = FindSize(index, &prev);
1877   ASSERT(index < cur);
1878   if (cur == kEnd) {
1879     // No large enough size in list.
1880     *wasted_bytes = 0;
1881     return Failure::RetryAfterGC(owner_);
1882   }
1883   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
1884   int rem = cur - index;
1885   int rem_bytes = rem << kPointerSizeLog2;
1886   FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
1887   ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
1888   FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
1889                                                      size_in_bytes);
1890   // Distinguish the cases prev < rem < cur and rem <= prev < cur
1891   // to avoid many redundant tests and calls to Insert/RemoveSize.
1892   if (prev < rem) {
1893     // Simple case: insert rem between prev and cur.
1894     finger_ = prev;
1895     free_[prev].next_size_ = rem;
1896     // If this was the last block of size cur, remove the size.
1897     if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
1898       free_[rem].next_size_ = free_[cur].next_size_;
1899     } else {
1900       free_[rem].next_size_ = cur;
1901     }
1902     // Add the remainder block.
1903     rem_node->set_size(heap_, rem_bytes);
1904     rem_node->set_next(heap_, free_[rem].head_node_);
1905     free_[rem].head_node_ = rem_node->address();
1906   } else {
1907     // If this was the last block of size cur, remove the size.
1908     if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
1909       finger_ = prev;
1910       free_[prev].next_size_ = free_[cur].next_size_;
1911     }
1912     if (rem_bytes < kMinBlockSize) {
1913       // Too-small remainder is wasted.
1914       rem_node->set_size(heap_, rem_bytes);
1915       available_ -= size_in_bytes + rem_bytes;
1916       *wasted_bytes = rem_bytes;
1917       return cur_node;
1918     }
1919     // Add the remainder block and, if needed, insert its size.
1920     rem_node->set_size(heap_, rem_bytes);
1921     rem_node->set_next(heap_, free_[rem].head_node_);
1922     free_[rem].head_node_ = rem_node->address();
1923     if (rem_node->next(heap_) == NULL) InsertSize(rem);
1924   }
1925   available_ -= size_in_bytes;
1926   *wasted_bytes = 0;
1927   return cur_node;
1928 }
1929 
1930 
1931 void OldSpaceFreeList::MarkNodes() {
1932   for (int i = 0; i < kFreeListsLength; i++) {
1933     Address cur_addr = free_[i].head_node_;
1934     while (cur_addr != NULL) {
1935       FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1936       cur_addr = cur_node->next(heap_);
1937       cur_node->SetMark();
1938     }
1939   }
1940 }
1941 
1942 
1943 #ifdef DEBUG
1944 bool OldSpaceFreeList::Contains(FreeListNode* node) {
1945   for (int i = 0; i < kFreeListsLength; i++) {
1946     Address cur_addr = free_[i].head_node_;
1947     while (cur_addr != NULL) {
1948       FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1949       if (cur_node == node) return true;
1950       cur_addr = cur_node->next(heap_);
1951     }
1952   }
1953   return false;
1954 }
1955 #endif
1956 
1957 
1958 FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
1959                                      AllocationSpace owner,
1960                                      int object_size)
1961     : heap_(heap), owner_(owner), object_size_(object_size) {
1962   Reset();
1963 }
1964 
1965 
1966 void FixedSizeFreeList::Reset() {
1967   available_ = 0;
1968   head_ = tail_ = NULL;
1969 }
1970 
1971 
1972 void FixedSizeFreeList::Free(Address start) {
1973 #ifdef DEBUG
1974   Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
1975 #endif
1976   // We only use the freelists with mark-sweep.
1977   ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
1978   FreeListNode* node = FreeListNode::FromAddress(start);
1979   node->set_size(heap_, object_size_);
1980   node->set_next(heap_, NULL);
1981   if (head_ == NULL) {
1982     tail_ = head_ = node->address();
1983   } else {
1984     FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
1985     tail_ = node->address();
1986   }
1987   available_ += object_size_;
1988 }
1989 
1990 
1991 MaybeObject* FixedSizeFreeList::Allocate() {
1992   if (head_ == NULL) {
1993     return Failure::RetryAfterGC(owner_);
1994   }
1995 
1996   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
1997   FreeListNode* node = FreeListNode::FromAddress(head_);
1998   head_ = node->next(heap_);
1999   available_ -= object_size_;
2000   return node;
2001 }
2002 
2003 
2004 void FixedSizeFreeList::MarkNodes() {
2005   Address cur_addr = head_;
2006   while (cur_addr != NULL && cur_addr != tail_) {
2007     FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
2008     cur_addr = cur_node->next(heap_);
2009     cur_node->SetMark();
2010   }
2011 }
2012 
2013 
2014 // -----------------------------------------------------------------------------
2015 // OldSpace implementation
2016 
2017 void OldSpace::PrepareForMarkCompact(bool will_compact) {
2018   // Call prepare of the super class.
2019   PagedSpace::PrepareForMarkCompact(will_compact);
2020 
2021   if (will_compact) {
2022     // Reset relocation info.  During a compacting collection, everything in
2023     // the space is considered 'available' and we will rediscover live data
2024     // and waste during the collection.
2025     MCResetRelocationInfo();
2026     ASSERT(Available() == Capacity());
2027   } else {
2028     // During a non-compacting collection, everything below the linear
2029     // allocation pointer is considered allocated (everything above is
2030     // available) and we will rediscover available and wasted bytes during
2031     // the collection.
2032     accounting_stats_.AllocateBytes(free_list_.available());
2033     accounting_stats_.FillWastedBytes(Waste());
2034   }
2035 
2036   // Clear the free list before a full GC---it will be rebuilt afterward.
2037   free_list_.Reset();
2038 }
2039 
2040 
2041 void OldSpace::MCCommitRelocationInfo() {
2042   // Update fast allocation info.
2043   allocation_info_.top = mc_forwarding_info_.top;
2044   allocation_info_.limit = mc_forwarding_info_.limit;
2045   ASSERT(allocation_info_.VerifyPagedAllocation());
2046 
2047   // The space is compacted and we haven't yet built free lists or
2048   // wasted any space.
2049   ASSERT(Waste() == 0);
2050   ASSERT(AvailableFree() == 0);
2051 
2052   // Build the free list for the space.
2053   int computed_size = 0;
2054   PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2055   while (it.has_next()) {
2056     Page* p = it.next();
2057     // Space below the relocation pointer is allocated.
2058     computed_size +=
2059         static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
2060     if (it.has_next()) {
2061       // Free the space at the top of the page.
2062       int extra_size =
2063           static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
2064       if (extra_size > 0) {
2065         int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
2066                                            extra_size);
2067         // The bytes we have just "freed" to add to the free list were
2068         // already accounted as available.
2069         accounting_stats_.WasteBytes(wasted_bytes);
2070       }
2071     }
2072   }
2073 
2074   // Make sure the computed size - based on the used portion of the pages in
2075   // use - matches the size obtained while computing forwarding addresses.
2076   ASSERT(computed_size == Size());
2077 }
2078 
2079 
2080 bool NewSpace::ReserveSpace(int bytes) {
2081   // We can't reliably unpack a partial snapshot that needs more new space
2082   // than the minimum NewSpace size.
2083   ASSERT(bytes <= InitialCapacity());
2084   Address limit = allocation_info_.limit;
2085   Address top = allocation_info_.top;
2086   return limit - top >= bytes;
2087 }
2088 
2089 
2090 void PagedSpace::FreePages(Page* prev, Page* last) {
2091   if (last == AllocationTopPage()) {
2092     // The pages are already at the end of the list of used pages.
2093     return;
2094   }
2095 
2096   Page* first = NULL;
2097 
2098   // Remove pages from the list.
2099   if (prev == NULL) {
2100     first = first_page_;
2101     first_page_ = last->next_page();
2102   } else {
2103     first = prev->next_page();
2104     heap()->isolate()->memory_allocator()->SetNextPage(
2105         prev, last->next_page());
2106   }
2107 
2108   // Attach it after the last page.
2109   heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
2110   last_page_ = last;
2111   heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
2112 
2113   // Clean them up.
2114   do {
2115     first->InvalidateWatermark(true);
2116     first->SetAllocationWatermark(first->ObjectAreaStart());
2117     first->SetCachedAllocationWatermark(first->ObjectAreaStart());
2118     first->SetRegionMarks(Page::kAllRegionsCleanMarks);
2119     first = first->next_page();
2120   } while (first != NULL);
2121 
2122   // Order of pages in this space might no longer be consistent with
2123   // order of pages in chunks.
2124   page_list_is_chunk_ordered_ = false;
2125 }
2126 
2127 
2128 void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
2129   const bool add_to_freelist = true;
2130 
2131   // Mark used and unused pages to properly fill unused pages
2132   // after reordering.
2133   PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
2134   Page* last_in_use = AllocationTopPage();
2135   bool in_use = true;
2136 
2137   while (all_pages_iterator.has_next()) {
2138     Page* p = all_pages_iterator.next();
2139     p->SetWasInUseBeforeMC(in_use);
2140     if (p == last_in_use) {
2141       // We passed the page containing the allocation top. All subsequent
2142       // pages are not used.
2143       in_use = false;
2144     }
2145   }
2146 
2147   if (page_list_is_chunk_ordered_) return;
2148 
2149   Page* new_last_in_use = Page::FromAddress(NULL);
2150   heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
2151       this, &first_page_, &last_page_, &new_last_in_use);
2152   ASSERT(new_last_in_use->is_valid());
2153 
2154   if (new_last_in_use != last_in_use) {
2155     // Current allocation top points to a page which is now in the middle
2156     // of the page list.  We should move the allocation top forward to the new last
2157     // used page so various object iterators will continue to work properly.
2158     int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
2159                                          last_in_use->AllocationTop());
2160 
2161     last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
2162     if (size_in_bytes > 0) {
2163       Address start = last_in_use->AllocationTop();
2164       if (deallocate_blocks) {
2165         accounting_stats_.AllocateBytes(size_in_bytes);
2166         DeallocateBlock(start, size_in_bytes, add_to_freelist);
2167       } else {
2168         heap()->CreateFillerObjectAt(start, size_in_bytes);
2169       }
2170     }
2171 
2172     // The new last-in-use page was in the middle of the list before
2173     // sorting, so it is full.
2174     SetTop(new_last_in_use->AllocationTop());
2175 
2176     ASSERT(AllocationTopPage() == new_last_in_use);
2177     ASSERT(AllocationTopPage()->WasInUseBeforeMC());
2178   }
2179 
2180   PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
2181   while (pages_in_use_iterator.has_next()) {
2182     Page* p = pages_in_use_iterator.next();
2183     if (!p->WasInUseBeforeMC()) {
2184       // Empty page is in the middle of a sequence of used pages.
2185       // Allocate it as a whole and deallocate immediately.
2186       int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
2187                                            p->ObjectAreaStart());
2188 
2189       p->SetAllocationWatermark(p->ObjectAreaStart());
2190       Address start = p->ObjectAreaStart();
2191       if (deallocate_blocks) {
2192         accounting_stats_.AllocateBytes(size_in_bytes);
2193         DeallocateBlock(start, size_in_bytes, add_to_freelist);
2194       } else {
2195         heap()->CreateFillerObjectAt(start, size_in_bytes);
2196       }
2197     }
2198   }
2199 
2200   page_list_is_chunk_ordered_ = true;
2201 }
2202 
2203 
2204 void PagedSpace::PrepareForMarkCompact(bool will_compact) {
2205   if (will_compact) {
2206     RelinkPageListInChunkOrder(false);
2207   }
2208 }
2209 
2210 
2211 bool PagedSpace::ReserveSpace(int bytes) {
2212   Address limit = allocation_info_.limit;
2213   Address top = allocation_info_.top;
2214   if (limit - top >= bytes) return true;
2215 
2216   // There wasn't enough space in the current page.  Let's put the rest
2217   // of the page on the free list and start a fresh page.
2218   PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
2219 
2220   Page* reserved_page = TopPageOf(allocation_info_);
2221   int bytes_left_to_reserve = bytes;
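  // Walk forward one page per Page::kPageSize of the request, expanding the
  // space whenever the next page does not exist yet, so the reservation is
  // backed by real pages.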
2222   while (bytes_left_to_reserve > 0) {
2223     if (!reserved_page->next_page()->is_valid()) {
2224       if (heap()->OldGenerationAllocationLimitReached()) return false;
2225       Expand(reserved_page);
2226     }
2227     bytes_left_to_reserve -= Page::kPageSize;
2228     reserved_page = reserved_page->next_page();
2229     if (!reserved_page->is_valid()) return false;
2230   }
2231   ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
2232   TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
2233   SetAllocationInfo(&allocation_info_,
2234                     TopPageOf(allocation_info_)->next_page());
2235   return true;
2236 }
2237 
2238 
2239 // You have to call this last, since the implementation from PagedSpace
2240 // doesn't know that memory was 'promised' to large object space.
2241 bool LargeObjectSpace::ReserveSpace(int bytes) {
2242   return heap()->OldGenerationSpaceAvailable() >= bytes;
2243 }
2244 
2245 
2246 // Slow case for normal allocation.  Try in order: (1) allocate in the next
2247 // page in the space, (2) allocate off the space's free list, (3) expand the
2248 // space, (4) fail.
2249 HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
2250   // Linear allocation in this space has failed.  If there is another page
2251   // in the space, move to that page and allocate there.  This allocation
2252   // should succeed (size_in_bytes should not be greater than a page's
2253   // object area size).
2254   Page* current_page = TopPageOf(allocation_info_);
2255   if (current_page->next_page()->is_valid()) {
2256     return AllocateInNextPage(current_page, size_in_bytes);
2257   }
2258 
2259   // There is no next page in this space.  Try free list allocation unless that
2260   // is currently forbidden.
2261   if (!heap()->linear_allocation()) {
2262     int wasted_bytes;
2263     Object* result;
2264     MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
2265     accounting_stats_.WasteBytes(wasted_bytes);
2266     if (maybe->ToObject(&result)) {
2267       accounting_stats_.AllocateBytes(size_in_bytes);
2268 
2269       HeapObject* obj = HeapObject::cast(result);
2270       Page* p = Page::FromAddress(obj->address());
2271 
2272       if (obj->address() >= p->AllocationWatermark()) {
2273         // There should be no hole between the allocation watermark
2274         // and allocated object address.
2275         // Memory above the allocation watermark was not swept and
2276         // might contain garbage pointers to new space.
2277         ASSERT(obj->address() == p->AllocationWatermark());
2278         p->SetAllocationWatermark(obj->address() + size_in_bytes);
2279       }
2280 
2281       return obj;
2282     }
2283   }
2284 
2285   // Free list allocation failed and there is no next page.  Fail if we have
2286   // hit the old generation size limit that should cause a garbage
2287   // collection.
2288   if (!heap()->always_allocate() &&
2289       heap()->OldGenerationAllocationLimitReached()) {
2290     return NULL;
2291   }
2292 
2293   // Try to expand the space and allocate in the new next page.
2294   ASSERT(!current_page->next_page()->is_valid());
2295   if (Expand(current_page)) {
2296     return AllocateInNextPage(current_page, size_in_bytes);
2297   }
2298 
2299   // Finally, fail.
2300   return NULL;
2301 }
2302 
2303 
2304 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
2305   current_page->SetAllocationWatermark(allocation_info_.top);
2306   int free_size =
2307       static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2308   if (free_size > 0) {
2309     int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
2310     accounting_stats_.WasteBytes(wasted_bytes);
2311   }
2312 }
2313 
2314 
2315 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
2316   current_page->SetAllocationWatermark(allocation_info_.top);
2317   int free_size =
2318       static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2319   // In the fixed space free list all the free list items have the right size.
2320   // We use up the rest of the page while preserving this invariant.
2321   while (free_size >= object_size_in_bytes_) {
2322     free_list_.Free(allocation_info_.top);
2323     allocation_info_.top += object_size_in_bytes_;
2324     free_size -= object_size_in_bytes_;
2325     accounting_stats_.WasteBytes(object_size_in_bytes_);
2326   }
2327 }
2328 
2329 
2330 // Add the block at the top of the page to the space's free list, set the
2331 // allocation info to the next page (there is assumed to be one), and allocate
2332 // linearly there.
2333 HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
2334                                          int size_in_bytes) {
2335   ASSERT(current_page->next_page()->is_valid());
2336   Page* next_page = current_page->next_page();
2337   next_page->ClearGCFields();
2338   PutRestOfCurrentPageOnFreeList(current_page);
2339   SetAllocationInfo(&allocation_info_, next_page);
2340   return AllocateLinearly(&allocation_info_, size_in_bytes);
2341 }
2342 
2343 
2344 void OldSpace::DeallocateBlock(Address start,
2345                                  int size_in_bytes,
2346                                  bool add_to_freelist) {
2347   Free(start, size_in_bytes, add_to_freelist);
2348 }
2349 
2350 
2351 #ifdef DEBUG
2352 void PagedSpace::ReportCodeStatistics() {
2353   Isolate* isolate = Isolate::Current();
2354   CommentStatistic* comments_statistics =
2355       isolate->paged_space_comments_statistics();
2356   ReportCodeKindStatistics();
2357   PrintF("Code comment statistics (\"   [ comment-txt   :    size/   "
2358          "count  (average)\"):\n");
2359   for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2360     const CommentStatistic& cs = comments_statistics[i];
2361     if (cs.size > 0) {
2362       PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
2363              cs.size/cs.count);
2364     }
2365   }
2366   PrintF("\n");
2367 }
2368 
2369 
2370 void PagedSpace::ResetCodeStatistics() {
2371   Isolate* isolate = Isolate::Current();
2372   CommentStatistic* comments_statistics =
2373       isolate->paged_space_comments_statistics();
2374   ClearCodeKindStatistics();
2375   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2376     comments_statistics[i].Clear();
2377   }
2378   comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2379   comments_statistics[CommentStatistic::kMaxComments].size = 0;
2380   comments_statistics[CommentStatistic::kMaxComments].count = 0;
2381 }
2382 
2383 
2384 // Adds a comment to the 'comments_statistics' table.  Performance is OK as
2385 // long as 'kMaxComments' is small.
2386 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2387   CommentStatistic* comments_statistics =
2388       isolate->paged_space_comments_statistics();
2389   // Do not count empty comments
2390   if (delta <= 0) return;
2391   CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2392   // Search for a free or matching entry in 'comments_statistics': 'cs'
2393   // points to result.
2394   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2395     if (comments_statistics[i].comment == NULL) {
2396       cs = &comments_statistics[i];
2397       cs->comment = comment;
2398       break;
2399     } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2400       cs = &comments_statistics[i];
2401       break;
2402     }
2403   }
2404   // Update entry for 'comment'
2405   cs->size += delta;
2406   cs->count += 1;
2407 }
2408 
2409 
2410 // Call for each nested comment start (start marked with '[ xxx', end marked
2411 // with ']').  RelocIterator 'it' must point to a comment reloc info.
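// For example, with reloc comments of the form "[ outer", "[ inner", "]", "]"
// (illustrative names), the bytes covered by the inner pair are attributed to
// "inner", and only the remaining flat bytes of the outer region are
// attributed to "outer".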
2412 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2413   ASSERT(!it->done());
2414   ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2415   const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2416   if (tmp[0] != '[') {
2417     // Not a nested comment; skip
2418     return;
2419   }
2420 
2421   // Search for end of nested comment or a new nested comment
2422   const char* const comment_txt =
2423       reinterpret_cast<const char*>(it->rinfo()->data());
2424   const byte* prev_pc = it->rinfo()->pc();
2425   int flat_delta = 0;
2426   it->next();
2427   while (true) {
2428     // All nested comments must be terminated properly, so the loop is
2429     // guaranteed to exit.
2430     ASSERT(!it->done());
2431     if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2432       const char* const txt =
2433           reinterpret_cast<const char*>(it->rinfo()->data());
2434       flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2435       if (txt[0] == ']') break;  // End of nested comment
2436       // A new comment
2437       CollectCommentStatistics(isolate, it);
2438       // Skip code that was covered with previous comment
2439       prev_pc = it->rinfo()->pc();
2440     }
2441     it->next();
2442   }
2443   EnterComment(isolate, comment_txt, flat_delta);
2444 }
2445 
2446 
2447 // Collects code size statistics:
2448 // - by code kind
2449 // - by code comment
2450 void PagedSpace::CollectCodeStatistics() {
2451   Isolate* isolate = heap()->isolate();
2452   HeapObjectIterator obj_it(this);
2453   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
2454     if (obj->IsCode()) {
2455       Code* code = Code::cast(obj);
2456       isolate->code_kind_statistics()[code->kind()] += code->Size();
2457       RelocIterator it(code);
2458       int delta = 0;
2459       const byte* prev_pc = code->instruction_start();
2460       while (!it.done()) {
2461         if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2462           delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2463           CollectCommentStatistics(isolate, &it);
2464           prev_pc = it.rinfo()->pc();
2465         }
2466         it.next();
2467       }
2468 
2469       ASSERT(code->instruction_start() <= prev_pc &&
2470              prev_pc <= code->instruction_end());
2471       delta += static_cast<int>(code->instruction_end() - prev_pc);
2472       EnterComment(isolate, "NoComment", delta);
2473     }
2474   }
2475 }
2476 
2477 
2478 void OldSpace::ReportStatistics() {
2479   int pct = static_cast<int>(Available() * 100 / Capacity());
2480   PrintF("  capacity: %" V8_PTR_PREFIX "d"
2481              ", waste: %" V8_PTR_PREFIX "d"
2482              ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2483          Capacity(), Waste(), Available(), pct);
2484 
2485   ClearHistograms();
2486   HeapObjectIterator obj_it(this);
2487   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2488     CollectHistogramInfo(obj);
2489   ReportHistogram(true);
2490 }
2491 #endif
2492 
2493 // -----------------------------------------------------------------------------
2494 // FixedSpace implementation
2495 
2496 void FixedSpace::PrepareForMarkCompact(bool will_compact) {
2497   // Call prepare of the super class.
2498   PagedSpace::PrepareForMarkCompact(will_compact);
2499 
2500   if (will_compact) {
2501     // Reset relocation info.
2502     MCResetRelocationInfo();
2503 
2504     // During a compacting collection, everything in the space is considered
2505     // 'available' (set by the call to MCResetRelocationInfo) and we will
2506     // rediscover live and wasted bytes during the collection.
2507     ASSERT(Available() == Capacity());
2508   } else {
2509     // During a non-compacting collection, everything below the linear
2510     // allocation pointer except wasted top-of-page blocks is considered
2511     // allocated and we will rediscover available bytes during the
2512     // collection.
2513     accounting_stats_.AllocateBytes(free_list_.available());
2514   }
2515 
2516   // Clear the free list before a full GC---it will be rebuilt afterward.
2517   free_list_.Reset();
2518 }
2519 
2520 
2521 void FixedSpace::MCCommitRelocationInfo() {
2522   // Update fast allocation info.
2523   allocation_info_.top = mc_forwarding_info_.top;
2524   allocation_info_.limit = mc_forwarding_info_.limit;
2525   ASSERT(allocation_info_.VerifyPagedAllocation());
2526 
2527   // The space is compacted and we haven't yet wasted any space.
2528   ASSERT(Waste() == 0);
2529 
2530   // Update allocation_top of each page in use and compute waste.
2531   int computed_size = 0;
2532   PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2533   while (it.has_next()) {
2534     Page* page = it.next();
2535     Address page_top = page->AllocationTop();
2536     computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
2537     if (it.has_next()) {
2538       accounting_stats_.WasteBytes(
2539           static_cast<int>(page->ObjectAreaEnd() - page_top));
2540       page->SetAllocationWatermark(page_top);
2541     }
2542   }
2543 
2544   // Make sure the computed size - based on the used portion of the
2545   // pages in use - matches the size we adjust during allocation.
2546   ASSERT(computed_size == Size());
2547 }
2548 
2549 
2550 // Slow case for normal allocation. Try in order: (1) allocate in the next
2551 // page in the space, (2) allocate off the space's free list, (3) expand the
2552 // space, (4) fail.
2553 HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2554   ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2555   // Linear allocation in this space has failed.  If there is another page
2556   // in the space, move to that page and allocate there.  This allocation
2557   // should succeed.
2558   Page* current_page = TopPageOf(allocation_info_);
2559   if (current_page->next_page()->is_valid()) {
2560     return AllocateInNextPage(current_page, size_in_bytes);
2561   }
2562 
2563   // There is no next page in this space.  Try free list allocation unless
2564   // that is currently forbidden.  The fixed space free list implicitly assumes
2565   // that all free blocks are of the fixed size.
2566   if (!heap()->linear_allocation()) {
2567     Object* result;
2568     MaybeObject* maybe = free_list_.Allocate();
2569     if (maybe->ToObject(&result)) {
2570       accounting_stats_.AllocateBytes(size_in_bytes);
2571       HeapObject* obj = HeapObject::cast(result);
2572       Page* p = Page::FromAddress(obj->address());
2573 
2574       if (obj->address() >= p->AllocationWatermark()) {
2575         // There should be no hole between the allocation watermark
2576         // and allocated object address.
2577         // Memory above the allocation watermark was not swept and
2578         // might contain garbage pointers to new space.
2579         ASSERT(obj->address() == p->AllocationWatermark());
2580         p->SetAllocationWatermark(obj->address() + size_in_bytes);
2581       }
2582 
2583       return obj;
2584     }
2585   }
2586 
2587   // Free list allocation failed and there is no next page.  Fail if we have
2588   // hit the old generation size limit that should cause a garbage
2589   // collection.
2590   if (!heap()->always_allocate() &&
2591       heap()->OldGenerationAllocationLimitReached()) {
2592     return NULL;
2593   }
2594 
2595   // Try to expand the space and allocate in the new next page.
2596   ASSERT(!current_page->next_page()->is_valid());
2597   if (Expand(current_page)) {
2598     return AllocateInNextPage(current_page, size_in_bytes);
2599   }
2600 
2601   // Finally, fail.
2602   return NULL;
2603 }
2604 
2605 
2606 // Move to the next page (there is assumed to be one) and allocate there.
2607 // The top of page block is always wasted, because it is too small to hold a
2608 // map.
2609 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2610                                            int size_in_bytes) {
2611   ASSERT(current_page->next_page()->is_valid());
2612   ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
2613   ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2614   Page* next_page = current_page->next_page();
2615   next_page->ClearGCFields();
2616   current_page->SetAllocationWatermark(allocation_info_.top);
2617   accounting_stats_.WasteBytes(page_extra_);
2618   SetAllocationInfo(&allocation_info_, next_page);
2619   return AllocateLinearly(&allocation_info_, size_in_bytes);
2620 }
2621 
2622 
2623 void FixedSpace::DeallocateBlock(Address start,
2624                                  int size_in_bytes,
2625                                  bool add_to_freelist) {
2626   // Free-list elements in fixed space are assumed to have a fixed size.
2627   // We break the free block into chunks and add them to the free list
2628   // individually.
2629   int size = object_size_in_bytes();
2630   ASSERT(size_in_bytes % size == 0);
2631   Address end = start + size_in_bytes;
2632   for (Address a = start; a < end; a += size) {
2633     Free(a, add_to_freelist);
2634   }
2635 }
2636 
2637 
2638 #ifdef DEBUG
2639 void FixedSpace::ReportStatistics() {
2640   int pct = static_cast<int>(Available() * 100 / Capacity());
2641   PrintF("  capacity: %" V8_PTR_PREFIX "d"
2642              ", waste: %" V8_PTR_PREFIX "d"
2643              ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2644          Capacity(), Waste(), Available(), pct);
2645 
2646   ClearHistograms();
2647   HeapObjectIterator obj_it(this);
2648   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2649     CollectHistogramInfo(obj);
2650   ReportHistogram(false);
2651 }
2652 #endif
2653 
2654 
2655 // -----------------------------------------------------------------------------
2656 // MapSpace implementation
2657 
2658 void MapSpace::PrepareForMarkCompact(bool will_compact) {
2659   // Call prepare of the super class.
2660   FixedSpace::PrepareForMarkCompact(will_compact);
2661 
2662   if (will_compact) {
2663     // Initialize map index entry.
2664     int page_count = 0;
2665     PageIterator it(this, PageIterator::ALL_PAGES);
2666     while (it.has_next()) {
2667       ASSERT_MAP_PAGE_INDEX(page_count);
2668 
2669       Page* p = it.next();
2670       ASSERT(p->mc_page_index == page_count);
2671 
2672       page_addresses_[page_count++] = p->address();
2673     }
2674   }
2675 }
2676 
2677 
2678 #ifdef DEBUG
2679 void MapSpace::VerifyObject(HeapObject* object) {
2680   // The object should be a map or a free-list node.
2681   ASSERT(object->IsMap() || object->IsByteArray());
2682 }
2683 #endif
2684 
2685 
2686 // -----------------------------------------------------------------------------
2687 // GlobalPropertyCellSpace implementation
2688 
2689 #ifdef DEBUG
2690 void CellSpace::VerifyObject(HeapObject* object) {
2691   // The object should be a global object property cell or a free-list node.
2692   ASSERT(object->IsJSGlobalPropertyCell() ||
2693          object->map() == heap()->two_pointer_filler_map());
2694 }
2695 #endif
2696 
2697 
2698 // -----------------------------------------------------------------------------
2699 // LargeObjectIterator
2700 
2701 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2702   current_ = space->first_chunk_;
2703   size_func_ = NULL;
2704 }
2705 
2706 
2707 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2708                                          HeapObjectCallback size_func) {
2709   current_ = space->first_chunk_;
2710   size_func_ = size_func;
2711 }
2712 
2713 
2714 HeapObject* LargeObjectIterator::next() {
2715   if (current_ == NULL) return NULL;
2716 
2717   HeapObject* object = current_->GetObject();
2718   current_ = current_->next();
2719   return object;
2720 }
2721 
2722 
2723 // -----------------------------------------------------------------------------
2724 // LargeObjectChunk
2725 
2726 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
2727                                         Executability executable) {
2728   size_t requested = ChunkSizeFor(size_in_bytes);
2729   size_t size;
2730   Isolate* isolate = Isolate::Current();
2731   void* mem = isolate->memory_allocator()->AllocateRawMemory(
2732       requested, &size, executable);
2733   if (mem == NULL) return NULL;
2734 
2735   // The start of the chunk may be overlaid with a page, so we have to
2736   // make sure that the page flags fit in the size field.
2737   ASSERT((size & Page::kPageFlagMask) == 0);
2738 
2739   LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
2740   if (size < requested) {
2741     isolate->memory_allocator()->FreeRawMemory(
2742         mem, size, executable);
2743     LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
2744     return NULL;
2745   }
2746 
2747   ObjectSpace space = (executable == EXECUTABLE)
2748       ? kObjectSpaceCodeSpace
2749       : kObjectSpaceLoSpace;
2750   isolate->memory_allocator()->PerformAllocationCallback(
2751       space, kAllocationActionAllocate, size);
2752 
2753   LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
2754   chunk->size_ = size;
2755   Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2756   page->heap_ = isolate->heap();
2757   return chunk;
2758 }
2759 
2760 
2761 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
2762   int os_alignment = static_cast<int>(OS::AllocateAlignment());
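  // AllocateRawMemory only guarantees OS allocation alignment, but the page
  // header is placed at the next Page::kPageSize boundary, so up to
  // (Page::kPageSize - os_alignment) bytes at the front of the chunk may be
  // lost to rounding and have to be requested on top of the object size.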
2763   if (os_alignment < Page::kPageSize) {
2764     size_in_bytes += (Page::kPageSize - os_alignment);
2765   }
2766   return size_in_bytes + Page::kObjectStartOffset;
2767 }
2768 
2769 // -----------------------------------------------------------------------------
2770 // LargeObjectSpace
2771 
2772 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
2773     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
2774       first_chunk_(NULL),
2775       size_(0),
2776       page_count_(0),
2777       objects_size_(0) {}
2778 
2779 
2780 bool LargeObjectSpace::Setup() {
2781   first_chunk_ = NULL;
2782   size_ = 0;
2783   page_count_ = 0;
2784   objects_size_ = 0;
2785   return true;
2786 }
2787 
2788 
2789 void LargeObjectSpace::TearDown() {
2790   while (first_chunk_ != NULL) {
2791     LargeObjectChunk* chunk = first_chunk_;
2792     first_chunk_ = first_chunk_->next();
2793     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address()));
2794     Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2795     Executability executable =
2796         page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
2797     ObjectSpace space = kObjectSpaceLoSpace;
2798     if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
2799     size_t size = chunk->size();
2800     heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
2801                                                          size,
2802                                                          executable);
2803     heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2804         space, kAllocationActionFree, size);
2805   }
2806 
2807   size_ = 0;
2808   page_count_ = 0;
2809   objects_size_ = 0;
2810 }
2811 
2812 
2813 #ifdef ENABLE_HEAP_PROTECTION
2814 
2815 void LargeObjectSpace::Protect() {
2816   LargeObjectChunk* chunk = first_chunk_;
2817   while (chunk != NULL) {
2818     heap()->isolate()->memory_allocator()->Protect(chunk->address(),
2819                                                    chunk->size());
2820     chunk = chunk->next();
2821   }
2822 }
2823 
2824 
2825 void LargeObjectSpace::Unprotect() {
2826   LargeObjectChunk* chunk = first_chunk_;
2827   while (chunk != NULL) {
2828     bool is_code = chunk->GetObject()->IsCode();
2829     heap()->isolate()->memory_allocator()->Unprotect(chunk->address(),
2830         chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE);
2831     chunk = chunk->next();
2832   }
2833 }
2834 
2835 #endif
2836 
2837 
2838 MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
2839                                                    int object_size,
2840                                                    Executability executable) {
2841   ASSERT(0 < object_size && object_size <= requested_size);
2842 
2843   // Check if we want to force a GC before growing the old space further.
2844   // If so, fail the allocation.
2845   if (!heap()->always_allocate() &&
2846       heap()->OldGenerationAllocationLimitReached()) {
2847     return Failure::RetryAfterGC(identity());
2848   }
2849 
2850   LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
2851   if (chunk == NULL) {
2852     return Failure::RetryAfterGC(identity());
2853   }
2854 
2855   size_ += static_cast<int>(chunk->size());
2856   objects_size_ += requested_size;
2857   page_count_++;
2858   chunk->set_next(first_chunk_);
2859   first_chunk_ = chunk;
2860 
2861   // Initialize page header.
2862   Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2863   Address object_address = page->ObjectAreaStart();
2864 
2865   // Clear the low order bit of the second word in the page to flag it as a
2866   // large object page.  If the chunk_size happened to be written there, its
2867   // low order bit should already be clear.
2868   page->SetIsLargeObjectPage(true);
2869   page->SetIsPageExecutable(executable);
2870   page->SetRegionMarks(Page::kAllRegionsCleanMarks);
2871   return HeapObject::FromAddress(object_address);
2872 }
2873 
2874 
2875 MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
2876   ASSERT(0 < size_in_bytes);
2877   return AllocateRawInternal(size_in_bytes,
2878                              size_in_bytes,
2879                              EXECUTABLE);
2880 }
2881 
2882 
2883 MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
2884   ASSERT(0 < size_in_bytes);
2885   return AllocateRawInternal(size_in_bytes,
2886                              size_in_bytes,
2887                              NOT_EXECUTABLE);
2888 }
2889 
2890 
2891 MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
2892   ASSERT(0 < size_in_bytes);
2893   return AllocateRawInternal(size_in_bytes,
2894                              size_in_bytes,
2895                              NOT_EXECUTABLE);
2896 }
2897 
2898 
2899 // GC support
2900 MaybeObject* LargeObjectSpace::FindObject(Address a) {
2901   for (LargeObjectChunk* chunk = first_chunk_;
2902        chunk != NULL;
2903        chunk = chunk->next()) {
2904     Address chunk_address = chunk->address();
2905     if (chunk_address <= a && a < chunk_address + chunk->size()) {
2906       return chunk->GetObject();
2907     }
2908   }
2909   return Failure::Exception();
2910 }
2911 
2912 
2913 LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
2914   // TODO(853): Change this implementation to only find executable
2915   // chunks and use some kind of hash-based approach to speed it up.
2916   for (LargeObjectChunk* chunk = first_chunk_;
2917        chunk != NULL;
2918        chunk = chunk->next()) {
2919     Address chunk_address = chunk->address();
2920     if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
2921       return chunk;
2922     }
2923   }
2924   return NULL;
2925 }
2926 
2927 
2928 void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
2929   LargeObjectIterator it(this);
2930   for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
2931     // We only have code, sequential strings, or fixed arrays in large
2932     // object space, and only fixed arrays can possibly contain pointers to
2933     // the young generation.
2934     if (object->IsFixedArray()) {
2935       Page* page = Page::FromAddress(object->address());
2936       uint32_t marks = page->GetRegionMarks();
2937       uint32_t newmarks = Page::kAllRegionsCleanMarks;
2938 
2939       if (marks != Page::kAllRegionsCleanMarks) {
2940         // For a large page a single dirty mark corresponds to several
2941         // regions (modulo 32). So we treat a large page as a sequence of
2942         // normal pages of size Page::kPageSize having same dirty marks
2943         // and subsequently iterate dirty regions on each of these pages.
2944         Address start = object->address();
2945         Address end = page->ObjectAreaEnd();
2946         Address object_end = start + object->Size();
2947 
2948         // Iterate regions of the first normal page covering object.
2949         uint32_t first_region_number = page->GetRegionNumberForAddress(start);
2950         newmarks |=
2951             heap()->IterateDirtyRegions(marks >> first_region_number,
2952                                         start,
2953                                         end,
2954                                         &Heap::IteratePointersInDirtyRegion,
2955                                         copy_object) << first_region_number;
2956 
2957         start = end;
2958         end = start + Page::kPageSize;
2959         while (end <= object_end) {
2960           // Iterate next 32 regions.
2961           newmarks |=
2962               heap()->IterateDirtyRegions(marks,
2963                                           start,
2964                                           end,
2965                                           &Heap::IteratePointersInDirtyRegion,
2966                                           copy_object);
2967           start = end;
2968           end = start + Page::kPageSize;
2969         }
2970 
2971         if (start != object_end) {
2972           // Iterate the last piece of an object which is less than
2973           // Page::kPageSize.
2974           newmarks |=
2975               heap()->IterateDirtyRegions(marks,
2976                                           start,
2977                                           object_end,
2978                                           &Heap::IteratePointersInDirtyRegion,
2979                                           copy_object);
2980         }
2981 
2982         page->SetRegionMarks(newmarks);
2983       }
2984     }
2985   }
2986 }
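
// Worked note (illustration only, not part of the original source): the
// region-marks word has one bit per region, 32 regions per
// Page::kPageSize-sized piece.  If the object starts in region 5 of the
// first piece, first_region_number == 5, so the marks word is shifted right
// by 5 before the first piece is scanned, and the surviving dirty bits are
// shifted left by 5 again before being OR-ed into newmarks; the later,
// fully covered pieces reuse the unshifted marks word.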


void LargeObjectSpace::FreeUnmarkedObjects() {
  LargeObjectChunk* previous = NULL;
  LargeObjectChunk* current = first_chunk_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    if (object->IsMarked()) {
      object->ClearMark();
      heap()->mark_compact_collector()->tracer()->decrement_marked_count();
      previous = current;
      current = current->next();
    } else {
      Page* page = Page::FromAddress(RoundUp(current->address(),
                                     Page::kPageSize));
      Executability executable =
          page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
      Address chunk_address = current->address();
      size_t chunk_size = current->size();

      // Cut the chunk out from the chunk list.
      current = current->next();
      if (previous == NULL) {
        first_chunk_ = current;
      } else {
        previous->set_next(current);
      }

      // Free the chunk.
      heap()->mark_compact_collector()->ReportDeleteIfNeeded(
          object, heap()->isolate());
      LiveObjectList::ProcessNonLive(object);

      size_ -= static_cast<int>(chunk_size);
      objects_size_ -= object->Size();
      page_count_--;
      ObjectSpace space = kObjectSpaceLoSpace;
      if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
      heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
                                                           chunk_size,
                                                           executable);
      heap()->isolate()->memory_allocator()->PerformAllocationCallback(
          space, kAllocationActionFree, size_);
      LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
    }
  }
}
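
// Illustration only (not part of the original source): the unlink pattern
// used above, reduced to its essentials.  'previous' trails 'current' so a
// dead chunk can be spliced out of the singly linked list in one pass; when
// the head itself dies, the head pointer is advanced instead.  The helper
// below is hypothetical.
#if 0
template <typename Chunk, typename Pred>
static Chunk* RemoveIf(Chunk* head, Pred dead) {
  Chunk* previous = NULL;
  Chunk* current = head;
  while (current != NULL) {
    Chunk* next = current->next();
    if (dead(current)) {
      if (previous == NULL) {
        head = next;               // Unlinking the head.
      } else {
        previous->set_next(next);  // Unlinking an interior node.
      }
      // The caller would release 'current' here.
    } else {
      previous = current;
    }
    current = next;
  }
  return head;
}
#endif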


bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  if (heap()->new_space()->Contains(address)) {
    return false;
  }
  Page* page = Page::FromAddress(address);

  SLOW_ASSERT(!page->IsLargeObjectPage()
              || !FindObject(address)->IsFailure());

  return page->IsLargeObjectPage();
}


#ifdef DEBUG
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    ASSERT(object->address() == page->ObjectAreaStart());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    ASSERT(object->IsCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
           object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      // We loop over fixed arrays ourselves, rather than using the visitor,
      // because the visitor doesn't support the start/offset iteration
      // needed for IsRegionDirty.
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          ASSERT(heap()->Contains(element_object));
          ASSERT(element_object->map()->IsMap());
          if (heap()->InNewSpace(element_object)) {
            Address array_addr = object->address();
            Address element_addr = array_addr + FixedArray::kHeaderSize +
                j * kPointerSize;

            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
          }
        }
      }
    }
  }
}


void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    obj->Print();
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms();
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF("  number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}
#endif  // DEBUG

} }  // namespace v8::internal