1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/v8.h"
6 
7 #include "src/base/bits.h"
8 #include "src/base/platform/platform.h"
9 #include "src/full-codegen.h"
10 #include "src/heap/mark-compact.h"
11 #include "src/macro-assembler.h"
12 #include "src/msan.h"
13 
14 namespace v8 {
15 namespace internal {
16 
17 
18 // ----------------------------------------------------------------------------
19 // HeapObjectIterator
20 
21 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
22   // You can't actually iterate over the anchor page.  It is not a real page,
23   // just an anchor for the doubly linked page list.  Initialize as if we have
24   // reached the end of the anchor page, then the first iteration will move on
25   // to the first page.
26   Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
27 }
28 
29 
30 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
31                                        HeapObjectCallback size_func) {
32   // You can't actually iterate over the anchor page.  It is not a real page,
33   // just an anchor for the doubly linked page list.  Initialize the current
34   // address and end as NULL, then the first iteration will move on
35   // to the first page.
36   Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
37 }
38 
39 
40 HeapObjectIterator::HeapObjectIterator(Page* page,
41                                        HeapObjectCallback size_func) {
42   Space* owner = page->owner();
43   DCHECK(owner == page->heap()->old_pointer_space() ||
44          owner == page->heap()->old_data_space() ||
45          owner == page->heap()->map_space() ||
46          owner == page->heap()->cell_space() ||
47          owner == page->heap()->property_cell_space() ||
48          owner == page->heap()->code_space());
49   Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
50              page->area_end(), kOnePageOnly, size_func);
51   DCHECK(page->WasSwept() || page->SweepingCompleted());
52 }
53 
54 
55 void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
56                                     HeapObjectIterator::PageMode mode,
57                                     HeapObjectCallback size_f) {
58   space_ = space;
59   cur_addr_ = cur;
60   cur_end_ = end;
61   page_mode_ = mode;
62   size_func_ = size_f;
63 }
64 
65 
66 // We have hit the end of the page and should advance to the next block of
67 // objects, which starts on the next page.
68 bool HeapObjectIterator::AdvanceToNextPage() {
69   DCHECK(cur_addr_ == cur_end_);
70   if (page_mode_ == kOnePageOnly) return false;
71   Page* cur_page;
72   if (cur_addr_ == NULL) {
73     cur_page = space_->anchor();
74   } else {
75     cur_page = Page::FromAddress(cur_addr_ - 1);
76     DCHECK(cur_addr_ == cur_page->area_end());
77   }
78   cur_page = cur_page->next_page();
79   if (cur_page == space_->anchor()) return false;
80   cur_addr_ = cur_page->area_start();
81   cur_end_ = cur_page->area_end();
82   DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
83   return true;
84 }
85 
86 
87 // -----------------------------------------------------------------------------
88 // CodeRange
89 
90 
91 CodeRange::CodeRange(Isolate* isolate)
92     : isolate_(isolate),
93       code_range_(NULL),
94       free_list_(0),
95       allocation_list_(0),
96       current_allocation_block_index_(0) {}
97 
98 
99 bool CodeRange::SetUp(size_t requested) {
100   DCHECK(code_range_ == NULL);
101 
102   if (requested == 0) {
103     // When a target requires the code range feature, we put all code objects
104     // in a kMaximalCodeRangeSize range of virtual address space, so that
105     // they can call each other with near calls.
106     if (kRequiresCodeRange) {
107       requested = kMaximalCodeRangeSize;
108     } else {
109       return true;
110     }
111   }
112 
113   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
114   code_range_ = new base::VirtualMemory(requested);
115   CHECK(code_range_ != NULL);
116   if (!code_range_->IsReserved()) {
117     delete code_range_;
118     code_range_ = NULL;
119     return false;
120   }
121 
122   // We are sure that we have mapped a block of requested addresses.
123   DCHECK(code_range_->size() == requested);
124   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
125   Address base = reinterpret_cast<Address>(code_range_->address());
126   Address aligned_base =
127       RoundUp(reinterpret_cast<Address>(code_range_->address()),
128               MemoryChunk::kAlignment);
129   size_t size = code_range_->size() - (aligned_base - base);
130   allocation_list_.Add(FreeBlock(aligned_base, size));
131   current_allocation_block_index_ = 0;
132   return true;
133 }
134 
135 
136 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
137                                        const FreeBlock* right) {
138   // The entire point of CodeRange is that the difference between two
139   // addresses in the range can be represented as a signed 32-bit int,
140   // so the cast is semantically correct.
141   return static_cast<int>(left->start - right->start);
142 }
143 
144 
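// Advances current_allocation_block_index_ to the next free block that can hold
// |requested| bytes. If no such block is left, the free list is merged back into
// the allocation list (coalescing adjacent blocks) and the search is retried;
// returns false if the code range is full or too fragmented.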
145 bool CodeRange::GetNextAllocationBlock(size_t requested) {
146   for (current_allocation_block_index_++;
147        current_allocation_block_index_ < allocation_list_.length();
148        current_allocation_block_index_++) {
149     if (requested <= allocation_list_[current_allocation_block_index_].size) {
150       return true;  // Found a large enough allocation block.
151     }
152   }
153 
154   // Sort and merge the free blocks on the free list and the allocation list.
155   free_list_.AddAll(allocation_list_);
156   allocation_list_.Clear();
157   free_list_.Sort(&CompareFreeBlockAddress);
158   for (int i = 0; i < free_list_.length();) {
159     FreeBlock merged = free_list_[i];
160     i++;
161     // Add adjacent free blocks to the current merged block.
162     while (i < free_list_.length() &&
163            free_list_[i].start == merged.start + merged.size) {
164       merged.size += free_list_[i].size;
165       i++;
166     }
167     if (merged.size > 0) {
168       allocation_list_.Add(merged);
169     }
170   }
171   free_list_.Clear();
172 
173   for (current_allocation_block_index_ = 0;
174        current_allocation_block_index_ < allocation_list_.length();
175        current_allocation_block_index_++) {
176     if (requested <= allocation_list_[current_allocation_block_index_].size) {
177       return true;  // Found a large enough allocation block.
178     }
179   }
180   current_allocation_block_index_ = 0;
181   // Code range is full or too fragmented.
182   return false;
183 }
184 
185 
186 Address CodeRange::AllocateRawMemory(const size_t requested_size,
187                                      const size_t commit_size,
188                                      size_t* allocated) {
189   DCHECK(commit_size <= requested_size);
190   DCHECK(allocation_list_.length() == 0 ||
191          current_allocation_block_index_ < allocation_list_.length());
192   if (allocation_list_.length() == 0 ||
193       requested_size > allocation_list_[current_allocation_block_index_].size) {
194     // Find an allocation block large enough.
195     if (!GetNextAllocationBlock(requested_size)) return NULL;
196   }
197   // Commit the requested memory at the start of the current allocation block.
198   size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
199   FreeBlock current = allocation_list_[current_allocation_block_index_];
200   if (aligned_requested >= (current.size - Page::kPageSize)) {
201     // Don't leave a small free block behind; it is useless for a large object or chunk.
202     *allocated = current.size;
203   } else {
204     *allocated = aligned_requested;
205   }
206   DCHECK(*allocated <= current.size);
207   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
208   if (!isolate_->memory_allocator()->CommitExecutableMemory(
209           code_range_, current.start, commit_size, *allocated)) {
210     *allocated = 0;
211     return NULL;
212   }
213   allocation_list_[current_allocation_block_index_].start += *allocated;
214   allocation_list_[current_allocation_block_index_].size -= *allocated;
215   if (*allocated == current.size) {
216     // This block is used up, get the next one.
217     GetNextAllocationBlock(0);
218   }
219   return current.start;
220 }
221 
222 
223 bool CodeRange::CommitRawMemory(Address start, size_t length) {
224   return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
225 }
226 
227 
228 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
229   return code_range_->Uncommit(start, length);
230 }
231 
232 
233 void CodeRange::FreeRawMemory(Address address, size_t length) {
234   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
235   free_list_.Add(FreeBlock(address, length));
236   code_range_->Uncommit(address, length);
237 }
238 
239 
240 void CodeRange::TearDown() {
241   delete code_range_;  // Frees all memory in the virtual memory range.
242   code_range_ = NULL;
243   free_list_.Free();
244   allocation_list_.Free();
245 }
246 
247 
248 // -----------------------------------------------------------------------------
249 // MemoryAllocator
250 //
251 
252 MemoryAllocator::MemoryAllocator(Isolate* isolate)
253     : isolate_(isolate),
254       capacity_(0),
255       capacity_executable_(0),
256       size_(0),
257       size_executable_(0),
258       lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
259       highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
260 
261 
262 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
263   capacity_ = RoundUp(capacity, Page::kPageSize);
264   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
265   DCHECK_GE(capacity_, capacity_executable_);
266 
267   size_ = 0;
268   size_executable_ = 0;
269 
270   return true;
271 }
272 
273 
274 void MemoryAllocator::TearDown() {
275   // Check that spaces were torn down before MemoryAllocator.
276   DCHECK(size_ == 0);
277   // TODO(gc) this will be true again when we fix FreeMemory.
278   // DCHECK(size_executable_ == 0);
279   capacity_ = 0;
280   capacity_executable_ = 0;
281 }
282 
283 
284 bool MemoryAllocator::CommitMemory(Address base, size_t size,
285                                    Executability executable) {
286   if (!base::VirtualMemory::CommitRegion(base, size,
287                                          executable == EXECUTABLE)) {
288     return false;
289   }
290   UpdateAllocatedSpaceLimits(base, base + size);
291   return true;
292 }
293 
294 
295 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
296                                  Executability executable) {
297   // TODO(gc) make code_range part of memory allocator?
298   DCHECK(reservation->IsReserved());
299   size_t size = reservation->size();
300   DCHECK(size_ >= size);
301   size_ -= size;
302 
303   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
304 
305   if (executable == EXECUTABLE) {
306     DCHECK(size_executable_ >= size);
307     size_executable_ -= size;
308   }
309   // Code which is part of the code-range does not have its own VirtualMemory.
310   DCHECK(isolate_->code_range() == NULL ||
311          !isolate_->code_range()->contains(
312              static_cast<Address>(reservation->address())));
313   DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
314          !isolate_->code_range()->valid());
315   reservation->Release();
316 }
317 
318 
319 void MemoryAllocator::FreeMemory(Address base, size_t size,
320                                  Executability executable) {
321   // TODO(gc) make code_range part of memory allocator?
322   DCHECK(size_ >= size);
323   size_ -= size;
324 
325   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
326 
327   if (executable == EXECUTABLE) {
328     DCHECK(size_executable_ >= size);
329     size_executable_ -= size;
330   }
331   if (isolate_->code_range() != NULL &&
332       isolate_->code_range()->contains(static_cast<Address>(base))) {
333     DCHECK(executable == EXECUTABLE);
334     isolate_->code_range()->FreeRawMemory(base, size);
335   } else {
336     DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
337            !isolate_->code_range()->valid());
338     bool result = base::VirtualMemory::ReleaseRegion(base, size);
339     USE(result);
340     DCHECK(result);
341   }
342 }
343 
344 
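// Reserves (but does not commit) |size| bytes of virtual memory, hands ownership
// of the reservation to |controller|, and returns the base address rounded up to
// |alignment|, or NULL if the reservation failed.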
345 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
346                                               base::VirtualMemory* controller) {
347   base::VirtualMemory reservation(size, alignment);
348 
349   if (!reservation.IsReserved()) return NULL;
350   size_ += reservation.size();
351   Address base =
352       RoundUp(static_cast<Address>(reservation.address()), alignment);
353   controller->TakeControl(&reservation);
354   return base;
355 }
356 
357 
358 Address MemoryAllocator::AllocateAlignedMemory(
359     size_t reserve_size, size_t commit_size, size_t alignment,
360     Executability executable, base::VirtualMemory* controller) {
361   DCHECK(commit_size <= reserve_size);
362   base::VirtualMemory reservation;
363   Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
364   if (base == NULL) return NULL;
365 
366   if (executable == EXECUTABLE) {
367     if (!CommitExecutableMemory(&reservation, base, commit_size,
368                                 reserve_size)) {
369       base = NULL;
370     }
371   } else {
372     if (reservation.Commit(base, commit_size, false)) {
373       UpdateAllocatedSpaceLimits(base, base + commit_size);
374     } else {
375       base = NULL;
376     }
377   }
378 
379   if (base == NULL) {
380     // Failed to commit the body. Release the mapping and any partially
381     // committed regions inside it.
382     reservation.Release();
383     return NULL;
384   }
385 
386   controller->TakeControl(&reservation);
387   return base;
388 }
389 
390 
391 void Page::InitializeAsAnchor(PagedSpace* owner) {
392   set_owner(owner);
393   set_prev_page(this);
394   set_next_page(this);
395 }
396 
397 
398 NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
399                                        SemiSpace* semi_space) {
400   Address area_start = start + NewSpacePage::kObjectStartOffset;
401   Address area_end = start + Page::kPageSize;
402 
403   MemoryChunk* chunk =
404       MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
405                               area_end, NOT_EXECUTABLE, semi_space);
406   chunk->set_next_chunk(NULL);
407   chunk->set_prev_chunk(NULL);
408   chunk->initialize_scan_on_scavenge(true);
409   bool in_to_space = (semi_space->id() != kFromSpace);
410   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
411                              : MemoryChunk::IN_FROM_SPACE);
412   DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
413                                        : MemoryChunk::IN_TO_SPACE));
414   NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
415   heap->incremental_marking()->SetNewSpacePageFlags(page);
416   return page;
417 }
418 
419 
420 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
421   set_owner(semi_space);
422   set_next_chunk(this);
423   set_prev_chunk(this);
424   // Clearing the flags marks this invalid page as not being in new-space.
425   // All real new-space pages will be in new-space.
426   SetFlags(0, ~0);
427 }
428 
429 
430 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
431                                      Address area_start, Address area_end,
432                                      Executability executable, Space* owner) {
433   MemoryChunk* chunk = FromAddress(base);
434 
435   DCHECK(base == chunk->address());
436 
437   chunk->heap_ = heap;
438   chunk->size_ = size;
439   chunk->area_start_ = area_start;
440   chunk->area_end_ = area_end;
441   chunk->flags_ = 0;
442   chunk->set_owner(owner);
443   chunk->InitializeReservedMemory();
444   chunk->slots_buffer_ = NULL;
445   chunk->skip_list_ = NULL;
446   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
447   chunk->progress_bar_ = 0;
448   chunk->high_water_mark_ = static_cast<int>(area_start - base);
449   chunk->set_parallel_sweeping(SWEEPING_DONE);
450   chunk->available_in_small_free_list_ = 0;
451   chunk->available_in_medium_free_list_ = 0;
452   chunk->available_in_large_free_list_ = 0;
453   chunk->available_in_huge_free_list_ = 0;
454   chunk->non_available_small_blocks_ = 0;
455   chunk->ResetLiveBytes();
456   Bitmap::Clear(chunk);
457   chunk->initialize_scan_on_scavenge(false);
458   chunk->SetFlag(WAS_SWEPT);
459 
460   DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
461   DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
462 
463   if (executable == EXECUTABLE) {
464     chunk->SetFlag(IS_EXECUTABLE);
465   }
466 
467   if (owner == heap->old_data_space()) {
468     chunk->SetFlag(CONTAINS_ONLY_DATA);
469   }
470 
471   return chunk;
472 }
473 
474 
475 // Commit MemoryChunk area to the requested size.
476 bool MemoryChunk::CommitArea(size_t requested) {
477   size_t guard_size =
478       IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
479   size_t header_size = area_start() - address() - guard_size;
480   size_t commit_size =
481       RoundUp(header_size + requested, base::OS::CommitPageSize());
482   size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
483                                   base::OS::CommitPageSize());
484 
485   if (commit_size > committed_size) {
486     // Commit size should be less than or equal to the reserved size.
487     DCHECK(commit_size <= size() - 2 * guard_size);
488     // Append the committed area.
489     Address start = address() + committed_size + guard_size;
490     size_t length = commit_size - committed_size;
491     if (reservation_.IsReserved()) {
492       Executability executable =
493           IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
494       if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
495                                                                executable)) {
496         return false;
497       }
498     } else {
499       CodeRange* code_range = heap_->isolate()->code_range();
500       DCHECK(code_range != NULL && code_range->valid() &&
501              IsFlagSet(IS_EXECUTABLE));
502       if (!code_range->CommitRawMemory(start, length)) return false;
503     }
504 
505     if (Heap::ShouldZapGarbage()) {
506       heap_->isolate()->memory_allocator()->ZapBlock(start, length);
507     }
508   } else if (commit_size < committed_size) {
509     DCHECK(commit_size > 0);
510     // Shrink the committed area.
511     size_t length = committed_size - commit_size;
512     Address start = address() + committed_size + guard_size - length;
513     if (reservation_.IsReserved()) {
514       if (!reservation_.Uncommit(start, length)) return false;
515     } else {
516       CodeRange* code_range = heap_->isolate()->code_range();
517       DCHECK(code_range != NULL && code_range->valid() &&
518              IsFlagSet(IS_EXECUTABLE));
519       if (!code_range->UncommitRawMemory(start, length)) return false;
520     }
521   }
522 
523   area_end_ = area_start_ + requested;
524   return true;
525 }
526 
527 
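// Links this chunk into the doubly linked chunk list immediately after |other|.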
528 void MemoryChunk::InsertAfter(MemoryChunk* other) {
529   MemoryChunk* other_next = other->next_chunk();
530 
531   set_next_chunk(other_next);
532   set_prev_chunk(other);
533   other_next->set_prev_chunk(this);
534   other->set_next_chunk(this);
535 }
536 
537 
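// Removes this chunk from its doubly linked chunk list and clears its links.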
538 void MemoryChunk::Unlink() {
539   MemoryChunk* next_element = next_chunk();
540   MemoryChunk* prev_element = prev_chunk();
541   next_element->set_prev_chunk(prev_element);
542   prev_element->set_next_chunk(next_element);
543   set_prev_chunk(NULL);
544   set_next_chunk(NULL);
545 }
546 
547 
548 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
549                                             intptr_t commit_area_size,
550                                             Executability executable,
551                                             Space* owner) {
552   DCHECK(commit_area_size <= reserve_area_size);
553 
554   size_t chunk_size;
555   Heap* heap = isolate_->heap();
556   Address base = NULL;
557   base::VirtualMemory reservation;
558   Address area_start = NULL;
559   Address area_end = NULL;
560 
561   //
562   // MemoryChunk layout:
563   //
564   //             Executable
565   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
566   // |           Header           |
567   // +----------------------------+<- base + CodePageGuardStartOffset
568   // |           Guard            |
569   // +----------------------------+<- area_start_
570   // |           Area             |
571   // +----------------------------+<- area_end_ (area_start + commit_area_size)
572   // |   Committed but not used   |
573   // +----------------------------+<- aligned at OS page boundary
574   // | Reserved but not committed |
575   // +----------------------------+<- aligned at OS page boundary
576   // |           Guard            |
577   // +----------------------------+<- base + chunk_size
578   //
579   //           Non-executable
580   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
581   // |          Header            |
582   // +----------------------------+<- area_start_ (base + kObjectStartOffset)
583   // |           Area             |
584   // +----------------------------+<- area_end_ (area_start + commit_area_size)
585   // |  Committed but not used    |
586   // +----------------------------+<- aligned at OS page boundary
587   // | Reserved but not committed |
588   // +----------------------------+<- base + chunk_size
589   //
590 
591   if (executable == EXECUTABLE) {
592     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
593                          base::OS::CommitPageSize()) +
594                  CodePageGuardSize();
595 
596     // Check executable memory limit.
597     if (size_executable_ + chunk_size > capacity_executable_) {
598       LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
599                                 "V8 Executable Allocation capacity exceeded"));
600       return NULL;
601     }
602 
603     // Size of header (not executable) plus area (executable).
604     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
605                                  base::OS::CommitPageSize());
606     // Allocate executable memory either from code range or from the
607     // OS.
608     if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
609       base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
610                                                        &chunk_size);
611       DCHECK(
612           IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
613       if (base == NULL) return NULL;
614       size_ += chunk_size;
615       // Update executable memory size.
616       size_executable_ += chunk_size;
617     } else {
618       base = AllocateAlignedMemory(chunk_size, commit_size,
619                                    MemoryChunk::kAlignment, executable,
620                                    &reservation);
621       if (base == NULL) return NULL;
622       // Update executable memory size.
623       size_executable_ += reservation.size();
624     }
625 
626     if (Heap::ShouldZapGarbage()) {
627       ZapBlock(base, CodePageGuardStartOffset());
628       ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
629     }
630 
631     area_start = base + CodePageAreaStartOffset();
632     area_end = area_start + commit_area_size;
633   } else {
634     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
635                          base::OS::CommitPageSize());
636     size_t commit_size =
637         RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
638                 base::OS::CommitPageSize());
639     base =
640         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
641                               executable, &reservation);
642 
643     if (base == NULL) return NULL;
644 
645     if (Heap::ShouldZapGarbage()) {
646       ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
647     }
648 
649     area_start = base + Page::kObjectStartOffset;
650     area_end = area_start + commit_area_size;
651   }
652 
653   // Use chunk_size for statistics and callbacks because we assume that they
654   // treat reserved but not-yet committed memory regions of chunks as allocated.
655   isolate_->counters()->memory_allocated()->Increment(
656       static_cast<int>(chunk_size));
657 
658   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
659   if (owner != NULL) {
660     ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
661     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
662   }
663 
664   MemoryChunk* result = MemoryChunk::Initialize(
665       heap, base, chunk_size, area_start, area_end, executable, owner);
666   result->set_reserved_memory(&reservation);
667   return result;
668 }
669 
670 
671 void Page::ResetFreeListStatistics() {
672   non_available_small_blocks_ = 0;
673   available_in_small_free_list_ = 0;
674   available_in_medium_free_list_ = 0;
675   available_in_large_free_list_ = 0;
676   available_in_huge_free_list_ = 0;
677 }
678 
679 
680 Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
681                                     Executability executable) {
682   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
683 
684   if (chunk == NULL) return NULL;
685 
686   return Page::Initialize(isolate_->heap(), chunk, executable, owner);
687 }
688 
689 
690 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
691                                               Space* owner,
692                                               Executability executable) {
693   MemoryChunk* chunk =
694       AllocateChunk(object_size, object_size, executable, owner);
695   if (chunk == NULL) return NULL;
696   return LargePage::Initialize(isolate_->heap(), chunk);
697 }
698 
699 
700 void MemoryAllocator::Free(MemoryChunk* chunk) {
701   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
702   if (chunk->owner() != NULL) {
703     ObjectSpace space =
704         static_cast<ObjectSpace>(1 << chunk->owner()->identity());
705     PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
706   }
707 
708   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
709                                          chunk->IsEvacuationCandidate());
710 
711   delete chunk->slots_buffer();
712   delete chunk->skip_list();
713 
714   base::VirtualMemory* reservation = chunk->reserved_memory();
715   if (reservation->IsReserved()) {
716     FreeMemory(reservation, chunk->executable());
717   } else {
718     FreeMemory(chunk->address(), chunk->size(), chunk->executable());
719   }
720 }
721 
722 
723 bool MemoryAllocator::CommitBlock(Address start, size_t size,
724                                   Executability executable) {
725   if (!CommitMemory(start, size, executable)) return false;
726 
727   if (Heap::ShouldZapGarbage()) {
728     ZapBlock(start, size);
729   }
730 
731   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
732   return true;
733 }
734 
735 
736 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
737   if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
738   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
739   return true;
740 }
741 
742 
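// Overwrites the block with kZapValue, one pointer-sized word at a time, so that
// stale reads from zapped memory are easy to recognize while debugging.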
743 void MemoryAllocator::ZapBlock(Address start, size_t size) {
744   for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
745     Memory::Address_at(start + s) = kZapValue;
746   }
747 }
748 
749 
750 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
751                                                 AllocationAction action,
752                                                 size_t size) {
753   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
754     MemoryAllocationCallbackRegistration registration =
755         memory_allocation_callbacks_[i];
756     if ((registration.space & space) == space &&
757         (registration.action & action) == action)
758       registration.callback(space, action, static_cast<int>(size));
759   }
760 }
761 
762 
763 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
764     MemoryAllocationCallback callback) {
765   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
766     if (memory_allocation_callbacks_[i].callback == callback) return true;
767   }
768   return false;
769 }
770 
771 
772 void MemoryAllocator::AddMemoryAllocationCallback(
773     MemoryAllocationCallback callback, ObjectSpace space,
774     AllocationAction action) {
775   DCHECK(callback != NULL);
776   MemoryAllocationCallbackRegistration registration(callback, space, action);
777   DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
778   return memory_allocation_callbacks_.Add(registration);
779 }
780 
781 
782 void MemoryAllocator::RemoveMemoryAllocationCallback(
783     MemoryAllocationCallback callback) {
784   DCHECK(callback != NULL);
785   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
786     if (memory_allocation_callbacks_[i].callback == callback) {
787       memory_allocation_callbacks_.Remove(i);
788       return;
789     }
790   }
791   UNREACHABLE();
792 }
793 
794 
795 #ifdef DEBUG
796 void MemoryAllocator::ReportStatistics() {
797   float pct = static_cast<float>(capacity_ - size_) / capacity_;
798   PrintF("  capacity: %" V8_PTR_PREFIX
799          "d"
800          ", used: %" V8_PTR_PREFIX
801          "d"
802          ", available: %%%d\n\n",
803          capacity_, size_, static_cast<int>(pct * 100));
804 }
805 #endif
806 
807 
808 int MemoryAllocator::CodePageGuardStartOffset() {
809   // We are guarding code pages: the first OS page after the header
810   // will be protected as non-writable.
811   return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
812 }
813 
814 
815 int MemoryAllocator::CodePageGuardSize() {
816   return static_cast<int>(base::OS::CommitPageSize());
817 }
818 
819 
820 int MemoryAllocator::CodePageAreaStartOffset() {
821   // We are guarding code pages: the first OS page after the header
822   // will be protected as non-writable.
823   return CodePageGuardStartOffset() + CodePageGuardSize();
824 }
825 
826 
827 int MemoryAllocator::CodePageAreaEndOffset() {
828   // We are guarding code pages: the last OS page will be protected as
829   // non-writable.
830   return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
831 }
832 
833 
834 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
835                                              Address start, size_t commit_size,
836                                              size_t reserved_size) {
837   // Commit page header (not executable).
838   if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
839     return false;
840   }
841 
842   // Create guard page after the header.
843   if (!vm->Guard(start + CodePageGuardStartOffset())) {
844     return false;
845   }
846 
847   // Commit page body (executable).
848   if (!vm->Commit(start + CodePageAreaStartOffset(),
849                   commit_size - CodePageGuardStartOffset(), true)) {
850     return false;
851   }
852 
853   // Create guard page before the end.
854   if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
855     return false;
856   }
857 
858   UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
859                                         commit_size -
860                                         CodePageGuardStartOffset());
861   return true;
862 }
863 
864 
865 // -----------------------------------------------------------------------------
866 // MemoryChunk implementation
867 
868 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
869   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
870   if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
871     static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
872   }
873   chunk->IncrementLiveBytes(by);
874 }
875 
876 
877 // -----------------------------------------------------------------------------
878 // PagedSpace implementation
879 
880 PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
881                        Executability executable)
882     : Space(heap, id, executable),
883       free_list_(this),
884       unswept_free_bytes_(0),
885       end_of_unswept_pages_(NULL),
886       emergency_memory_(NULL) {
887   if (id == CODE_SPACE) {
888     area_size_ = heap->isolate()->memory_allocator()->CodePageAreaSize();
889   } else {
890     area_size_ = Page::kPageSize - Page::kObjectStartOffset;
891   }
892   max_capacity_ =
893       (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
894   accounting_stats_.Clear();
895 
896   allocation_info_.set_top(NULL);
897   allocation_info_.set_limit(NULL);
898 
899   anchor_.InitializeAsAnchor(this);
900 }
901 
902 
903 bool PagedSpace::SetUp() { return true; }
904 
905 
906 bool PagedSpace::HasBeenSetUp() { return true; }
907 
908 
909 void PagedSpace::TearDown() {
910   PageIterator iterator(this);
911   while (iterator.has_next()) {
912     heap()->isolate()->memory_allocator()->Free(iterator.next());
913   }
914   anchor_.set_next_page(&anchor_);
915   anchor_.set_prev_page(&anchor_);
916   accounting_stats_.Clear();
917 }
918 
919 
920 size_t PagedSpace::CommittedPhysicalMemory() {
921   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
922   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
923   size_t size = 0;
924   PageIterator it(this);
925   while (it.has_next()) {
926     size += it.next()->CommittedPhysicalMemory();
927   }
928   return size;
929 }
930 
931 
932 Object* PagedSpace::FindObject(Address addr) {
933   // Note: this function can only be called on iterable spaces.
934   DCHECK(!heap()->mark_compact_collector()->in_use());
935 
936   if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
937 
938   Page* p = Page::FromAddress(addr);
939   HeapObjectIterator it(p, NULL);
940   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
941     Address cur = obj->address();
942     Address next = cur + obj->Size();
943     if ((cur <= addr) && (addr < next)) return obj;
944   }
945 
946   UNREACHABLE();
947   return Smi::FromInt(0);
948 }
949 
950 
951 bool PagedSpace::CanExpand() {
952   DCHECK(max_capacity_ % AreaSize() == 0);
953 
954   if (Capacity() == max_capacity_) return false;
955 
956   DCHECK(Capacity() < max_capacity_);
957 
958   // Are we going to exceed capacity for this space?
959   if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
960 
961   return true;
962 }
963 
964 
965 bool PagedSpace::Expand() {
966   if (!CanExpand()) return false;
967 
968   intptr_t size = AreaSize();
969 
970   if (anchor_.next_page() == &anchor_) {
971     size = SizeOfFirstPage();
972   }
973 
974   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
975                                                                 executable());
976   if (p == NULL) return false;
977 
978   DCHECK(Capacity() <= max_capacity_);
979 
980   p->InsertAfter(anchor_.prev_page());
981 
982   return true;
983 }
984 
985 
986 intptr_t PagedSpace::SizeOfFirstPage() {
987   // If using an out-of-line (ool) constant pool, transfer the constant pool
988   // allowance from the code space to the old pointer space.
989   static const int constant_pool_delta = FLAG_enable_ool_constant_pool ? 48 : 0;
990   int size = 0;
991   switch (identity()) {
992     case OLD_POINTER_SPACE:
993       size = (112 + constant_pool_delta) * kPointerSize * KB;
994       break;
995     case OLD_DATA_SPACE:
996       size = 192 * KB;
997       break;
998     case MAP_SPACE:
999       size = 16 * kPointerSize * KB;
1000       break;
1001     case CELL_SPACE:
1002       size = 16 * kPointerSize * KB;
1003       break;
1004     case PROPERTY_CELL_SPACE:
1005       size = 8 * kPointerSize * KB;
1006       break;
1007     case CODE_SPACE: {
1008       CodeRange* code_range = heap()->isolate()->code_range();
1009       if (code_range != NULL && code_range->valid()) {
1010         // When code range exists, code pages are allocated in a special way
1011         // (from the reserved code range). That part of the code is not yet
1012         // upgraded to handle small pages.
1013         size = AreaSize();
1014       } else {
1015         size = RoundUp((480 - constant_pool_delta) * KB *
1016                            FullCodeGenerator::kBootCodeSizeMultiplier / 100,
1017                        kPointerSize);
1018       }
1019       break;
1020     }
1021     default:
1022       UNREACHABLE();
1023   }
1024   return Min(size, AreaSize());
1025 }
1026 
1027 
1028 int PagedSpace::CountTotalPages() {
1029   PageIterator it(this);
1030   int count = 0;
1031   while (it.has_next()) {
1032     it.next();
1033     count++;
1034   }
1035   return count;
1036 }
1037 
1038 
1039 void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
1040   sizes->huge_size_ = page->available_in_huge_free_list();
1041   sizes->small_size_ = page->available_in_small_free_list();
1042   sizes->medium_size_ = page->available_in_medium_free_list();
1043   sizes->large_size_ = page->available_in_large_free_list();
1044 }
1045 
1046 
1047 void PagedSpace::ResetFreeListStatistics() {
1048   PageIterator page_iterator(this);
1049   while (page_iterator.has_next()) {
1050     Page* page = page_iterator.next();
1051     page->ResetFreeListStatistics();
1052   }
1053 }
1054 
1055 
1056 void PagedSpace::IncreaseCapacity(int size) {
1057   accounting_stats_.ExpandSpace(size);
1058 }
1059 
1060 
1061 void PagedSpace::ReleasePage(Page* page) {
1062   DCHECK(page->LiveBytes() == 0);
1063   DCHECK(AreaSize() == page->area_size());
1064 
1065   if (page->WasSwept()) {
1066     intptr_t size = free_list_.EvictFreeListItems(page);
1067     accounting_stats_.AllocateBytes(size);
1068     DCHECK_EQ(AreaSize(), static_cast<int>(size));
1069   } else {
1070     DecreaseUnsweptFreeBytes(page);
1071   }
1072 
1073   if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
1074     heap()->decrement_scan_on_scavenge_pages();
1075     page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
1076   }
1077 
1078   DCHECK(!free_list_.ContainsPageFreeListItems(page));
1079 
1080   if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1081     allocation_info_.set_top(NULL);
1082     allocation_info_.set_limit(NULL);
1083   }
1084 
1085   page->Unlink();
1086   if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
1087     heap()->isolate()->memory_allocator()->Free(page);
1088   } else {
1089     heap()->QueueMemoryChunkForFree(page);
1090   }
1091 
1092   DCHECK(Capacity() > 0);
1093   accounting_stats_.ShrinkSpace(AreaSize());
1094 }
1095 
1096 
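// Allocates a spare chunk of one page area up front; it can later be turned into
// a real page of this space (UseEmergencyMemory) or released back to the memory
// allocator (FreeEmergencyMemory).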
1097 void PagedSpace::CreateEmergencyMemory() {
1098   emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
1099       AreaSize(), AreaSize(), executable(), this);
1100 }
1101 
1102 
1103 void PagedSpace::FreeEmergencyMemory() {
1104   Page* page = static_cast<Page*>(emergency_memory_);
1105   DCHECK(page->LiveBytes() == 0);
1106   DCHECK(AreaSize() == page->area_size());
1107   DCHECK(!free_list_.ContainsPageFreeListItems(page));
1108   heap()->isolate()->memory_allocator()->Free(page);
1109   emergency_memory_ = NULL;
1110 }
1111 
1112 
1113 void PagedSpace::UseEmergencyMemory() {
1114   Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
1115   page->InsertAfter(anchor_.prev_page());
1116   emergency_memory_ = NULL;
1117 }
1118 
1119 
1120 #ifdef DEBUG
1121 void PagedSpace::Print() {}
1122 #endif
1123 
1124 #ifdef VERIFY_HEAP
1125 void PagedSpace::Verify(ObjectVisitor* visitor) {
1126   bool allocation_pointer_found_in_space =
1127       (allocation_info_.top() == allocation_info_.limit());
1128   PageIterator page_iterator(this);
1129   while (page_iterator.has_next()) {
1130     Page* page = page_iterator.next();
1131     CHECK(page->owner() == this);
1132     if (page == Page::FromAllocationTop(allocation_info_.top())) {
1133       allocation_pointer_found_in_space = true;
1134     }
1135     CHECK(page->WasSwept());
1136     HeapObjectIterator it(page, NULL);
1137     Address end_of_previous_object = page->area_start();
1138     Address top = page->area_end();
1139     int black_size = 0;
1140     for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1141       CHECK(end_of_previous_object <= object->address());
1142 
1143       // The first word should be a map, and we expect all map pointers to
1144       // be in map space.
1145       Map* map = object->map();
1146       CHECK(map->IsMap());
1147       CHECK(heap()->map_space()->Contains(map));
1148 
1149       // Perform space-specific object verification.
1150       VerifyObject(object);
1151 
1152       // The object itself should look OK.
1153       object->ObjectVerify();
1154 
1155       // All the interior pointers should be contained in the heap.
1156       int size = object->Size();
1157       object->IterateBody(map->instance_type(), size, visitor);
1158       if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1159         black_size += size;
1160       }
1161 
1162       CHECK(object->address() + size <= top);
1163       end_of_previous_object = object->address() + size;
1164     }
1165     CHECK_LE(black_size, page->LiveBytes());
1166   }
1167   CHECK(allocation_pointer_found_in_space);
1168 }
1169 #endif  // VERIFY_HEAP
1170 
1171 // -----------------------------------------------------------------------------
1172 // NewSpace implementation
1173 
1174 
1175 bool NewSpace::SetUp(int reserved_semispace_capacity,
1176                      int maximum_semispace_capacity) {
1177   // Set up new space based on the preallocated memory block defined by
1178   // start and size. The provided space is divided into two semi-spaces.
1179   // To support fast containment testing in the new space, the size of
1180   // this chunk must be a power of two and it must be aligned to its size.
1181   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1182 
1183   size_t size = 2 * reserved_semispace_capacity;
1184   Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1185       size, size, &reservation_);
1186   if (base == NULL) return false;
1187 
1188   chunk_base_ = base;
1189   chunk_size_ = static_cast<uintptr_t>(size);
1190   LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1191 
1192   DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
1193   DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
1194 
1195   // Allocate and set up the histogram arrays if necessary.
1196   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1197   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1198 
1199 #define SET_NAME(name)                        \
1200   allocated_histogram_[name].set_name(#name); \
1201   promoted_histogram_[name].set_name(#name);
1202   INSTANCE_TYPE_LIST(SET_NAME)
1203 #undef SET_NAME
1204 
1205   DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1206   DCHECK(static_cast<intptr_t>(chunk_size_) >=
1207          2 * heap()->ReservedSemiSpaceSize());
1208   DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1209 
1210   to_space_.SetUp(chunk_base_, initial_semispace_capacity,
1211                   maximum_semispace_capacity);
1212   from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1213                     initial_semispace_capacity, maximum_semispace_capacity);
1214   if (!to_space_.Commit()) {
1215     return false;
1216   }
1217   DCHECK(!from_space_.is_committed());  // No need to use memory yet.
1218 
1219   start_ = chunk_base_;
1220   address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1221   object_mask_ = address_mask_ | kHeapObjectTagMask;
1222   object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1223 
1224   ResetAllocationInfo();
1225 
1226   return true;
1227 }
1228 
1229 
1230 void NewSpace::TearDown() {
1231   if (allocated_histogram_) {
1232     DeleteArray(allocated_histogram_);
1233     allocated_histogram_ = NULL;
1234   }
1235   if (promoted_histogram_) {
1236     DeleteArray(promoted_histogram_);
1237     promoted_histogram_ = NULL;
1238   }
1239 
1240   start_ = NULL;
1241   allocation_info_.set_top(NULL);
1242   allocation_info_.set_limit(NULL);
1243 
1244   to_space_.TearDown();
1245   from_space_.TearDown();
1246 
1247   LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1248 
1249   DCHECK(reservation_.IsReserved());
1250   heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1251                                                     NOT_EXECUTABLE);
1252   chunk_base_ = NULL;
1253   chunk_size_ = 0;
1254 }
1255 
1256 
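// Swaps the roles of the from-space and to-space semispaces.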
1257 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
1258 
1259 
1260 void NewSpace::Grow() {
1261   // Double the semispace size but only up to maximum capacity.
1262   DCHECK(TotalCapacity() < MaximumCapacity());
1263   int new_capacity =
1264       Min(MaximumCapacity(), 2 * static_cast<int>(TotalCapacity()));
1265   if (to_space_.GrowTo(new_capacity)) {
1266     // Only grow from space if we managed to grow to-space.
1267     if (!from_space_.GrowTo(new_capacity)) {
1268       // If we managed to grow to-space but couldn't grow from-space,
1269       // attempt to shrink to-space.
1270       if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
1271         // We are in an inconsistent state because we could not
1272         // commit/uncommit memory from new space.
1273         V8::FatalProcessOutOfMemory("Failed to grow new space.");
1274       }
1275     }
1276   }
1277   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1278 }
1279 
1280 
1281 void NewSpace::Shrink() {
1282   int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
1283   int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1284   if (rounded_new_capacity < TotalCapacity() &&
1285       to_space_.ShrinkTo(rounded_new_capacity)) {
1286     // Only shrink from-space if we managed to shrink to-space.
1287     from_space_.Reset();
1288     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1289       // If we managed to shrink to-space but couldn't shrink from
1290       // space, attempt to grow to-space again.
1291       if (!to_space_.GrowTo(from_space_.TotalCapacity())) {
1292         // We are in an inconsistent state because we could not
1293         // commit/uncommit memory from new space.
1294         V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1295       }
1296     }
1297   }
1298   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1299 }
1300 
1301 
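// Moves the allocation top and limit to the bounds of the current to-space page
// and recomputes the inline allocation limit.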
1302 void NewSpace::UpdateAllocationInfo() {
1303   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1304   allocation_info_.set_top(to_space_.page_low());
1305   allocation_info_.set_limit(to_space_.page_high());
1306   UpdateInlineAllocationLimit(0);
1307   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1308 }
1309 
1310 
1311 void NewSpace::ResetAllocationInfo() {
1312   to_space_.Reset();
1313   UpdateAllocationInfo();
1314   pages_used_ = 0;
1315   // Clear all mark-bits in the to-space.
1316   NewSpacePageIterator it(&to_space_);
1317   while (it.has_next()) {
1318     Bitmap::Clear(it.next());
1319   }
1320 }
1321 
1322 
1323 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1324   if (heap()->inline_allocation_disabled()) {
1325     // Lowest limit when linear allocation was disabled.
1326     Address high = to_space_.page_high();
1327     Address new_top = allocation_info_.top() + size_in_bytes;
1328     allocation_info_.set_limit(Min(new_top, high));
1329   } else if (inline_allocation_limit_step() == 0) {
1330     // Normal limit is the end of the current page.
1331     allocation_info_.set_limit(to_space_.page_high());
1332   } else {
1333     // Lower limit during incremental marking.
1334     Address high = to_space_.page_high();
1335     Address new_top = allocation_info_.top() + size_in_bytes;
1336     Address new_limit = new_top + inline_allocation_limit_step_;
1337     allocation_info_.set_limit(Min(new_limit, high));
1338   }
1339   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1340 }
1341 
1342 
1343 bool NewSpace::AddFreshPage() {
1344   Address top = allocation_info_.top();
1345   if (NewSpacePage::IsAtStart(top)) {
1346     // The current page is already empty. Don't try to make another.
1347 
1348     // We should only get here if someone asks to allocate more
1349     // than what can be stored in a single page.
1350     // TODO(gc): Change the limit on new-space allocation to prevent this
1351     // from happening (all such allocations should go directly to LOSpace).
1352     return false;
1353   }
1354   if (!to_space_.AdvancePage()) {
1355     // Failed to get a new page in to-space.
1356     return false;
1357   }
1358 
1359   // Clear remainder of current page.
1360   Address limit = NewSpacePage::FromLimit(top)->area_end();
1361   if (heap()->gc_state() == Heap::SCAVENGE) {
1362     heap()->promotion_queue()->SetNewLimit(limit);
1363   }
1364 
1365   int remaining_in_page = static_cast<int>(limit - top);
1366   heap()->CreateFillerObjectAt(top, remaining_in_page);
1367   pages_used_++;
1368   UpdateAllocationInfo();
1369 
1370   return true;
1371 }
1372 
1373 
1374 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
1375   Address old_top = allocation_info_.top();
1376   Address high = to_space_.page_high();
1377   if (allocation_info_.limit() < high) {
1378     // Either the limit has been lowered because linear allocation was disabled
1379     // or because incremental marking wants to get a chance to do a step. Set
1380     // the new limit accordingly.
1381     Address new_top = old_top + size_in_bytes;
1382     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1383     heap()->incremental_marking()->Step(bytes_allocated,
1384                                         IncrementalMarking::GC_VIA_STACK_GUARD);
1385     UpdateInlineAllocationLimit(size_in_bytes);
1386     top_on_previous_step_ = new_top;
1387     return AllocateRaw(size_in_bytes);
1388   } else if (AddFreshPage()) {
1389     // Switched to new page. Try allocating again.
1390     int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1391     heap()->incremental_marking()->Step(bytes_allocated,
1392                                         IncrementalMarking::GC_VIA_STACK_GUARD);
1393     top_on_previous_step_ = to_space_.page_low();
1394     return AllocateRaw(size_in_bytes);
1395   } else {
1396     return AllocationResult::Retry();
1397   }
1398 }
1399 
1400 
1401 #ifdef VERIFY_HEAP
1402 // We do not use the SemiSpaceIterator because verification doesn't assume
1403 // that it works (it depends on the invariants we are checking).
1404 void NewSpace::Verify() {
1405   // The allocation pointer should be in the space or at the very end.
1406   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1407 
1408   // There should be objects packed in from the low address up to the
1409   // allocation pointer.
1410   Address current = to_space_.first_page()->area_start();
1411   CHECK_EQ(current, to_space_.space_start());
1412 
1413   while (current != top()) {
1414     if (!NewSpacePage::IsAtEnd(current)) {
1415       // The allocation pointer should not be in the middle of an object.
1416       CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1417             current < top());
1418 
1419       HeapObject* object = HeapObject::FromAddress(current);
1420 
1421       // The first word should be a map, and we expect all map pointers to
1422       // be in map space.
1423       Map* map = object->map();
1424       CHECK(map->IsMap());
1425       CHECK(heap()->map_space()->Contains(map));
1426 
1427       // The object should not be code or a map.
1428       CHECK(!object->IsMap());
1429       CHECK(!object->IsCode());
1430 
1431       // The object itself should look OK.
1432       object->ObjectVerify();
1433 
1434       // All the interior pointers should be contained in the heap.
1435       VerifyPointersVisitor visitor;
1436       int size = object->Size();
1437       object->IterateBody(map->instance_type(), size, &visitor);
1438 
1439       current += size;
1440     } else {
1441       // At end of page, switch to next page.
1442       NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1443       // Next page should be valid.
1444       CHECK(!page->is_anchor());
1445       current = page->area_start();
1446     }
1447   }
1448 
1449   // Check semi-spaces.
1450   CHECK_EQ(from_space_.id(), kFromSpace);
1451   CHECK_EQ(to_space_.id(), kToSpace);
1452   from_space_.Verify();
1453   to_space_.Verify();
1454 }
1455 #endif
1456 
1457 // -----------------------------------------------------------------------------
1458 // SemiSpace implementation
1459 
1460 void SemiSpace::SetUp(Address start, int initial_capacity,
1461                       int maximum_capacity) {
1462   // Creates a space in the young generation. The constructor does not
1463   // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
1464   // memory of size 'capacity' when set up, and does not grow or shrink
1465   // otherwise.  In the mark-compact collector, the memory region of the from
1466   // space is used as the marking stack. It requires contiguous memory
1467   // addresses.
1468   DCHECK(maximum_capacity >= Page::kPageSize);
1469   initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1470   total_capacity_ = initial_capacity;
1471   maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1472   maximum_committed_ = 0;
1473   committed_ = false;
1474   start_ = start;
1475   address_mask_ = ~(maximum_capacity - 1);
1476   object_mask_ = address_mask_ | kHeapObjectTagMask;
1477   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1478   age_mark_ = start_;
1479 }
1480 
1481 
1482 void SemiSpace::TearDown() {
1483   start_ = NULL;
1484   total_capacity_ = 0;
1485 }
1486 
1487 
1488 bool SemiSpace::Commit() {
1489   DCHECK(!is_committed());
1490   int pages = total_capacity_ / Page::kPageSize;
1491   if (!heap()->isolate()->memory_allocator()->CommitBlock(
1492           start_, total_capacity_, executable())) {
1493     return false;
1494   }
1495 
1496   NewSpacePage* current = anchor();
1497   for (int i = 0; i < pages; i++) {
1498     NewSpacePage* new_page =
1499         NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
1500     new_page->InsertAfter(current);
1501     current = new_page;
1502   }
1503 
1504   SetCapacity(total_capacity_);
1505   committed_ = true;
1506   Reset();
1507   return true;
1508 }
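
// Illustrative sketch, not part of V8: Commit() above strings the freshly
// initialized pages into a circular, doubly linked list headed by a sentinel
// "anchor" node, so an empty semispace is simply the anchor pointing at
// itself.  A toy version of that structure, with hypothetical types:
struct ToyPageNode {
  ToyPageNode* next;
  ToyPageNode* prev;
};

struct ToyPageList {
  ToyPageNode anchor;

  ToyPageList() { anchor.next = anchor.prev = &anchor; }

  bool empty() const { return anchor.next == &anchor; }

  // Insert 'page' right after 'pos' (pass the last page to append, as the
  // loop in Commit() does).
  static void InsertAfter(ToyPageNode* pos, ToyPageNode* page) {
    page->next = pos->next;
    page->prev = pos;
    pos->next->prev = page;
    pos->next = page;
  }
};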
1509 
1510 
1511 bool SemiSpace::Uncommit() {
1512   DCHECK(is_committed());
1513   Address start = start_ + maximum_total_capacity_ - total_capacity_;
1514   if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
1515                                                             total_capacity_)) {
1516     return false;
1517   }
1518   anchor()->set_next_page(anchor());
1519   anchor()->set_prev_page(anchor());
1520 
1521   committed_ = false;
1522   return true;
1523 }
1524 
1525 
1526 size_t SemiSpace::CommittedPhysicalMemory() {
1527   if (!is_committed()) return 0;
1528   size_t size = 0;
1529   NewSpacePageIterator it(this);
1530   while (it.has_next()) {
1531     size += it.next()->CommittedPhysicalMemory();
1532   }
1533   return size;
1534 }
1535 
1536 
1537 bool SemiSpace::GrowTo(int new_capacity) {
1538   if (!is_committed()) {
1539     if (!Commit()) return false;
1540   }
1541   DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1542   DCHECK(new_capacity <= maximum_total_capacity_);
1543   DCHECK(new_capacity > total_capacity_);
1544   int pages_before = total_capacity_ / Page::kPageSize;
1545   int pages_after = new_capacity / Page::kPageSize;
1546 
1547   size_t delta = new_capacity - total_capacity_;
1548 
1549   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1550   if (!heap()->isolate()->memory_allocator()->CommitBlock(
1551           start_ + total_capacity_, delta, executable())) {
1552     return false;
1553   }
1554   SetCapacity(new_capacity);
1555   NewSpacePage* last_page = anchor()->prev_page();
1556   DCHECK(last_page != anchor());
1557   for (int i = pages_before; i < pages_after; i++) {
1558     Address page_address = start_ + i * Page::kPageSize;
1559     NewSpacePage* new_page =
1560         NewSpacePage::Initialize(heap(), page_address, this);
1561     new_page->InsertAfter(last_page);
1562     Bitmap::Clear(new_page);
1563     // Duplicate the flags that were set on the old page.
1564     new_page->SetFlags(last_page->GetFlags(),
1565                        NewSpacePage::kCopyOnFlipFlagsMask);
1566     last_page = new_page;
1567   }
1568   return true;
1569 }
1570 
1571 
1572 bool SemiSpace::ShrinkTo(int new_capacity) {
1573   DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1574   DCHECK(new_capacity >= initial_total_capacity_);
1575   DCHECK(new_capacity < total_capacity_);
1576   if (is_committed()) {
1577     size_t delta = total_capacity_ - new_capacity;
1578     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1579 
1580     MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1581     if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
1582       return false;
1583     }
1584 
1585     int pages_after = new_capacity / Page::kPageSize;
1586     NewSpacePage* new_last_page =
1587         NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1588     new_last_page->set_next_page(anchor());
1589     anchor()->set_prev_page(new_last_page);
1590     DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
1591   }
1592 
1593   SetCapacity(new_capacity);
1594 
1595   return true;
1596 }
1597 
1598 
1599 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1600   anchor_.set_owner(this);
1601   // Fix up back-pointers to the anchor. The anchor's address changes
1602   // when we swap.
1603   anchor_.prev_page()->set_next_page(&anchor_);
1604   anchor_.next_page()->set_prev_page(&anchor_);
1605 
1606   bool becomes_to_space = (id_ == kFromSpace);
1607   id_ = becomes_to_space ? kToSpace : kFromSpace;
1608   NewSpacePage* page = anchor_.next_page();
1609   while (page != &anchor_) {
1610     page->set_owner(this);
1611     page->SetFlags(flags, mask);
1612     if (becomes_to_space) {
1613       page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1614       page->SetFlag(MemoryChunk::IN_TO_SPACE);
1615       page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1616       page->ResetLiveBytes();
1617     } else {
1618       page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1619       page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1620     }
1621     DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1622     DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1623            page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1624     page = page->next_page();
1625   }
1626 }
1627 
1628 
1629 void SemiSpace::Reset() {
1630   DCHECK(anchor_.next_page() != &anchor_);
1631   current_page_ = anchor_.next_page();
1632 }
1633 
1634 
1635 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1636   // We won't be swapping semispaces without data in them.
1637   DCHECK(from->anchor_.next_page() != &from->anchor_);
1638   DCHECK(to->anchor_.next_page() != &to->anchor_);
1639 
1640   // Swap bits.
1641   SemiSpace tmp = *from;
1642   *from = *to;
1643   *to = tmp;
1644 
1645   // Fixup back-pointers to the page list anchor now that its address
1646   // has changed.
1647   // Swap to/from-space bits on pages.
1648   // Copy GC flags from old active space (from-space) to new (to-space).
1649   intptr_t flags = from->current_page()->GetFlags();
1650   to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1651 
1652   from->FlipPages(0, 0);
1653 }
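
// Illustrative sketch, not part of V8: Swap() above copies the two SemiSpace
// objects by value, which relocates the anchor node embedded in each of them;
// the neighbouring pages still point at the old anchor address until
// FlipPages() repairs the links.  The fix-up itself is just this, shown with
// a hypothetical type:
struct ToyAnchorNode {
  ToyAnchorNode* next;
  ToyAnchorNode* prev;
};

// Restore the list invariant after 'anchor' has been moved to a new address
// (its own next/prev pointers were copied along with it and are still valid).
static void ToyFixupMovedAnchor(ToyAnchorNode* anchor) {
  anchor->prev->next = anchor;
  anchor->next->prev = anchor;
}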
1654 
1655 
1656 void SemiSpace::SetCapacity(int new_capacity) {
1657   total_capacity_ = new_capacity;
1658   if (total_capacity_ > maximum_committed_) {
1659     maximum_committed_ = total_capacity_;
1660   }
1661 }
1662 
1663 
1664 void SemiSpace::set_age_mark(Address mark) {
1665   DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
1666   age_mark_ = mark;
1667   // Mark all pages up to the one containing mark.
1668   NewSpacePageIterator it(space_start(), mark);
1669   while (it.has_next()) {
1670     it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1671   }
1672 }
1673 
1674 
1675 #ifdef DEBUG
1676 void SemiSpace::Print() {}
1677 #endif
1678 
1679 #ifdef VERIFY_HEAP
1680 void SemiSpace::Verify() {
1681   bool is_from_space = (id_ == kFromSpace);
1682   NewSpacePage* page = anchor_.next_page();
1683   CHECK(anchor_.semi_space() == this);
1684   while (page != &anchor_) {
1685     CHECK(page->semi_space() == this);
1686     CHECK(page->InNewSpace());
1687     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1688                                         : MemoryChunk::IN_TO_SPACE));
1689     CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1690                                          : MemoryChunk::IN_FROM_SPACE));
1691     CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1692     if (!is_from_space) {
1693       // The pointers-from-here-are-interesting flag isn't updated dynamically
1694       // on from-space pages, so it might be out of sync with the marking state.
1695       if (page->heap()->incremental_marking()->IsMarking()) {
1696         CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1697       } else {
1698         CHECK(
1699             !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1700       }
1701       // TODO(gc): Check that the live_bytes_count_ field matches the
1702       // black marking on the page (if we make it match in new-space).
1703     }
1704     CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1705     CHECK(page->prev_page()->next_page() == page);
1706     page = page->next_page();
1707   }
1708 }
1709 #endif
1710 
1711 #ifdef DEBUG
1712 void SemiSpace::AssertValidRange(Address start, Address end) {
1713   // Addresses must belong to the same semi-space.
1714   NewSpacePage* page = NewSpacePage::FromLimit(start);
1715   NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1716   SemiSpace* space = page->semi_space();
1717   CHECK_EQ(space, end_page->semi_space());
1718   // Start address is before end address, either on same page,
1719   // or end address is on a later page in the linked list of
1720   // semi-space pages.
1721   if (page == end_page) {
1722     CHECK(start <= end);
1723   } else {
1724     while (page != end_page) {
1725       page = page->next_page();
1726       CHECK_NE(page, space->anchor());
1727     }
1728   }
1729 }
1730 #endif
1731 
1732 
1733 // -----------------------------------------------------------------------------
1734 // SemiSpaceIterator implementation.
1735 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1736   Initialize(space->bottom(), space->top(), NULL);
1737 }
1738 
1739 
1740 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1741                                      HeapObjectCallback size_func) {
1742   Initialize(space->bottom(), space->top(), size_func);
1743 }
1744 
1745 
1746 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1747   Initialize(start, space->top(), NULL);
1748 }
1749 
1750 
1751 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1752   Initialize(from, to, NULL);
1753 }
1754 
1755 
1756 void SemiSpaceIterator::Initialize(Address start, Address end,
1757                                    HeapObjectCallback size_func) {
1758   SemiSpace::AssertValidRange(start, end);
1759   current_ = start;
1760   limit_ = end;
1761   size_func_ = size_func;
1762 }
1763 
1764 
1765 #ifdef DEBUG
1766 // heap_histograms is shared, always clear it before using it.
1767 static void ClearHistograms(Isolate* isolate) {
1768 // We reset the name each time, though it hasn't changed.
1769 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1770   INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1771 #undef DEF_TYPE_NAME
1772 
1773 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1774   INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1775 #undef CLEAR_HISTOGRAM
1776 
1777   isolate->js_spill_information()->Clear();
1778 }
1779 
1780 
1781 static void ClearCodeKindStatistics(int* code_kind_statistics) {
1782   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1783     code_kind_statistics[i] = 0;
1784   }
1785 }
1786 
1787 
1788 static void ReportCodeKindStatistics(int* code_kind_statistics) {
1789   PrintF("\n   Code kind histograms: \n");
1790   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1791     if (code_kind_statistics[i] > 0) {
1792       PrintF("     %-20s: %10d bytes\n",
1793              Code::Kind2String(static_cast<Code::Kind>(i)),
1794              code_kind_statistics[i]);
1795     }
1796   }
1797   PrintF("\n");
1798 }
1799 
1800 
1801 static int CollectHistogramInfo(HeapObject* obj) {
1802   Isolate* isolate = obj->GetIsolate();
1803   InstanceType type = obj->map()->instance_type();
1804   DCHECK(0 <= type && type <= LAST_TYPE);
1805   DCHECK(isolate->heap_histograms()[type].name() != NULL);
1806   isolate->heap_histograms()[type].increment_number(1);
1807   isolate->heap_histograms()[type].increment_bytes(obj->Size());
1808 
1809   if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1810     JSObject::cast(obj)
1811         ->IncrementSpillStatistics(isolate->js_spill_information());
1812   }
1813 
1814   return obj->Size();
1815 }
1816 
1817 
1818 static void ReportHistogram(Isolate* isolate, bool print_spill) {
1819   PrintF("\n  Object Histogram:\n");
1820   for (int i = 0; i <= LAST_TYPE; i++) {
1821     if (isolate->heap_histograms()[i].number() > 0) {
1822       PrintF("    %-34s%10d (%10d bytes)\n",
1823              isolate->heap_histograms()[i].name(),
1824              isolate->heap_histograms()[i].number(),
1825              isolate->heap_histograms()[i].bytes());
1826     }
1827   }
1828   PrintF("\n");
1829 
1830   // Summarize string types.
1831   int string_number = 0;
1832   int string_bytes = 0;
1833 #define INCREMENT(type, size, name, camel_name)               \
1834   string_number += isolate->heap_histograms()[type].number(); \
1835   string_bytes += isolate->heap_histograms()[type].bytes();
1836   STRING_TYPE_LIST(INCREMENT)
1837 #undef INCREMENT
1838   if (string_number > 0) {
1839     PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1840            string_bytes);
1841   }
1842 
1843   if (FLAG_collect_heap_spill_statistics && print_spill) {
1844     isolate->js_spill_information()->Print();
1845   }
1846 }
1847 #endif  // DEBUG
1848 
1849 
1850 // Support for statistics gathering for --heap-stats and --log-gc.
1851 void NewSpace::ClearHistograms() {
1852   for (int i = 0; i <= LAST_TYPE; i++) {
1853     allocated_histogram_[i].clear();
1854     promoted_histogram_[i].clear();
1855   }
1856 }
1857 
1858 
1859 // Because the copying collector does not touch garbage objects, we iterate
1860 // the new space before a collection to get a histogram of allocated objects.
1861 // This only happens when --log-gc flag is set.
1862 void NewSpace::CollectStatistics() {
1863   ClearHistograms();
1864   SemiSpaceIterator it(this);
1865   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1866     RecordAllocation(obj);
1867 }
1868 
1869 
1870 static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
1871                                const char* description) {
1872   LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1873   // Lump all the string types together.
1874   int string_number = 0;
1875   int string_bytes = 0;
1876 #define INCREMENT(type, size, name, camel_name) \
1877   string_number += info[type].number();         \
1878   string_bytes += info[type].bytes();
1879   STRING_TYPE_LIST(INCREMENT)
1880 #undef INCREMENT
1881   if (string_number > 0) {
1882     LOG(isolate,
1883         HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1884   }
1885 
1886   // Then do the other types.
1887   for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1888     if (info[i].number() > 0) {
1889       LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
1890                                        info[i].bytes()));
1891     }
1892   }
1893   LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1894 }
1895 
1896 
1897 void NewSpace::ReportStatistics() {
1898 #ifdef DEBUG
1899   if (FLAG_heap_stats) {
1900     float pct = static_cast<float>(Available()) / TotalCapacity();
1901     PrintF("  capacity: %" V8_PTR_PREFIX
1902            "d"
1903            ", available: %" V8_PTR_PREFIX "d, %%%d\n",
1904            TotalCapacity(), Available(), static_cast<int>(pct * 100));
1905     PrintF("\n  Object Histogram:\n");
1906     for (int i = 0; i <= LAST_TYPE; i++) {
1907       if (allocated_histogram_[i].number() > 0) {
1908         PrintF("    %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
1909                allocated_histogram_[i].number(),
1910                allocated_histogram_[i].bytes());
1911       }
1912     }
1913     PrintF("\n");
1914   }
1915 #endif  // DEBUG
1916 
1917   if (FLAG_log_gc) {
1918     Isolate* isolate = heap()->isolate();
1919     DoReportStatistics(isolate, allocated_histogram_, "allocated");
1920     DoReportStatistics(isolate, promoted_histogram_, "promoted");
1921   }
1922 }
1923 
1924 
1925 void NewSpace::RecordAllocation(HeapObject* obj) {
1926   InstanceType type = obj->map()->instance_type();
1927   DCHECK(0 <= type && type <= LAST_TYPE);
1928   allocated_histogram_[type].increment_number(1);
1929   allocated_histogram_[type].increment_bytes(obj->Size());
1930 }
1931 
1932 
1933 void NewSpace::RecordPromotion(HeapObject* obj) {
1934   InstanceType type = obj->map()->instance_type();
1935   DCHECK(0 <= type && type <= LAST_TYPE);
1936   promoted_histogram_[type].increment_number(1);
1937   promoted_histogram_[type].increment_bytes(obj->Size());
1938 }
1939 
1940 
1941 size_t NewSpace::CommittedPhysicalMemory() {
1942   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
1943   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1944   size_t size = to_space_.CommittedPhysicalMemory();
1945   if (from_space_.is_committed()) {
1946     size += from_space_.CommittedPhysicalMemory();
1947   }
1948   return size;
1949 }
1950 
1951 
1952 // -----------------------------------------------------------------------------
1953 // Free lists for old object spaces implementation
1954 
1955 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
1956   DCHECK(size_in_bytes > 0);
1957   DCHECK(IsAligned(size_in_bytes, kPointerSize));
1958 
1959   // We write a map and possibly size information to the block.  If the block
1960   // is big enough to be a FreeSpace with at least one extra word (the next
1961   // pointer), we set its map to be the free space map and its size to an
1962   // appropriate array length for the desired size from HeapObject::Size().
1963   // If the block is too small (e.g., one or two words) to hold both a size
1964   // field and a next pointer, we give it a filler map that gives it the
1965   // correct size.
1966   if (size_in_bytes > FreeSpace::kHeaderSize) {
1967     // Can't use FreeSpace::cast because it fails during deserialization.
1968     // We have to set the size first with a release store before we store
1969     // the map because a concurrent store buffer scan on scavenge must not
1970     // observe a map with an invalid size.
1971     FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1972     this_as_free_space->nobarrier_set_size(size_in_bytes);
1973     synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
1974   } else if (size_in_bytes == kPointerSize) {
1975     set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
1976   } else if (size_in_bytes == 2 * kPointerSize) {
1977     set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
1978   } else {
1979     UNREACHABLE();
1980   }
1981   // We would like to DCHECK(Size() == size_in_bytes) but this would fail during
1982   // deserialization because the free space map is not done yet.
1983 }
1984 
1985 
1986 FreeListNode* FreeListNode::next() {
1987   DCHECK(IsFreeListNode(this));
1988   if (map() == GetHeap()->raw_unchecked_free_space_map()) {
1989     DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
1990     return reinterpret_cast<FreeListNode*>(
1991         Memory::Address_at(address() + kNextOffset));
1992   } else {
1993     return reinterpret_cast<FreeListNode*>(
1994         Memory::Address_at(address() + kPointerSize));
1995   }
1996 }
1997 
1998 
1999 FreeListNode** FreeListNode::next_address() {
2000   DCHECK(IsFreeListNode(this));
2001   if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2002     DCHECK(Size() >= kNextOffset + kPointerSize);
2003     return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
2004   } else {
2005     return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
2006   }
2007 }
2008 
2009 
2010 void FreeListNode::set_next(FreeListNode* next) {
2011   DCHECK(IsFreeListNode(this));
2012   // While we are booting the VM the free space map will actually be null.  So
2013   // we have to make sure that we don't try to use it for anything at that
2014   // stage.
2015   if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2016     DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
2017     base::NoBarrier_Store(
2018         reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
2019         reinterpret_cast<base::AtomicWord>(next));
2020   } else {
2021     base::NoBarrier_Store(
2022         reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
2023         reinterpret_cast<base::AtomicWord>(next));
2024   }
2025 }
2026 
2027 
2028 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
2029   intptr_t free_bytes = 0;
2030   if (category->top() != NULL) {
2031     // This is safe (not going to deadlock) since Concatenate operations
2032     // are never performed on the same free lists at the same time in
2033     // reverse order.
2034     base::LockGuard<base::Mutex> target_lock_guard(mutex());
2035     base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
2036     DCHECK(category->end_ != NULL);
2037     free_bytes = category->available();
2038     if (end_ == NULL) {
2039       end_ = category->end();
2040     } else {
2041       category->end()->set_next(top());
2042     }
2043     set_top(category->top());
2044     base::NoBarrier_Store(&top_, category->top_);
2045     available_ += category->available();
2046     category->Reset();
2047   }
2048   return free_bytes;
2049 }
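
// Illustrative sketch, not part of V8: the two lock guards in Concatenate()
// above avoid deadlock only because, as the comment says, callers never
// concatenate the same pair of lists in opposite orders at the same time.
// When that guarantee is not available, the standard-library way to take both
// locks safely is to acquire them as one operation (hypothetical toy type,
// standard C++ only):
#include <mutex>

struct ToyLockedCategory {
  std::mutex mutex;
  long available;
};

static void ToyConcatenate(ToyLockedCategory* target,
                           ToyLockedCategory* source) {
  // std::lock acquires both mutexes without deadlock no matter how different
  // threads order the two arguments.
  std::unique_lock<std::mutex> target_lock(target->mutex, std::defer_lock);
  std::unique_lock<std::mutex> source_lock(source->mutex, std::defer_lock);
  std::lock(target_lock, source_lock);
  target->available += source->available;
  source->available = 0;
}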
2050 
2051 
2052 void FreeListCategory::Reset() {
2053   set_top(NULL);
2054   set_end(NULL);
2055   set_available(0);
2056 }
2057 
2058 
2059 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
2060   int sum = 0;
2061   FreeListNode* t = top();
2062   FreeListNode** n = &t;
2063   while (*n != NULL) {
2064     if (Page::FromAddress((*n)->address()) == p) {
2065       FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
2066       sum += free_space->Size();
2067       *n = (*n)->next();
2068     } else {
2069       n = (*n)->next_address();
2070     }
2071   }
2072   set_top(t);
2073   if (top() == NULL) {
2074     set_end(NULL);
2075   }
2076   available_ -= sum;
2077   return sum;
2078 }
2079 
2080 
2081 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
2082   FreeListNode* node = top();
2083   while (node != NULL) {
2084     if (Page::FromAddress(node->address()) == p) return true;
2085     node = node->next();
2086   }
2087   return false;
2088 }
2089 
2090 
2091 FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
2092   FreeListNode* node = top();
2093 
2094   if (node == NULL) return NULL;
2095 
2096   while (node != NULL &&
2097          Page::FromAddress(node->address())->IsEvacuationCandidate()) {
2098     available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
2099     node = node->next();
2100   }
2101 
2102   if (node != NULL) {
2103     set_top(node->next());
2104     *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
2105     available_ -= *node_size;
2106   } else {
2107     set_top(NULL);
2108   }
2109 
2110   if (top() == NULL) {
2111     set_end(NULL);
2112   }
2113 
2114   return node;
2115 }
2116 
2117 
2118 FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
2119                                                  int* node_size) {
2120   FreeListNode* node = PickNodeFromList(node_size);
2121   if (node != NULL && *node_size < size_in_bytes) {
2122     Free(node, *node_size);
2123     *node_size = 0;
2124     return NULL;
2125   }
2126   return node;
2127 }
2128 
2129 
2130 void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
2131   node->set_next(top());
2132   set_top(node);
2133   if (end_ == NULL) {
2134     end_ = node;
2135   }
2136   available_ += size_in_bytes;
2137 }
2138 
2139 
2140 void FreeListCategory::RepairFreeList(Heap* heap) {
2141   FreeListNode* n = top();
2142   while (n != NULL) {
2143     Map** map_location = reinterpret_cast<Map**>(n->address());
2144     if (*map_location == NULL) {
2145       *map_location = heap->free_space_map();
2146     } else {
2147       DCHECK(*map_location == heap->free_space_map());
2148     }
2149     n = n->next();
2150   }
2151 }
2152 
2153 
2154 FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
2155   Reset();
2156 }
2157 
2158 
2159 intptr_t FreeList::Concatenate(FreeList* free_list) {
2160   intptr_t free_bytes = 0;
2161   free_bytes += small_list_.Concatenate(free_list->small_list());
2162   free_bytes += medium_list_.Concatenate(free_list->medium_list());
2163   free_bytes += large_list_.Concatenate(free_list->large_list());
2164   free_bytes += huge_list_.Concatenate(free_list->huge_list());
2165   return free_bytes;
2166 }
2167 
2168 
2169 void FreeList::Reset() {
2170   small_list_.Reset();
2171   medium_list_.Reset();
2172   large_list_.Reset();
2173   huge_list_.Reset();
2174 }
2175 
2176 
2177 int FreeList::Free(Address start, int size_in_bytes) {
2178   if (size_in_bytes == 0) return 0;
2179 
2180   FreeListNode* node = FreeListNode::FromAddress(start);
2181   node->set_size(heap_, size_in_bytes);
2182   Page* page = Page::FromAddress(start);
2183 
2184   // Early return to drop too-small blocks on the floor.
2185   if (size_in_bytes < kSmallListMin) {
2186     page->add_non_available_small_blocks(size_in_bytes);
2187     return size_in_bytes;
2188   }
2189 
2190   // Insert other blocks at the head of a free list of the appropriate
2191   // magnitude.
2192   if (size_in_bytes <= kSmallListMax) {
2193     small_list_.Free(node, size_in_bytes);
2194     page->add_available_in_small_free_list(size_in_bytes);
2195   } else if (size_in_bytes <= kMediumListMax) {
2196     medium_list_.Free(node, size_in_bytes);
2197     page->add_available_in_medium_free_list(size_in_bytes);
2198   } else if (size_in_bytes <= kLargeListMax) {
2199     large_list_.Free(node, size_in_bytes);
2200     page->add_available_in_large_free_list(size_in_bytes);
2201   } else {
2202     huge_list_.Free(node, size_in_bytes);
2203     page->add_available_in_huge_free_list(size_in_bytes);
2204   }
2205 
2206   DCHECK(IsVeryLong() || available() == SumFreeLists());
2207   return 0;
2208 }
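
// Illustrative sketch, not part of V8: Free() above drops blocks below
// kSmallListMin on the floor and otherwise bins them into one of four
// segregated lists by size.  The real constants (kSmallListMin, kSmallListMax
// and friends) are defined elsewhere; the placeholder values below are only
// meant to show the shape of the dispatch.
enum ToySizeClass { kToyTooSmall, kToySmall, kToyMedium, kToyLarge, kToyHuge };

static ToySizeClass ToyClassifyFreeBlock(int size_in_bytes) {
  const int kToyListMin = 3 * 8;        // placeholder, not V8's constant
  const int kToySmallMax = 0xff * 8;    // placeholder
  const int kToyMediumMax = 0x7ff * 8;  // placeholder
  const int kToyLargeMax = 0x3fff * 8;  // placeholder
  if (size_in_bytes < kToyListMin) return kToyTooSmall;  // counted as wasted
  if (size_in_bytes <= kToySmallMax) return kToySmall;
  if (size_in_bytes <= kToyMediumMax) return kToyMedium;
  if (size_in_bytes <= kToyLargeMax) return kToyLarge;
  return kToyHuge;
}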
2209 
2210 
2211 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2212   FreeListNode* node = NULL;
2213   Page* page = NULL;
2214 
2215   if (size_in_bytes <= kSmallAllocationMax) {
2216     node = small_list_.PickNodeFromList(node_size);
2217     if (node != NULL) {
2218       DCHECK(size_in_bytes <= *node_size);
2219       page = Page::FromAddress(node->address());
2220       page->add_available_in_small_free_list(-(*node_size));
2221       DCHECK(IsVeryLong() || available() == SumFreeLists());
2222       return node;
2223     }
2224   }
2225 
2226   if (size_in_bytes <= kMediumAllocationMax) {
2227     node = medium_list_.PickNodeFromList(node_size);
2228     if (node != NULL) {
2229       DCHECK(size_in_bytes <= *node_size);
2230       page = Page::FromAddress(node->address());
2231       page->add_available_in_medium_free_list(-(*node_size));
2232       DCHECK(IsVeryLong() || available() == SumFreeLists());
2233       return node;
2234     }
2235   }
2236 
2237   if (size_in_bytes <= kLargeAllocationMax) {
2238     node = large_list_.PickNodeFromList(node_size);
2239     if (node != NULL) {
2240       DCHECK(size_in_bytes <= *node_size);
2241       page = Page::FromAddress(node->address());
2242       page->add_available_in_large_free_list(-(*node_size));
2243       DCHECK(IsVeryLong() || available() == SumFreeLists());
2244       return node;
2245     }
2246   }
2247 
2248   int huge_list_available = huge_list_.available();
2249   FreeListNode* top_node = huge_list_.top();
2250   for (FreeListNode** cur = &top_node; *cur != NULL;
2251        cur = (*cur)->next_address()) {
2252     FreeListNode* cur_node = *cur;
2253     while (cur_node != NULL &&
2254            Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
2255       int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
2256       huge_list_available -= size;
2257       page = Page::FromAddress(cur_node->address());
2258       page->add_available_in_huge_free_list(-size);
2259       cur_node = cur_node->next();
2260     }
2261 
2262     *cur = cur_node;
2263     if (cur_node == NULL) {
2264       huge_list_.set_end(NULL);
2265       break;
2266     }
2267 
2268     DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map());
2269     FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
2270     int size = cur_as_free_space->Size();
2271     if (size >= size_in_bytes) {
2272       // Large enough node found.  Unlink it from the list.
2273       node = *cur;
2274       *cur = node->next();
2275       *node_size = size;
2276       huge_list_available -= size;
2277       page = Page::FromAddress(node->address());
2278       page->add_available_in_huge_free_list(-size);
2279       break;
2280     }
2281   }
2282 
2283   huge_list_.set_top(top_node);
2284   if (huge_list_.top() == NULL) {
2285     huge_list_.set_end(NULL);
2286   }
2287   huge_list_.set_available(huge_list_available);
2288 
2289   if (node != NULL) {
2290     DCHECK(IsVeryLong() || available() == SumFreeLists());
2291     return node;
2292   }
2293 
2294   if (size_in_bytes <= kSmallListMax) {
2295     node = small_list_.PickNodeFromList(size_in_bytes, node_size);
2296     if (node != NULL) {
2297       DCHECK(size_in_bytes <= *node_size);
2298       page = Page::FromAddress(node->address());
2299       page->add_available_in_small_free_list(-(*node_size));
2300     }
2301   } else if (size_in_bytes <= kMediumListMax) {
2302     node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
2303     if (node != NULL) {
2304       DCHECK(size_in_bytes <= *node_size);
2305       page = Page::FromAddress(node->address());
2306       page->add_available_in_medium_free_list(-(*node_size));
2307     }
2308   } else if (size_in_bytes <= kLargeListMax) {
2309     node = large_list_.PickNodeFromList(size_in_bytes, node_size);
2310     if (node != NULL) {
2311       DCHECK(size_in_bytes <= *node_size);
2312       page = Page::FromAddress(node->address());
2313       page->add_available_in_large_free_list(-(*node_size));
2314     }
2315   }
2316 
2317   DCHECK(IsVeryLong() || available() == SumFreeLists());
2318   return node;
2319 }
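
// Illustrative sketch, not part of V8: the huge-list loop in FindNodeFor()
// above is a first-fit scan that unlinks the chosen node in place by walking
// a pointer-to-pointer cursor, the same trick used in
// EvictFreeListItemsInList() earlier.  A stand-alone toy version:
struct ToyFreeNode {
  int size;
  ToyFreeNode* next;
};

// Return the first node with size >= wanted and unlink it from *head, or
// return NULL if no node is large enough.
static ToyFreeNode* ToyFirstFit(ToyFreeNode** head, int wanted) {
  for (ToyFreeNode** cur = head; *cur != NULL; cur = &(*cur)->next) {
    if ((*cur)->size >= wanted) {
      ToyFreeNode* hit = *cur;
      *cur = hit->next;  // unlink without tracking a separate 'prev' node
      hit->next = NULL;
      return hit;
    }
  }
  return NULL;
}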
2320 
2321 
2322 // Allocation on the old space free list.  If it succeeds then a new linear
2323 // allocation space has been set up with the top and limit of the space.  If
2324 // the allocation fails then NULL is returned, and the caller can perform a GC
2325 // or allocate a new page before retrying.
2326 HeapObject* FreeList::Allocate(int size_in_bytes) {
2327   DCHECK(0 < size_in_bytes);
2328   DCHECK(size_in_bytes <= kMaxBlockSize);
2329   DCHECK(IsAligned(size_in_bytes, kPointerSize));
2330   // Don't free list allocate if there is linear space available.
2331   DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2332 
2333   int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2334   // Mark the old linear allocation area with a free space map so it can be
2335   // skipped when scanning the heap.  This also puts it back in the free list
2336   // if it is big enough.
2337   owner_->Free(owner_->top(), old_linear_size);
2338 
2339   owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
2340                                                       old_linear_size);
2341 
2342   int new_node_size = 0;
2343   FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2344   if (new_node == NULL) {
2345     owner_->SetTopAndLimit(NULL, NULL);
2346     return NULL;
2347   }
2348 
2349   int bytes_left = new_node_size - size_in_bytes;
2350   DCHECK(bytes_left >= 0);
2351 
2352 #ifdef DEBUG
2353   for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2354     reinterpret_cast<Object**>(new_node->address())[i] =
2355         Smi::FromInt(kCodeZapValue);
2356   }
2357 #endif
2358 
2359   // The old-space-step might have finished sweeping and restarted marking.
2360   // Verify that it did not turn the page of the new node into an evacuation
2361   // candidate.
2362   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2363 
2364   const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2365 
2366   // Memory in the linear allocation area is counted as allocated.  We may free
2367   // a little of this again immediately - see below.
2368   owner_->Allocate(new_node_size);
2369 
2370   if (owner_->heap()->inline_allocation_disabled()) {
2371     // Keep the linear allocation area empty if requested to do so, and
2372     // return the area back to the free list instead.
2373     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2374     DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
2375   } else if (bytes_left > kThreshold &&
2376              owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2377              FLAG_incremental_marking_steps) {
2378     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2379     // We don't want to give too large linear areas to the allocator while
2380     // incremental marking is going on, because we won't check again whether
2381     // we want to do another increment until the linear area is used up.
2382     owner_->Free(new_node->address() + size_in_bytes + linear_size,
2383                  new_node_size - size_in_bytes - linear_size);
2384     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2385                            new_node->address() + size_in_bytes + linear_size);
2386   } else if (bytes_left > 0) {
2387     // Normally we give the rest of the node to the allocator as its new
2388     // linear allocation area.
2389     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2390                            new_node->address() + new_node_size);
2391   } else {
2392     // TODO(gc) Try not freeing the linear allocation region when bytes_left
2393     // is zero.
2394     owner_->SetTopAndLimit(NULL, NULL);
2395   }
2396 
2397   return new_node;
2398 }
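
// Illustrative sketch, not part of V8: the tail of Allocate() above decides
// how much of the remainder (new_node_size - size_in_bytes) becomes the new
// linear allocation area.  During incremental marking the linear area is
// deliberately capped so the allocator returns to the slow path, and another
// marking step, reasonably soon.  A simplified model with hypothetical names
// (alignment rounding and flag checks omitted):
struct ToyRemainderPlan {
  int linear_bytes;    // kept as the new bump-pointer area
  int returned_bytes;  // handed straight back to the free list
};

static ToyRemainderPlan ToyPlanRemainder(int bytes_left, int threshold,
                                         bool marking_incomplete,
                                         bool inline_allocation_disabled) {
  ToyRemainderPlan plan = {0, 0};
  if (inline_allocation_disabled) {
    plan.returned_bytes = bytes_left;  // keep the linear area empty
  } else if (marking_incomplete && bytes_left > threshold) {
    plan.linear_bytes = threshold;     // capped linear area
    plan.returned_bytes = bytes_left - threshold;
  } else {
    plan.linear_bytes = bytes_left;    // normal case: keep the whole remainder
  }
  return plan;
}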
2399 
2400 
2401 intptr_t FreeList::EvictFreeListItems(Page* p) {
2402   intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
2403   p->set_available_in_huge_free_list(0);
2404 
2405   if (sum < p->area_size()) {
2406     sum += small_list_.EvictFreeListItemsInList(p) +
2407            medium_list_.EvictFreeListItemsInList(p) +
2408            large_list_.EvictFreeListItemsInList(p);
2409     p->set_available_in_small_free_list(0);
2410     p->set_available_in_medium_free_list(0);
2411     p->set_available_in_large_free_list(0);
2412   }
2413 
2414   return sum;
2415 }
2416 
2417 
2418 bool FreeList::ContainsPageFreeListItems(Page* p) {
2419   return huge_list_.EvictFreeListItemsInList(p) ||
2420          small_list_.EvictFreeListItemsInList(p) ||
2421          medium_list_.EvictFreeListItemsInList(p) ||
2422          large_list_.EvictFreeListItemsInList(p);
2423 }
2424 
2425 
2426 void FreeList::RepairLists(Heap* heap) {
2427   small_list_.RepairFreeList(heap);
2428   medium_list_.RepairFreeList(heap);
2429   large_list_.RepairFreeList(heap);
2430   huge_list_.RepairFreeList(heap);
2431 }
2432 
2433 
2434 #ifdef DEBUG
2435 intptr_t FreeListCategory::SumFreeList() {
2436   intptr_t sum = 0;
2437   FreeListNode* cur = top();
2438   while (cur != NULL) {
2439     DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
2440     FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
2441     sum += cur_as_free_space->nobarrier_size();
2442     cur = cur->next();
2443   }
2444   return sum;
2445 }
2446 
2447 
2448 static const int kVeryLongFreeList = 500;
2449 
2450 
2451 int FreeListCategory::FreeListLength() {
2452   int length = 0;
2453   FreeListNode* cur = top();
2454   while (cur != NULL) {
2455     length++;
2456     cur = cur->next();
2457     if (length == kVeryLongFreeList) return length;
2458   }
2459   return length;
2460 }
2461 
2462 
2463 bool FreeList::IsVeryLong() {
2464   if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
2465   if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
2466   if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
2467   if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
2468   return false;
2469 }
2470 
2471 
2472 // This can take a very long time because it is linear in the number of entries
2473 // on the free list, so it should not be called if FreeListLength returns
2474 // kVeryLongFreeList.
2475 intptr_t FreeList::SumFreeLists() {
2476   intptr_t sum = small_list_.SumFreeList();
2477   sum += medium_list_.SumFreeList();
2478   sum += large_list_.SumFreeList();
2479   sum += huge_list_.SumFreeList();
2480   return sum;
2481 }
2482 #endif
2483 
2484 
2485 // -----------------------------------------------------------------------------
2486 // OldSpace implementation
2487 
2488 void PagedSpace::PrepareForMarkCompact() {
2489   // We don't have a linear allocation area while sweeping.  It will be restored
2490   // on the first allocation after the sweep.
2491   EmptyAllocationInfo();
2492 
2493   // This counter will be increased for pages which will be swept by the
2494   // sweeper threads.
2495   unswept_free_bytes_ = 0;
2496 
2497   // Clear the free list before a full GC---it will be rebuilt afterward.
2498   free_list_.Reset();
2499 }
2500 
2501 
2502 intptr_t PagedSpace::SizeOfObjects() {
2503   DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() ||
2504          (unswept_free_bytes_ == 0));
2505   return Size() - unswept_free_bytes_ - (limit() - top());
2506 }
2507 
2508 
2509 // After we have booted, we have created a map which represents free space
2510 // on the heap.  If there was already a free list then the elements on it
2511 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
2512 // fix them.
2513 void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }
2514 
2515 
2516 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2517   if (allocation_info_.top() >= allocation_info_.limit()) return;
2518 
2519   if (Page::FromAllocationTop(allocation_info_.top())
2520           ->IsEvacuationCandidate()) {
2521     // Create filler object to keep page iterable if it was iterable.
2522     int remaining =
2523         static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2524     heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
2525 
2526     allocation_info_.set_top(NULL);
2527     allocation_info_.set_limit(NULL);
2528   }
2529 }
2530 
2531 
2532 HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
2533     int size_in_bytes) {
2534   MarkCompactCollector* collector = heap()->mark_compact_collector();
2535   if (collector->sweeping_in_progress()) {
2536     // Wait for the sweeper threads here and complete the sweeping phase.
2537     collector->EnsureSweepingCompleted();
2538 
2539     // After waiting for the sweeper threads, there may be new free-list
2540     // entries.
2541     return free_list_.Allocate(size_in_bytes);
2542   }
2543   return NULL;
2544 }
2545 
2546 
2547 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2548   // Allocation in this space has failed.
2549 
2550   MarkCompactCollector* collector = heap()->mark_compact_collector();
2551   // Sweeping is still in progress.
2552   if (collector->sweeping_in_progress()) {
2553     // First try to refill the free-list, concurrent sweeper threads
2554     // may have freed some objects in the meantime.
2555     collector->RefillFreeList(this);
2556 
2557     // Retry the free list allocation.
2558     HeapObject* object = free_list_.Allocate(size_in_bytes);
2559     if (object != NULL) return object;
2560 
2561     // If sweeping is still in progress try to sweep pages on the main thread.
2562     int free_chunk = collector->SweepInParallel(this, size_in_bytes);
2563     collector->RefillFreeList(this);
2564     if (free_chunk >= size_in_bytes) {
2565       HeapObject* object = free_list_.Allocate(size_in_bytes);
2566       // We should be able to allocate an object here since we just freed that
2567       // much memory.
2568       DCHECK(object != NULL);
2569       if (object != NULL) return object;
2570     }
2571   }
2572 
2573   // Free list allocation failed and there is no next page.  Fail if we have
2574   // hit the old generation size limit that should cause a garbage
2575   // collection.
2576   if (!heap()->always_allocate() &&
2577       heap()->OldGenerationAllocationLimitReached()) {
2578     // If sweeper threads are active, wait for them at that point and steal
2579     // elements from their free-lists.
2580     HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2581     if (object != NULL) return object;
2582   }
2583 
2584   // Try to expand the space and allocate in the new next page.
2585   if (Expand()) {
2586     DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
2587     return free_list_.Allocate(size_in_bytes);
2588   }
2589 
2590   // If sweeper threads are active, wait for them at that point and steal
2591   // elements from their free-lists. Allocation may still fail there, which
2592   // would indicate that there is not enough memory for the given allocation.
2593   return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2594 }
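
// Illustrative sketch, not part of V8: SlowAllocateRaw() above is a chain of
// progressively more expensive fallbacks.  A compressed, hypothetical model
// of that control flow (error handling and the always_allocate() escape hatch
// omitted):
struct ToyAllocationHooks {
  bool sweeping_in_progress;
  bool old_generation_limit_reached;
  void* (*try_free_list)(int bytes);  // returns NULL on failure
  bool (*sweep_pages_on_main_thread)(int bytes);
  void (*finish_sweeping)();
  bool (*expand_space)();
};

static void* ToySlowAllocate(ToyAllocationHooks* h, int bytes) {
  if (h->sweeping_in_progress) {
    // 1. Concurrent sweepers may already have refilled the free list.
    if (void* obj = h->try_free_list(bytes)) return obj;
    // 2. Help out by sweeping some pages on the main thread, then retry.
    if (h->sweep_pages_on_main_thread(bytes)) {
      if (void* obj = h->try_free_list(bytes)) return obj;
    }
  }
  // 3. If the old-generation limit was hit, finish sweeping before growing.
  if (h->old_generation_limit_reached) {
    h->finish_sweeping();
    if (void* obj = h->try_free_list(bytes)) return obj;
  }
  // 4. Otherwise grow the space and allocate from the fresh page.
  if (h->expand_space()) return h->try_free_list(bytes);
  // 5. Last resort: wait for sweeping to complete and try once more.
  h->finish_sweeping();
  return h->try_free_list(bytes);
}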
2595 
2596 
2597 #ifdef DEBUG
2598 void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2599   CommentStatistic* comments_statistics =
2600       isolate->paged_space_comments_statistics();
2601   ReportCodeKindStatistics(isolate->code_kind_statistics());
2602   PrintF(
2603       "Code comment statistics (\"   [ comment-txt   :    size/   "
2604       "count  (average)\"):\n");
2605   for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2606     const CommentStatistic& cs = comments_statistics[i];
2607     if (cs.size > 0) {
2608       PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
2609              cs.size / cs.count);
2610     }
2611   }
2612   PrintF("\n");
2613 }
2614 
2615 
2616 void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2617   CommentStatistic* comments_statistics =
2618       isolate->paged_space_comments_statistics();
2619   ClearCodeKindStatistics(isolate->code_kind_statistics());
2620   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2621     comments_statistics[i].Clear();
2622   }
2623   comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2624   comments_statistics[CommentStatistic::kMaxComments].size = 0;
2625   comments_statistics[CommentStatistic::kMaxComments].count = 0;
2626 }
2627 
2628 
2629 // Adds a comment to the 'comments_statistics' table. Performance is OK as
2630 // long as 'kMaxComments' is small.
2631 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2632   CommentStatistic* comments_statistics =
2633       isolate->paged_space_comments_statistics();
2634   // Do not count empty comments
2635   if (delta <= 0) return;
2636   CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2637   // Search for a free or matching entry in 'comments_statistics': 'cs'
2638   // points to result.
2639   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2640     if (comments_statistics[i].comment == NULL) {
2641       cs = &comments_statistics[i];
2642       cs->comment = comment;
2643       break;
2644     } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2645       cs = &comments_statistics[i];
2646       break;
2647     }
2648   }
2649   // Update entry for 'comment'
2650   cs->size += delta;
2651   cs->count += 1;
2652 }
2653 
2654 
2655 // Call for each nested comment start (start marked with '[ xxx', end marked
2656 // with ']').  RelocIterator 'it' must point to a comment reloc info.
2657 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2658   DCHECK(!it->done());
2659   DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
2660   const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2661   if (tmp[0] != '[') {
2662     // Not a nested comment; skip
2663     return;
2664   }
2665 
2666   // Search for end of nested comment or a new nested comment
2667   const char* const comment_txt =
2668       reinterpret_cast<const char*>(it->rinfo()->data());
2669   const byte* prev_pc = it->rinfo()->pc();
2670   int flat_delta = 0;
2671   it->next();
2672   while (true) {
2673     // All nested comments must be terminated properly, and therefore we
2674     // will exit from the loop.
2675     DCHECK(!it->done());
2676     if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2677       const char* const txt =
2678           reinterpret_cast<const char*>(it->rinfo()->data());
2679       flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2680       if (txt[0] == ']') break;  // End of nested comment
2681       // A new comment
2682       CollectCommentStatistics(isolate, it);
2683       // Skip code that was covered with previous comment
2684       prev_pc = it->rinfo()->pc();
2685     }
2686     it->next();
2687   }
2688   EnterComment(isolate, comment_txt, flat_delta);
2689 }
2690 
2691 
2692 // Collects code size statistics:
2693 // - by code kind
2694 // - by code comment
2695 void PagedSpace::CollectCodeStatistics() {
2696   Isolate* isolate = heap()->isolate();
2697   HeapObjectIterator obj_it(this);
2698   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2699     if (obj->IsCode()) {
2700       Code* code = Code::cast(obj);
2701       isolate->code_kind_statistics()[code->kind()] += code->Size();
2702       RelocIterator it(code);
2703       int delta = 0;
2704       const byte* prev_pc = code->instruction_start();
2705       while (!it.done()) {
2706         if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2707           delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2708           CollectCommentStatistics(isolate, &it);
2709           prev_pc = it.rinfo()->pc();
2710         }
2711         it.next();
2712       }
2713 
2714       DCHECK(code->instruction_start() <= prev_pc &&
2715              prev_pc <= code->instruction_end());
2716       delta += static_cast<int>(code->instruction_end() - prev_pc);
2717       EnterComment(isolate, "NoComment", delta);
2718     }
2719   }
2720 }
2721 
2722 
2723 void PagedSpace::ReportStatistics() {
2724   int pct = static_cast<int>(Available() * 100 / Capacity());
2725   PrintF("  capacity: %" V8_PTR_PREFIX
2726          "d"
2727          ", waste: %" V8_PTR_PREFIX
2728          "d"
2729          ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2730          Capacity(), Waste(), Available(), pct);
2731 
2732   if (heap()->mark_compact_collector()->sweeping_in_progress()) {
2733     heap()->mark_compact_collector()->EnsureSweepingCompleted();
2734   }
2735   ClearHistograms(heap()->isolate());
2736   HeapObjectIterator obj_it(this);
2737   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2738     CollectHistogramInfo(obj);
2739   ReportHistogram(heap()->isolate(), true);
2740 }
2741 #endif
2742 
2743 
2744 // -----------------------------------------------------------------------------
2745 // MapSpace implementation
2746 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2747 // there is at least one non-inlined virtual function. I would prefer to hide
2748 // the VerifyObject definition behind VERIFY_HEAP.
2749 
2750 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
2751 
2752 
2753 // -----------------------------------------------------------------------------
2754 // CellSpace and PropertyCellSpace implementation
2755 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2756 // there is at least one non-inlined virtual function. I would prefer to hide
2757 // the VerifyObject definition behind VERIFY_HEAP.
2758 
2759 void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }
2760 
2761 
2762 void PropertyCellSpace::VerifyObject(HeapObject* object) {
2763   CHECK(object->IsPropertyCell());
2764 }
2765 
2766 
2767 // -----------------------------------------------------------------------------
2768 // LargeObjectIterator
2769 
2770 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2771   current_ = space->first_page_;
2772   size_func_ = NULL;
2773 }
2774 
2775 
2776 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2777                                          HeapObjectCallback size_func) {
2778   current_ = space->first_page_;
2779   size_func_ = size_func;
2780 }
2781 
2782 
2783 HeapObject* LargeObjectIterator::Next() {
2784   if (current_ == NULL) return NULL;
2785 
2786   HeapObject* object = current_->GetObject();
2787   current_ = current_->next_page();
2788   return object;
2789 }
2790 
2791 
2792 // -----------------------------------------------------------------------------
2793 // LargeObjectSpace
2794 static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
2795 
2796 
2797 LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
2798                                    AllocationSpace id)
2799     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
2800       max_capacity_(max_capacity),
2801       first_page_(NULL),
2802       size_(0),
2803       page_count_(0),
2804       objects_size_(0),
2805       chunk_map_(ComparePointers, 1024) {}
2806 
2807 
2808 bool LargeObjectSpace::SetUp() {
2809   first_page_ = NULL;
2810   size_ = 0;
2811   maximum_committed_ = 0;
2812   page_count_ = 0;
2813   objects_size_ = 0;
2814   chunk_map_.Clear();
2815   return true;
2816 }
2817 
2818 
2819 void LargeObjectSpace::TearDown() {
2820   while (first_page_ != NULL) {
2821     LargePage* page = first_page_;
2822     first_page_ = first_page_->next_page();
2823     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2824 
2825     ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2826     heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2827         space, kAllocationActionFree, page->size());
2828     heap()->isolate()->memory_allocator()->Free(page);
2829   }
2830   SetUp();
2831 }
2832 
2833 
2834 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2835                                                Executability executable) {
2836   // Check if we want to force a GC before growing the old space further.
2837   // If so, fail the allocation.
2838   if (!heap()->always_allocate() &&
2839       heap()->OldGenerationAllocationLimitReached()) {
2840     return AllocationResult::Retry(identity());
2841   }
2842 
2843   if (Size() + object_size > max_capacity_) {
2844     return AllocationResult::Retry(identity());
2845   }
2846 
2847   LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
2848       object_size, this, executable);
2849   if (page == NULL) return AllocationResult::Retry(identity());
2850   DCHECK(page->area_size() >= object_size);
2851 
2852   size_ += static_cast<int>(page->size());
2853   objects_size_ += object_size;
2854   page_count_++;
2855   page->set_next_page(first_page_);
2856   first_page_ = page;
2857 
2858   if (size_ > maximum_committed_) {
2859     maximum_committed_ = size_;
2860   }
2861 
2862   // Register all MemoryChunk::kAlignment-aligned chunks covered by
2863   // this large page in the chunk map.
2864   uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2865   uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2866   for (uintptr_t key = base; key <= limit; key++) {
2867     HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2868                                               static_cast<uint32_t>(key), true);
2869     DCHECK(entry != NULL);
2870     entry->value = page;
2871   }
2872 
2873   HeapObject* object = page->GetObject();
2874 
2875   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
2876 
2877   if (Heap::ShouldZapGarbage()) {
2878     // Make the object consistent so the heap can be verified in OldSpaceStep.
2879     // We only need to do this in debug builds or if verify_heap is on.
2880     reinterpret_cast<Object**>(object->address())[0] =
2881         heap()->fixed_array_map();
2882     reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2883   }
2884 
2885   heap()->incremental_marking()->OldSpaceStep(object_size);
2886   return object;
2887 }
2888 
2889 
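// Committed physical memory for the space.  Platforms without lazy commits
// report the committed virtual memory; otherwise the per-page committed
// physical memory is summed.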
size_t LargeObjectSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  size_t size = 0;
  LargePage* current = first_page_;
  while (current != NULL) {
    size += current->CommittedPhysicalMemory();
    current = current->next_page();
  }
  return size;
}


// GC support
Object* LargeObjectSpace::FindObject(Address a) {
  LargePage* page = FindPage(a);
  if (page != NULL) {
    return page->GetObject();
  }
  return Smi::FromInt(0);  // Signaling not found.
}


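// Maps an arbitrary address to the large page containing it, if any, by
// dividing the address down to the alignment-granularity key registered in
// AllocateRaw().  Illustrative sketch (not part of the original source):
// assuming MemoryChunk::kAlignment is 1MB and a large page starts at
// 0x40000000 with a size of 2.5MB, AllocateRaw() registered the keys 0x400,
// 0x401 and 0x402, so any interior address divides down to one of them and
// the lookup below resolves the page in a single hash probe.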
LargePage* LargeObjectSpace::FindPage(Address a) {
  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                        static_cast<uint32_t>(key), false);
  if (e != NULL) {
    DCHECK(e->value != NULL);
    LargePage* page = reinterpret_cast<LargePage*>(e->value);
    DCHECK(page->is_valid());
    if (page->Contains(a)) {
      return page;
    }
  }
  return NULL;
}


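// Usage sketch (illustrative only, not part of the original source), given
// a LargeObjectSpace* lo_space and an Address addr inside a large object:
//
//   LargePage* page = lo_space->FindPage(addr);
//   if (page != NULL) {
//     HeapObject* obj = page->GetObject();  // starts at page->area_start()
//   }
//
// FreeUnmarkedObjects() below is the sweep pass for this space: marked
// objects survive and have their mark bit and live-byte counters reset for
// the next cycle; unmarked pages are unlinked from the page list, removed
// from the chunk map, and released.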
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* previous = NULL;
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    // Can this large page contain pointers to non-trivial objects?  No other
    // pointer object is this big.
    bool is_pointer_object = object->IsFixedArray();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) {
      mark_bit.Clear();
      Page::FromAddress(object->address())->ResetProgressBar();
      Page::FromAddress(object->address())->ResetLiveBytes();
      previous = current;
      current = current->next_page();
    } else {
      LargePage* page = current;
      // Cut the chunk out from the chunk list.
      current = current->next_page();
      if (previous == NULL) {
        first_page_ = current;
      } else {
        previous->set_next_page(current);
      }

      // Free the chunk.
      heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
                                                             heap()->isolate());
      size_ -= static_cast<int>(page->size());
      objects_size_ -= object->Size();
      page_count_--;

      // Remove entries belonging to this page.
      // Use variable alignment to help pass length check (<= 80 characters)
      // of single line in tools/presubmit.py.
      const intptr_t alignment = MemoryChunk::kAlignment;
      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
      uintptr_t limit = base + (page->size() - 1) / alignment;
      for (uintptr_t key = base; key <= limit; key++) {
        chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));
      }

      if (is_pointer_object) {
        heap()->QueueMemoryChunkForFree(page);
      } else {
        heap()->isolate()->memory_allocator()->Free(page);
      }
    }
  }
  heap()->FreeQueuedChunks();
}


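// An object belongs to this space exactly when the MemoryChunk derived from
// its address is owned by the space; the slow DCHECK cross-checks this
// against the chunk map via FindObject().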
bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);

  bool owned = (chunk->owner() == this);

  SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());

  return owned;
}


#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_; chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    CHECK(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    CHECK(map->IsMap());
    CHECK(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings (sequential
    // strings that have been morphed into external strings), fixed arrays,
    // fixed double arrays, byte arrays, and constant pool arrays in the
    // large object space.
    CHECK(object->IsCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsFixedArray() ||
          object->IsFixedDoubleArray() || object->IsByteArray() ||
          object->IsConstantPoolArray());

    // The object itself should look OK.
    object->ObjectVerify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          CHECK(heap()->Contains(element_object));
          CHECK(element_object->map()->IsMap());
        }
      }
    }
  }
}
#endif


#ifdef DEBUG
void LargeObjectSpace::Print() {
  OFStream os(stdout);
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->Print(os);
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms(heap()->isolate());
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF(
      "  number of objects %d, "
      "size of objects %" V8_PTR_PREFIX "d\n",
      num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}


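// Debug-only dump of a page: prints every object, flagging marked objects
// with '!', and reports the summed size of marked objects next to the
// page's live-bytes counter.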
void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n", this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next(); object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::MarkBitFrom(object).Get();
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG
}
}  // namespace v8::internal