1  // Copyright 2011 the V8 project authors. All rights reserved.
2  // Use of this source code is governed by a BSD-style license that can be
3  // found in the LICENSE file.
4  
5  #include "src/heap/spaces.h"
6  
7  #include <utility>
8  
9  #include "src/base/bits.h"
10  #include "src/base/platform/platform.h"
11  #include "src/base/platform/semaphore.h"
12  #include "src/full-codegen/full-codegen.h"
13  #include "src/heap/array-buffer-tracker.h"
14  #include "src/heap/slot-set.h"
15  #include "src/macro-assembler.h"
16  #include "src/msan.h"
17  #include "src/snapshot/snapshot.h"
18  #include "src/v8.h"
19  
20  namespace v8 {
21  namespace internal {
22  
23  
24  // ----------------------------------------------------------------------------
25  // HeapObjectIterator
26  
27  HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
28      : cur_addr_(nullptr),
29        cur_end_(nullptr),
30        space_(space),
31        page_range_(space->anchor()->next_page(), space->anchor()),
32        current_page_(page_range_.begin()) {}
33  
34  HeapObjectIterator::HeapObjectIterator(Page* page)
35      : cur_addr_(nullptr),
36        cur_end_(nullptr),
37        space_(reinterpret_cast<PagedSpace*>(page->owner())),
38        page_range_(page),
39        current_page_(page_range_.begin()) {
40  #ifdef DEBUG
41    Space* owner = page->owner();
42    DCHECK(owner == page->heap()->old_space() ||
43           owner == page->heap()->map_space() ||
44           owner == page->heap()->code_space());
45  #endif  // DEBUG
46  }
47  
48  // We have hit the end of the current page and should advance to the next
49  // page, sweeping it first if necessary.
50  bool HeapObjectIterator::AdvanceToNextPage() {
51    DCHECK_EQ(cur_addr_, cur_end_);
52    if (current_page_ == page_range_.end()) return false;
53    Page* cur_page = *(current_page_++);
54    space_->heap()
55        ->mark_compact_collector()
56        ->sweeper()
57        .SweepOrWaitUntilSweepingCompleted(cur_page);
58    cur_addr_ = cur_page->area_start();
59    cur_end_ = cur_page->area_end();
60    DCHECK(cur_page->SweepingDone());
61    return true;
62  }
63  
64  PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
65      : heap_(heap) {
66    AllSpaces spaces(heap_);
67    for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
68      space->PauseAllocationObservers();
69    }
70  }
71  
72  PauseAllocationObserversScope::~PauseAllocationObserversScope() {
73    AllSpaces spaces(heap_);
74    for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
75      space->ResumeAllocationObservers();
76    }
77  }
78  
79  // -----------------------------------------------------------------------------
80  // CodeRange
81  
82  
83  CodeRange::CodeRange(Isolate* isolate)
84      : isolate_(isolate),
85        code_range_(NULL),
86        free_list_(0),
87        allocation_list_(0),
88        current_allocation_block_index_(0) {}
89  
90  
91  bool CodeRange::SetUp(size_t requested) {
92    DCHECK(code_range_ == NULL);
93  
94    if (requested == 0) {
95      // When a target requires the code range feature, we put all code objects
96      // in a kMaximalCodeRangeSize range of virtual address space, so that
97      // they can call each other with near calls.
98      if (kRequiresCodeRange) {
99        requested = kMaximalCodeRangeSize;
100      } else {
101        return true;
102      }
103    }
104  
105    if (requested <= kMinimumCodeRangeSize) {
106      requested = kMinimumCodeRangeSize;
107    }
108  
109    const size_t reserved_area =
110        kReservedCodeRangePages * base::OS::CommitPageSize();
111    if (requested < (kMaximalCodeRangeSize - reserved_area))
112      requested += reserved_area;
113  
114    DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
115  
116    code_range_ = new base::VirtualMemory(
117        requested, Max(kCodeRangeAreaAlignment,
118                       static_cast<size_t>(base::OS::AllocateAlignment())));
119    CHECK(code_range_ != NULL);
120    if (!code_range_->IsReserved()) {
121      delete code_range_;
122      code_range_ = NULL;
123      return false;
124    }
125  
126    // We are sure that we have mapped a block of requested addresses.
127    DCHECK(code_range_->size() == requested);
128    Address base = reinterpret_cast<Address>(code_range_->address());
129  
130    // On some platforms, specifically Win64, we need to reserve some pages at
131    // the beginning of an executable space.
132    if (reserved_area > 0) {
133      if (!code_range_->Commit(base, reserved_area, true)) {
134        delete code_range_;
135        code_range_ = NULL;
136        return false;
137      }
138      base += reserved_area;
139    }
140    Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
141    size_t size = code_range_->size() - (aligned_base - base) - reserved_area;
142    allocation_list_.Add(FreeBlock(aligned_base, size));
143    current_allocation_block_index_ = 0;
144  
145    LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
146    return true;
147  }
148  
149  
150  int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
151                                         const FreeBlock* right) {
152    // The entire point of CodeRange is that the difference between two
153    // addresses in the range can be represented as a signed 32-bit int,
154    // so the cast is semantically correct.
155    return static_cast<int>(left->start - right->start);
156  }
157  
158  
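// Advances current_allocation_block_index_ to a free block of at least
// |requested| bytes. If no remaining block is large enough, the free list is
// merged back into the allocation list (sorted and coalesced) and the search
// restarts; returns false if the code range is full or too fragmented.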
159  bool CodeRange::GetNextAllocationBlock(size_t requested) {
160    for (current_allocation_block_index_++;
161         current_allocation_block_index_ < allocation_list_.length();
162         current_allocation_block_index_++) {
163      if (requested <= allocation_list_[current_allocation_block_index_].size) {
164        return true;  // Found a large enough allocation block.
165      }
166    }
167  
168    // Sort and merge the free blocks on the free list and the allocation list.
169    free_list_.AddAll(allocation_list_);
170    allocation_list_.Clear();
171    free_list_.Sort(&CompareFreeBlockAddress);
172    for (int i = 0; i < free_list_.length();) {
173      FreeBlock merged = free_list_[i];
174      i++;
175      // Add adjacent free blocks to the current merged block.
176      while (i < free_list_.length() &&
177             free_list_[i].start == merged.start + merged.size) {
178        merged.size += free_list_[i].size;
179        i++;
180      }
181      if (merged.size > 0) {
182        allocation_list_.Add(merged);
183      }
184    }
185    free_list_.Clear();
186  
187    for (current_allocation_block_index_ = 0;
188         current_allocation_block_index_ < allocation_list_.length();
189         current_allocation_block_index_++) {
190      if (requested <= allocation_list_[current_allocation_block_index_].size) {
191        return true;  // Found a large enough allocation block.
192      }
193    }
194    current_allocation_block_index_ = 0;
195    // Code range is full or too fragmented.
196    return false;
197  }
198  
199  
200  Address CodeRange::AllocateRawMemory(const size_t requested_size,
201                                       const size_t commit_size,
202                                       size_t* allocated) {
203    // requested_size includes the guard pages while commit_size does not.
204    // Make sure callers know about the invariant.
205    CHECK_LE(commit_size,
206             requested_size - 2 * MemoryAllocator::CodePageGuardSize());
207    FreeBlock current;
208    if (!ReserveBlock(requested_size, &current)) {
209      *allocated = 0;
210      return NULL;
211    }
212    *allocated = current.size;
213    DCHECK(*allocated <= current.size);
214    DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
215    if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
216            code_range_, current.start, commit_size, *allocated)) {
217      *allocated = 0;
218      ReleaseBlock(&current);
219      return NULL;
220    }
221    return current.start;
222  }
223  
224  
225  bool CodeRange::CommitRawMemory(Address start, size_t length) {
226    return isolate_->heap()->memory_allocator()->CommitMemory(start, length,
227                                                              EXECUTABLE);
228  }
229  
230  
231  bool CodeRange::UncommitRawMemory(Address start, size_t length) {
232    return code_range_->Uncommit(start, length);
233  }
234  
235  
236  void CodeRange::FreeRawMemory(Address address, size_t length) {
237    DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
238    base::LockGuard<base::Mutex> guard(&code_range_mutex_);
239    free_list_.Add(FreeBlock(address, length));
240    code_range_->Uncommit(address, length);
241  }
242  
243  
244  void CodeRange::TearDown() {
245    delete code_range_;  // Frees all memory in the virtual memory range.
246    code_range_ = NULL;
247    base::LockGuard<base::Mutex> guard(&code_range_mutex_);
248    free_list_.Free();
249    allocation_list_.Free();
250  }
251  
252  
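// Carves a block of at least |requested_size| bytes (rounded up to
// MemoryChunk::kAlignment) off the front of the current allocation block.
// Returns false if no sufficiently large block is available.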
253  bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
254    base::LockGuard<base::Mutex> guard(&code_range_mutex_);
255    DCHECK(allocation_list_.length() == 0 ||
256           current_allocation_block_index_ < allocation_list_.length());
257    if (allocation_list_.length() == 0 ||
258        requested_size > allocation_list_[current_allocation_block_index_].size) {
259      // Find an allocation block large enough.
260      if (!GetNextAllocationBlock(requested_size)) return false;
261    }
262    // Commit the requested memory at the start of the current allocation block.
263    size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
264    *block = allocation_list_[current_allocation_block_index_];
265    // Don't leave a small free block, useless for a large object or chunk.
266    if (aligned_requested < (block->size - Page::kPageSize)) {
267      block->size = aligned_requested;
268    }
269    DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
270    allocation_list_[current_allocation_block_index_].start += block->size;
271    allocation_list_[current_allocation_block_index_].size -= block->size;
272    return true;
273  }
274  
275  
276  void CodeRange::ReleaseBlock(const FreeBlock* block) {
277    base::LockGuard<base::Mutex> guard(&code_range_mutex_);
278    free_list_.Add(*block);
279  }
280  
281  
282  // -----------------------------------------------------------------------------
283  // MemoryAllocator
284  //
285  
286  MemoryAllocator::MemoryAllocator(Isolate* isolate)
287      : isolate_(isolate),
288        code_range_(nullptr),
289        capacity_(0),
290        capacity_executable_(0),
291        size_(0),
292        size_executable_(0),
293        lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
294        highest_ever_allocated_(reinterpret_cast<void*>(0)),
295        unmapper_(this) {}
296  
297  bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
298                              intptr_t code_range_size) {
299    capacity_ = RoundUp(capacity, Page::kPageSize);
300    capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
301    DCHECK_GE(capacity_, capacity_executable_);
302  
303    size_ = 0;
304    size_executable_ = 0;
305  
306    code_range_ = new CodeRange(isolate_);
307    if (!code_range_->SetUp(static_cast<size_t>(code_range_size))) return false;
308  
309    return true;
310  }
311  
312  
313  void MemoryAllocator::TearDown() {
314    unmapper()->WaitUntilCompleted();
315  
316    MemoryChunk* chunk = nullptr;
317    while ((chunk = unmapper()->TryGetPooledMemoryChunkSafe()) != nullptr) {
318      FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
319                 NOT_EXECUTABLE);
320    }
321  
322    // Check that spaces were torn down before MemoryAllocator.
323    DCHECK_EQ(size_.Value(), 0);
324    // TODO(gc) this will be true again when we fix FreeMemory.
325    // DCHECK(size_executable_ == 0);
326    capacity_ = 0;
327    capacity_executable_ = 0;
328  
329    if (last_chunk_.IsReserved()) {
330      last_chunk_.Release();
331    }
332  
333    delete code_range_;
334    code_range_ = nullptr;
335  }
336  
337  class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
338   public:
339    explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}
340  
341   private:
342    // v8::Task overrides.
343    void Run() override {
344      unmapper_->PerformFreeMemoryOnQueuedChunks();
345      unmapper_->pending_unmapping_tasks_semaphore_.Signal();
346    }
347  
348    Unmapper* unmapper_;
349    DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
350  };
351  
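// Frees the chunks queued for unmapping, either on a background task when
// concurrent sweeping is enabled or synchronously on the calling thread.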
352  void MemoryAllocator::Unmapper::FreeQueuedChunks() {
353    ReconsiderDelayedChunks();
354    if (FLAG_concurrent_sweeping) {
355      V8::GetCurrentPlatform()->CallOnBackgroundThread(
356          new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
357      concurrent_unmapping_tasks_active_++;
358    } else {
359      PerformFreeMemoryOnQueuedChunks();
360    }
361  }
362  
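// Waits for all outstanding unmapping tasks to signal completion; returns
// true if any waiting was necessary.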
363  bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
364    bool waited = false;
365    while (concurrent_unmapping_tasks_active_ > 0) {
366      pending_unmapping_tasks_semaphore_.Wait();
367      concurrent_unmapping_tasks_active_--;
368      waited = true;
369    }
370    return waited;
371  }
372  
373  void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
374    MemoryChunk* chunk = nullptr;
375    // Regular chunks.
376    while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
377      bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
378      allocator_->PerformFreeMemory(chunk);
379      if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
380    }
381    // Non-regular chunks.
382    while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
383      allocator_->PerformFreeMemory(chunk);
384    }
385  }
386  
387  void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
388    std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
389    // Move constructed, so the permanent list should be empty.
390    DCHECK(delayed_regular_chunks_.empty());
391    for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
392      AddMemoryChunkSafe<kRegular>(*it);
393    }
394  }
395  
396  bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
397    MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
398    // We cannot free memory chunks in new space while the sweeper is running
399    // since a sweeper thread might be stuck right before trying to lock the
400    // corresponding page.
401    return !chunk->InNewSpace() || (mc == nullptr) ||
402           mc->sweeper().IsSweepingCompleted();
403  }
404  
405  bool MemoryAllocator::CommitMemory(Address base, size_t size,
406                                     Executability executable) {
407    if (!base::VirtualMemory::CommitRegion(base, size,
408                                           executable == EXECUTABLE)) {
409      return false;
410    }
411    UpdateAllocatedSpaceLimits(base, base + size);
412    return true;
413  }
414  
415  
416  void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
417                                   Executability executable) {
418    // TODO(gc) make code_range part of memory allocator?
419    // Code which is part of the code-range does not have its own VirtualMemory.
420    DCHECK(code_range() == NULL ||
421           !code_range()->contains(static_cast<Address>(reservation->address())));
422    DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
423           reservation->size() <= Page::kPageSize);
424  
425    reservation->Release();
426  }
427  
428  
429  void MemoryAllocator::FreeMemory(Address base, size_t size,
430                                   Executability executable) {
431    // TODO(gc) make code_range part of memory allocator?
432    if (code_range() != NULL &&
433        code_range()->contains(static_cast<Address>(base))) {
434      DCHECK(executable == EXECUTABLE);
435      code_range()->FreeRawMemory(base, size);
436    } else {
437      DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
438      bool result = base::VirtualMemory::ReleaseRegion(base, size);
439      USE(result);
440      DCHECK(result);
441    }
442  }
443  
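// Reserves |size| bytes of virtual memory aligned to |alignment| and hands
// ownership of the reservation to |controller|. Returns the aligned base
// address, or NULL if the reservation failed.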
444  Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
445                                                base::VirtualMemory* controller) {
446    base::VirtualMemory reservation(size, alignment);
447  
448    if (!reservation.IsReserved()) return NULL;
449    size_.Increment(static_cast<intptr_t>(reservation.size()));
450    Address base =
451        RoundUp(static_cast<Address>(reservation.address()), alignment);
452    controller->TakeControl(&reservation);
453    return base;
454  }
455  
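// Reserves |reserve_size| bytes and commits the leading |commit_size| bytes,
// setting up guard pages for executable mappings. On failure the reservation
// is released and NULL is returned.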
456  Address MemoryAllocator::AllocateAlignedMemory(
457      size_t reserve_size, size_t commit_size, size_t alignment,
458      Executability executable, base::VirtualMemory* controller) {
459    DCHECK(commit_size <= reserve_size);
460    base::VirtualMemory reservation;
461    Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
462    if (base == NULL) return NULL;
463  
464    if (executable == EXECUTABLE) {
465      if (!CommitExecutableMemory(&reservation, base, commit_size,
466                                  reserve_size)) {
467        base = NULL;
468      }
469    } else {
470      if (reservation.Commit(base, commit_size, false)) {
471        UpdateAllocatedSpaceLimits(base, base + commit_size);
472      } else {
473        base = NULL;
474      }
475    }
476  
477    if (base == NULL) {
478      // Failed to commit the body. Release the mapping and any partially
479      // committed regions inside it.
480      reservation.Release();
481      return NULL;
482    }
483  
484    controller->TakeControl(&reservation);
485    return base;
486  }
487  
488  void Page::InitializeAsAnchor(Space* space) {
489    set_owner(space);
490    set_next_chunk(this);
491    set_prev_chunk(this);
492    SetFlags(0, ~0);
493    SetFlag(ANCHOR);
494  }
495  
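// Initializes the MemoryChunk header placed at |base| for a chunk of |size|
// bytes whose usable area spans [area_start, area_end).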
496  MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
497                                       Address area_start, Address area_end,
498                                       Executability executable, Space* owner,
499                                       base::VirtualMemory* reservation) {
500    MemoryChunk* chunk = FromAddress(base);
501  
502    DCHECK(base == chunk->address());
503  
504    chunk->heap_ = heap;
505    chunk->size_ = size;
506    chunk->area_start_ = area_start;
507    chunk->area_end_ = area_end;
508    chunk->flags_ = 0;
509    chunk->set_owner(owner);
510    chunk->InitializeReservedMemory();
511    chunk->old_to_new_slots_ = nullptr;
512    chunk->old_to_old_slots_ = nullptr;
513    chunk->typed_old_to_new_slots_ = nullptr;
514    chunk->typed_old_to_old_slots_ = nullptr;
515    chunk->skip_list_ = nullptr;
516    chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
517    chunk->progress_bar_ = 0;
518    chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
519    chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
520    chunk->mutex_ = new base::Mutex();
521    chunk->available_in_free_list_ = 0;
522    chunk->wasted_memory_ = 0;
523    chunk->ResetLiveBytes();
524    Bitmap::Clear(chunk);
525    chunk->set_next_chunk(nullptr);
526    chunk->set_prev_chunk(nullptr);
527    chunk->local_tracker_ = nullptr;
528  
529    DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
530    DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
531  
532    if (executable == EXECUTABLE) {
533      chunk->SetFlag(IS_EXECUTABLE);
534    }
535  
536    if (reservation != nullptr) {
537      chunk->reservation_.TakeControl(reservation);
538    }
539  
540    return chunk;
541  }
542  
543  
544  // Commit MemoryChunk area to the requested size.
545  bool MemoryChunk::CommitArea(size_t requested) {
546    size_t guard_size =
547        IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
548    size_t header_size = area_start() - address() - guard_size;
549    size_t commit_size =
550        RoundUp(header_size + requested, base::OS::CommitPageSize());
551    size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
552                                    base::OS::CommitPageSize());
553  
554    if (commit_size > committed_size) {
555      // Commit size should be less than or equal to the reserved size.
556      DCHECK(commit_size <= size() - 2 * guard_size);
557      // Append the committed area.
558      Address start = address() + committed_size + guard_size;
559      size_t length = commit_size - committed_size;
560      if (reservation_.IsReserved()) {
561        Executability executable =
562            IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
563        if (!heap()->memory_allocator()->CommitMemory(start, length,
564                                                      executable)) {
565          return false;
566        }
567      } else {
568        CodeRange* code_range = heap_->memory_allocator()->code_range();
569        DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
570        if (!code_range->CommitRawMemory(start, length)) return false;
571      }
572  
573      if (Heap::ShouldZapGarbage()) {
574        heap_->memory_allocator()->ZapBlock(start, length);
575      }
576    } else if (commit_size < committed_size) {
577      DCHECK(commit_size > 0);
578      // Shrink the committed area.
579      size_t length = committed_size - commit_size;
580      Address start = address() + committed_size + guard_size - length;
581      if (reservation_.IsReserved()) {
582        if (!reservation_.Uncommit(start, length)) return false;
583      } else {
584        CodeRange* code_range = heap_->memory_allocator()->code_range();
585        DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
586        if (!code_range->UncommitRawMemory(start, length)) return false;
587      }
588    }
589  
590    area_end_ = area_start_ + requested;
591    return true;
592  }
593  
594  
595  void MemoryChunk::InsertAfter(MemoryChunk* other) {
596    MemoryChunk* other_next = other->next_chunk();
597  
598    set_next_chunk(other_next);
599    set_prev_chunk(other);
600    other_next->set_prev_chunk(this);
601    other->set_next_chunk(this);
602  }
603  
604  
605  void MemoryChunk::Unlink() {
606    MemoryChunk* next_element = next_chunk();
607    MemoryChunk* prev_element = prev_chunk();
608    next_element->set_prev_chunk(prev_element);
609    prev_element->set_next_chunk(next_element);
610    set_prev_chunk(NULL);
611    set_next_chunk(NULL);
612  }
613  
614  
615  MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
616                                              intptr_t commit_area_size,
617                                              Executability executable,
618                                              Space* owner) {
619    DCHECK(commit_area_size <= reserve_area_size);
620  
621    size_t chunk_size;
622    Heap* heap = isolate_->heap();
623    Address base = NULL;
624    base::VirtualMemory reservation;
625    Address area_start = NULL;
626    Address area_end = NULL;
627  
628    //
629    // MemoryChunk layout:
630    //
631    //             Executable
632    // +----------------------------+<- base aligned with MemoryChunk::kAlignment
633    // |           Header           |
634    // +----------------------------+<- base + CodePageGuardStartOffset
635    // |           Guard            |
636    // +----------------------------+<- area_start_
637    // |           Area             |
638    // +----------------------------+<- area_end_ (area_start + commit_area_size)
639    // |   Committed but not used   |
640    // +----------------------------+<- aligned at OS page boundary
641    // | Reserved but not committed |
642    // +----------------------------+<- aligned at OS page boundary
643    // |           Guard            |
644    // +----------------------------+<- base + chunk_size
645    //
646    //           Non-executable
647    // +----------------------------+<- base aligned with MemoryChunk::kAlignment
648    // |          Header            |
649    // +----------------------------+<- area_start_ (base + kObjectStartOffset)
650    // |           Area             |
651    // +----------------------------+<- area_end_ (area_start + commit_area_size)
652    // |  Committed but not used    |
653    // +----------------------------+<- aligned at OS page boundary
654    // | Reserved but not committed |
655    // +----------------------------+<- base + chunk_size
656    //
657  
658    if (executable == EXECUTABLE) {
659      chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
660                           base::OS::CommitPageSize()) +
661                   CodePageGuardSize();
662  
663      // Check executable memory limit.
664      if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
665          capacity_executable_) {
666        LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
667                                  "V8 Executable Allocation capacity exceeded"));
668        return NULL;
669      }
670  
671      // Size of header (not executable) plus area (executable).
672      size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
673                                   base::OS::CommitPageSize());
674      // Allocate executable memory either from code range or from the
675      // OS.
676  #ifdef V8_TARGET_ARCH_MIPS64
677  // Use code range only for large object space on mips64 to keep the
678  // address range within a 256-MB memory region.
679      if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
680  #else
681      if (code_range()->valid()) {
682  #endif
683        base =
684            code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
685        DCHECK(
686            IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
687        if (base == NULL) return NULL;
688        size_.Increment(static_cast<intptr_t>(chunk_size));
689        // Update executable memory size.
690        size_executable_.Increment(static_cast<intptr_t>(chunk_size));
691      } else {
692        base = AllocateAlignedMemory(chunk_size, commit_size,
693                                     MemoryChunk::kAlignment, executable,
694                                     &reservation);
695        if (base == NULL) return NULL;
696        // Update executable memory size.
697        size_executable_.Increment(static_cast<intptr_t>(reservation.size()));
698      }
699  
700      if (Heap::ShouldZapGarbage()) {
701        ZapBlock(base, CodePageGuardStartOffset());
702        ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
703      }
704  
705      area_start = base + CodePageAreaStartOffset();
706      area_end = area_start + commit_area_size;
707    } else {
708      chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
709                           base::OS::CommitPageSize());
710      size_t commit_size =
711          RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
712                  base::OS::CommitPageSize());
713      base =
714          AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
715                                executable, &reservation);
716  
717      if (base == NULL) return NULL;
718  
719      if (Heap::ShouldZapGarbage()) {
720        ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
721      }
722  
723      area_start = base + Page::kObjectStartOffset;
724      area_end = area_start + commit_area_size;
725    }
726  
727    // Use chunk_size for statistics and callbacks because we assume that they
728    // treat reserved but not-yet committed memory regions of chunks as allocated.
729    isolate_->counters()->memory_allocated()->Increment(
730        static_cast<int>(chunk_size));
731  
732    LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
733  
734    // We cannot use the last chunk in the address space because we would
735    // overflow when comparing top and limit if this chunk is used for a
736    // linear allocation area.
737    if ((reinterpret_cast<uintptr_t>(base) + chunk_size) == 0u) {
738      CHECK(!last_chunk_.IsReserved());
739      last_chunk_.TakeControl(&reservation);
740      UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()),
741                    last_chunk_.size());
742      size_.Increment(-static_cast<intptr_t>(chunk_size));
743      if (executable == EXECUTABLE) {
744        size_executable_.Increment(-static_cast<intptr_t>(chunk_size));
745      }
746      CHECK(last_chunk_.IsReserved());
747      return AllocateChunk(reserve_area_size, commit_area_size, executable,
748                           owner);
749    }
750  
751    return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
752                                   executable, owner, &reservation);
753  }
754  
755  
756  void Page::ResetFreeListStatistics() {
757    wasted_memory_ = 0;
758    available_in_free_list_ = 0;
759  }
760  
761  void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
762    DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
763    LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
764  
765    isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
766                                           chunk->IsEvacuationCandidate());
767  
768    intptr_t size;
769    base::VirtualMemory* reservation = chunk->reserved_memory();
770    if (reservation->IsReserved()) {
771      size = static_cast<intptr_t>(reservation->size());
772    } else {
773      size = static_cast<intptr_t>(chunk->size());
774    }
775    DCHECK(size_.Value() >= size);
776    size_.Increment(-size);
777    isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
778  
779    if (chunk->executable() == EXECUTABLE) {
780      DCHECK(size_executable_.Value() >= size);
781      size_executable_.Increment(-size);
782    }
783  
784    chunk->SetFlag(MemoryChunk::PRE_FREED);
785  }
786  
787  
788  void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
789    DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
790    chunk->ReleaseAllocatedMemory();
791  
792    base::VirtualMemory* reservation = chunk->reserved_memory();
793    if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
794      UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
795    } else {
796      if (reservation->IsReserved()) {
797        FreeMemory(reservation, chunk->executable());
798      } else {
799        FreeMemory(chunk->address(), chunk->size(), chunk->executable());
800      }
801    }
802  }
803  
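// Frees |chunk| according to |mode|: kFull frees immediately, while the
// queueing modes only pre-free the chunk and hand it to the unmapper
// (optionally marking it for pooling and reuse).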
804  template <MemoryAllocator::FreeMode mode>
805  void MemoryAllocator::Free(MemoryChunk* chunk) {
806    switch (mode) {
807      case kFull:
808        PreFreeMemory(chunk);
809        PerformFreeMemory(chunk);
810        break;
811      case kPooledAndQueue:
812        DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
813        DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
814        chunk->SetFlag(MemoryChunk::POOLED);
815      // Fall through to kPreFreeAndQueue.
816      case kPreFreeAndQueue:
817        PreFreeMemory(chunk);
818        // The chunks added to this queue will be freed by a concurrent thread.
819        unmapper()->AddMemoryChunkSafe(chunk);
820        break;
821      default:
822        UNREACHABLE();
823    }
824  }
825  
826  template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
827  
828  template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
829      MemoryChunk* chunk);
830  
831  template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
832      MemoryChunk* chunk);
833  
834  template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
835  Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
836                                      Executability executable) {
837    MemoryChunk* chunk = nullptr;
838    if (alloc_mode == kPooled) {
839      DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
840      DCHECK_EQ(executable, NOT_EXECUTABLE);
841      chunk = AllocatePagePooled(owner);
842    }
843    if (chunk == nullptr) {
844      chunk = AllocateChunk(size, size, executable, owner);
845    }
846    if (chunk == nullptr) return nullptr;
847    return Page::Initialize(isolate_->heap(), chunk, executable, owner);
848  }
849  
850  template Page*
851  MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
852      intptr_t size, PagedSpace* owner, Executability executable);
853  template Page*
854  MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
855      intptr_t size, SemiSpace* owner, Executability executable);
856  template Page*
857  MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
858      intptr_t size, SemiSpace* owner, Executability executable);
859  
860  LargePage* MemoryAllocator::AllocateLargePage(intptr_t size,
861                                                LargeObjectSpace* owner,
862                                                Executability executable) {
863    MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
864    if (chunk == nullptr) return nullptr;
865    return LargePage::Initialize(isolate_->heap(), chunk, executable, owner);
866  }
867  
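// Tries to reuse a pooled chunk from the unmapper: recommits its memory and
// reinitializes the MemoryChunk header. Returns nullptr if the pool is empty
// or the commit fails.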
868  template <typename SpaceType>
869  MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
870    MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
871    if (chunk == nullptr) return nullptr;
872    const int size = MemoryChunk::kPageSize;
873    const Address start = reinterpret_cast<Address>(chunk);
874    const Address area_start = start + MemoryChunk::kObjectStartOffset;
875    const Address area_end = start + size;
876    if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
877      return nullptr;
878    }
879    base::VirtualMemory reservation(start, size);
880    MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
881                            NOT_EXECUTABLE, owner, &reservation);
882    size_.Increment(size);
883    return chunk;
884  }
885  
886  bool MemoryAllocator::CommitBlock(Address start, size_t size,
887                                    Executability executable) {
888    if (!CommitMemory(start, size, executable)) return false;
889  
890    if (Heap::ShouldZapGarbage()) {
891      ZapBlock(start, size);
892    }
893  
894    isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
895    return true;
896  }
897  
898  
899  bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
900    if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
901    isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
902    return true;
903  }
904  
905  
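// Fills the block with kZapValue, one pointer-sized word at a time, so that
// accesses to freed or uninitialized memory are easier to spot.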
906  void MemoryAllocator::ZapBlock(Address start, size_t size) {
907    for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
908      Memory::Address_at(start + s) = kZapValue;
909    }
910  }
911  
912  #ifdef DEBUG
913  void MemoryAllocator::ReportStatistics() {
914    intptr_t size = Size();
915    float pct = static_cast<float>(capacity_ - size) / capacity_;
916    PrintF("  capacity: %" V8PRIdPTR ", used: %" V8PRIdPTR
917           ", available: %%%d\n\n",
918           capacity_, size, static_cast<int>(pct * 100));
919  }
920  #endif
921  
922  
923  int MemoryAllocator::CodePageGuardStartOffset() {
924    // We are guarding code pages: the first OS page after the header
925    // will be protected as non-writable.
926    return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
927  }
928  
929  
930  int MemoryAllocator::CodePageGuardSize() {
931    return static_cast<int>(base::OS::CommitPageSize());
932  }
933  
934  
935  int MemoryAllocator::CodePageAreaStartOffset() {
936    // We are guarding code pages: the first OS page after the header
937    // will be protected as non-writable.
938    return CodePageGuardStartOffset() + CodePageGuardSize();
939  }
940  
941  
942  int MemoryAllocator::CodePageAreaEndOffset() {
943    // We are guarding code pages: the last OS page will be protected as
944    // non-writable.
945    return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
946  }
947  
948  
949  bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
950                                               Address start, size_t commit_size,
951                                               size_t reserved_size) {
952    // Commit page header (not executable).
953    Address header = start;
954    size_t header_size = CodePageGuardStartOffset();
955    if (vm->Commit(header, header_size, false)) {
956      // Create guard page after the header.
957      if (vm->Guard(start + CodePageGuardStartOffset())) {
958        // Commit page body (executable).
959        Address body = start + CodePageAreaStartOffset();
960        size_t body_size = commit_size - CodePageGuardStartOffset();
961        if (vm->Commit(body, body_size, true)) {
962          // Create guard page before the end.
963          if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
964            UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
965                                                  commit_size -
966                                                  CodePageGuardStartOffset());
967            return true;
968          }
969          vm->Uncommit(body, body_size);
970        }
971      }
972      vm->Uncommit(header, header_size);
973    }
974    return false;
975  }
976  
977  
978  // -----------------------------------------------------------------------------
979  // MemoryChunk implementation
980  
981  void MemoryChunk::ReleaseAllocatedMemory() {
982    if (skip_list_ != nullptr) {
983      delete skip_list_;
984      skip_list_ = nullptr;
985    }
986    if (mutex_ != nullptr) {
987      delete mutex_;
988      mutex_ = nullptr;
989    }
990    if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
991    if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
992    if (typed_old_to_new_slots_ != nullptr) ReleaseTypedOldToNewSlots();
993    if (typed_old_to_old_slots_ != nullptr) ReleaseTypedOldToOldSlots();
994    if (local_tracker_ != nullptr) ReleaseLocalTracker();
995  }
996  
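// Allocates one SlotSet per page covered by |size| bytes starting at
// |page_start| and records each page's start address in its set.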
997  static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
998    size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
999    DCHECK(pages > 0);
1000    SlotSet* slot_set = new SlotSet[pages];
1001    for (size_t i = 0; i < pages; i++) {
1002      slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
1003    }
1004    return slot_set;
1005  }
1006  
1007  void MemoryChunk::AllocateOldToNewSlots() {
1008    DCHECK(nullptr == old_to_new_slots_);
1009    old_to_new_slots_ = AllocateSlotSet(size_, address());
1010  }
1011  
1012  void MemoryChunk::ReleaseOldToNewSlots() {
1013    delete[] old_to_new_slots_;
1014    old_to_new_slots_ = nullptr;
1015  }
1016  
1017  void MemoryChunk::AllocateOldToOldSlots() {
1018    DCHECK(nullptr == old_to_old_slots_);
1019    old_to_old_slots_ = AllocateSlotSet(size_, address());
1020  }
1021  
1022  void MemoryChunk::ReleaseOldToOldSlots() {
1023    delete[] old_to_old_slots_;
1024    old_to_old_slots_ = nullptr;
1025  }
1026  
1027  void MemoryChunk::AllocateTypedOldToNewSlots() {
1028    DCHECK(nullptr == typed_old_to_new_slots_);
1029    typed_old_to_new_slots_ = new TypedSlotSet(address());
1030  }
1031  
1032  void MemoryChunk::ReleaseTypedOldToNewSlots() {
1033    delete typed_old_to_new_slots_;
1034    typed_old_to_new_slots_ = nullptr;
1035  }
1036  
1037  void MemoryChunk::AllocateTypedOldToOldSlots() {
1038    DCHECK(nullptr == typed_old_to_old_slots_);
1039    typed_old_to_old_slots_ = new TypedSlotSet(address());
1040  }
1041  
1042  void MemoryChunk::ReleaseTypedOldToOldSlots() {
1043    delete typed_old_to_old_slots_;
1044    typed_old_to_old_slots_ = nullptr;
1045  }
1046  
1047  void MemoryChunk::AllocateLocalTracker() {
1048    DCHECK_NULL(local_tracker_);
1049    local_tracker_ = new LocalArrayBufferTracker(heap());
1050  }
1051  
1052  void MemoryChunk::ReleaseLocalTracker() {
1053    DCHECK_NOT_NULL(local_tracker_);
1054    delete local_tracker_;
1055    local_tracker_ = nullptr;
1056  }
1057  
1058  // -----------------------------------------------------------------------------
1059  // PagedSpace implementation
1060  
1061  STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
1062                ObjectSpace::kObjectSpaceNewSpace);
1063  STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
1064                ObjectSpace::kObjectSpaceOldSpace);
1065  STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
1066                ObjectSpace::kObjectSpaceCodeSpace);
1067  STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
1068                ObjectSpace::kObjectSpaceMapSpace);
1069  
1070  void Space::AllocationStep(Address soon_object, int size) {
1071    if (!allocation_observers_paused_) {
1072      for (int i = 0; i < allocation_observers_->length(); ++i) {
1073        AllocationObserver* o = (*allocation_observers_)[i];
1074        o->AllocationStep(size, soon_object, size);
1075      }
1076    }
1077  }
1078  
1079  PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
1080                         Executability executable)
1081      : Space(heap, space, executable), anchor_(this), free_list_(this) {
1082    area_size_ = MemoryAllocator::PageAreaSize(space);
1083    accounting_stats_.Clear();
1084  
1085    allocation_info_.Reset(nullptr, nullptr);
1086  }
1087  
1088  
1089  bool PagedSpace::SetUp() { return true; }
1090  
1091  
1092  bool PagedSpace::HasBeenSetUp() { return true; }
1093  
1094  
1095  void PagedSpace::TearDown() {
1096    for (auto it = begin(); it != end();) {
1097      Page* page = *(it++);  // Will be erased.
1098      ArrayBufferTracker::FreeAll(page);
1099      heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
1100    }
1101    anchor_.set_next_page(&anchor_);
1102    anchor_.set_prev_page(&anchor_);
1103    accounting_stats_.Clear();
1104  }
1105  
1106  void PagedSpace::RefillFreeList() {
1107    // Any PagedSpace might invoke RefillFreeList. We filter all but our old
1108    // generation spaces out.
1109    if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
1110        identity() != MAP_SPACE) {
1111      return;
1112    }
1113    MarkCompactCollector* collector = heap()->mark_compact_collector();
1114    intptr_t added = 0;
1115    {
1116      Page* p = nullptr;
1117      while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
1118        // Pages can only change ownership during compaction. This is safe
1119        // because no other action competes for the page links while
1120        // compaction is in progress.
1121        if (is_local() && (p->owner() != this)) {
1122          base::LockGuard<base::Mutex> guard(
1123              reinterpret_cast<PagedSpace*>(p->owner())->mutex());
1124          p->Unlink();
1125          p->set_owner(this);
1126          p->InsertAfter(anchor_.prev_page());
1127        }
1128        added += RelinkFreeListCategories(p);
1129        added += p->wasted_memory();
1130        if (is_local() && (added > kCompactionMemoryWanted)) break;
1131      }
1132    }
1133    accounting_stats_.IncreaseCapacity(added);
1134  }
1135  
1136  void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
1137    DCHECK(identity() == other->identity());
1138    // Unmerged fields:
1139    //   area_size_
1140    //   anchor_
1141  
1142    other->EmptyAllocationInfo();
1143  
1144    // Update and clear accounting statistics.
1145    accounting_stats_.Merge(other->accounting_stats_);
1146    other->accounting_stats_.Clear();
1147  
1148    // The linear allocation area of {other} should be destroyed now.
1149    DCHECK(other->top() == nullptr);
1150    DCHECK(other->limit() == nullptr);
1151  
1152    AccountCommitted(other->CommittedMemory());
1153  
1154    // Move over pages.
1155    for (auto it = other->begin(); it != other->end();) {
1156      Page* p = *(it++);
1157  
1158      // Relinking requires the category to be unlinked.
1159      other->UnlinkFreeListCategories(p);
1160  
1161      p->Unlink();
1162      p->set_owner(this);
1163      p->InsertAfter(anchor_.prev_page());
1164      RelinkFreeListCategories(p);
1165    }
1166  }
1167  
1168  
1169  size_t PagedSpace::CommittedPhysicalMemory() {
1170    if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
1171    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1172    size_t size = 0;
1173    for (Page* page : *this) {
1174      size += page->CommittedPhysicalMemory();
1175    }
1176    return size;
1177  }
1178  
1179  bool PagedSpace::ContainsSlow(Address addr) {
1180    Page* p = Page::FromAddress(addr);
1181    for (Page* page : *this) {
1182      if (page == p) return true;
1183    }
1184    return false;
1185  }
1186  
1187  
1188  Object* PagedSpace::FindObject(Address addr) {
1189    // Note: this function can only be called on iterable spaces.
1190    DCHECK(!heap()->mark_compact_collector()->in_use());
1191  
1192    if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
1193  
1194    Page* p = Page::FromAddress(addr);
1195    HeapObjectIterator it(p);
1196    for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
1197      Address cur = obj->address();
1198      Address next = cur + obj->Size();
1199      if ((cur <= addr) && (addr < next)) return obj;
1200    }
1201  
1202    UNREACHABLE();
1203    return Smi::FromInt(0);
1204  }
1205  
1206  bool PagedSpace::Expand() {
1207    int size = AreaSize();
1208    if (snapshotable() && !HasPages()) {
1209      size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
1210    }
1211  
1212    if (!heap()->CanExpandOldGeneration(size)) return false;
1213  
1214    Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
1215    if (p == nullptr) return false;
1216  
1217    AccountCommitted(static_cast<intptr_t>(p->size()));
1218  
1219    // Pages created during bootstrapping may contain immortal immovable objects.
1220    if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
1221  
1222    // When incremental marking is active, old space pages are allocated
1223    // black.
1224    if (heap()->incremental_marking()->black_allocation() &&
1225        identity() == OLD_SPACE) {
1226      Bitmap::SetAllBits(p);
1227      p->SetFlag(Page::BLACK_PAGE);
1228      if (FLAG_trace_incremental_marking) {
1229        PrintIsolate(heap()->isolate(), "Added black page %p\n",
1230                     static_cast<void*>(p));
1231      }
1232    }
1233  
1234    DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
1235  
1236    p->InsertAfter(anchor_.prev_page());
1237  
1238    return true;
1239  }
1240  
1241  
1242  int PagedSpace::CountTotalPages() {
1243    int count = 0;
1244    for (Page* page : *this) {
1245      count++;
1246      USE(page);
1247    }
1248    return count;
1249  }
1250  
1251  
1252  void PagedSpace::ResetFreeListStatistics() {
1253    for (Page* page : *this) {
1254      page->ResetFreeListStatistics();
1255    }
1256  }
1257  
1258  
1259  void PagedSpace::IncreaseCapacity(int size) {
1260    accounting_stats_.ExpandSpace(size);
1261  }
1262  
1263  void PagedSpace::ReleasePage(Page* page) {
1264    DCHECK_EQ(page->LiveBytes(), 0);
1265    DCHECK_EQ(AreaSize(), page->area_size());
1266    DCHECK_EQ(page->owner(), this);
1267  
1268    free_list_.EvictFreeListItems(page);
1269    DCHECK(!free_list_.ContainsPageFreeListItems(page));
1270  
1271    if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
1272      allocation_info_.Reset(nullptr, nullptr);
1273    }
1274  
1275    // If page is still in a list, unlink it from that list.
1276    if (page->next_chunk() != NULL) {
1277      DCHECK(page->prev_chunk() != NULL);
1278      page->Unlink();
1279    }
1280  
1281    AccountUncommitted(static_cast<intptr_t>(page->size()));
1282    heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
1283  
1284    DCHECK(Capacity() > 0);
1285    accounting_stats_.ShrinkSpace(AreaSize());
1286  }
1287  
1288  #ifdef DEBUG
1289  void PagedSpace::Print() {}
1290  #endif
1291  
1292  #ifdef VERIFY_HEAP
1293  void PagedSpace::Verify(ObjectVisitor* visitor) {
1294    bool allocation_pointer_found_in_space =
1295        (allocation_info_.top() == allocation_info_.limit());
1296    for (Page* page : *this) {
1297      CHECK(page->owner() == this);
1298      if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
1299        allocation_pointer_found_in_space = true;
1300      }
1301      CHECK(page->SweepingDone());
1302      HeapObjectIterator it(page);
1303      Address end_of_previous_object = page->area_start();
1304      Address top = page->area_end();
1305      int black_size = 0;
1306      for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1307        CHECK(end_of_previous_object <= object->address());
1308  
1309        // The first word should be a map, and we expect all map pointers to
1310        // be in map space.
1311        Map* map = object->map();
1312        CHECK(map->IsMap());
1313        CHECK(heap()->map_space()->Contains(map));
1314  
1315        // Perform space-specific object verification.
1316        VerifyObject(object);
1317  
1318        // The object itself should look OK.
1319        object->ObjectVerify();
1320  
1321        // All the interior pointers should be contained in the heap.
1322        int size = object->Size();
1323        object->IterateBody(map->instance_type(), size, visitor);
1324        if (!page->IsFlagSet(Page::BLACK_PAGE) &&
1325            Marking::IsBlack(Marking::MarkBitFrom(object))) {
1326          black_size += size;
1327        }
1328  
1329        CHECK(object->address() + size <= top);
1330        end_of_previous_object = object->address() + size;
1331      }
1332      CHECK_LE(black_size, page->LiveBytes());
1333    }
1334    CHECK(allocation_pointer_found_in_space);
1335  }
1336  #endif  // VERIFY_HEAP
1337  
1338  // -----------------------------------------------------------------------------
1339  // NewSpace implementation
1340  
1341  bool NewSpace::SetUp(int initial_semispace_capacity,
1342                       int maximum_semispace_capacity) {
1343    DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
1344    DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
1345  
1346    to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
1347    from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
1348    if (!to_space_.Commit()) {
1349      return false;
1350    }
1351    DCHECK(!from_space_.is_committed());  // No need to use memory yet.
1352    ResetAllocationInfo();
1353  
1354    // Allocate and set up the histogram arrays if necessary.
1355    allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1356    promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1357  #define SET_NAME(name)                        \
1358    allocated_histogram_[name].set_name(#name); \
1359    promoted_histogram_[name].set_name(#name);
1360    INSTANCE_TYPE_LIST(SET_NAME)
1361  #undef SET_NAME
1362  
1363    return true;
1364  }
1365  
1366  
1367  void NewSpace::TearDown() {
1368    if (allocated_histogram_) {
1369      DeleteArray(allocated_histogram_);
1370      allocated_histogram_ = NULL;
1371    }
1372    if (promoted_histogram_) {
1373      DeleteArray(promoted_histogram_);
1374      promoted_histogram_ = NULL;
1375    }
1376  
1377    allocation_info_.Reset(nullptr, nullptr);
1378  
1379    to_space_.TearDown();
1380    from_space_.TearDown();
1381  }
1382  
1383  void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
1384  
1385  
1386  void NewSpace::Grow() {
1387    // Double the semispace size but only up to maximum capacity.
1388    DCHECK(TotalCapacity() < MaximumCapacity());
1389    int new_capacity =
1390        Min(MaximumCapacity(),
1391            FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
1392    if (to_space_.GrowTo(new_capacity)) {
1393      // Only grow from-space if we managed to grow to-space.
1394      if (!from_space_.GrowTo(new_capacity)) {
1395        // If we managed to grow to-space but couldn't grow from-space,
1396        // attempt to shrink to-space.
1397        if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
1398          // We are in an inconsistent state because we could not
1399          // commit/uncommit memory from new space.
1400          CHECK(false);
1401        }
1402      }
1403    }
1404    DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1405  }
1406  
1407  
1408  void NewSpace::Shrink() {
1409    int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
1410    int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1411    if (rounded_new_capacity < TotalCapacity() &&
1412        to_space_.ShrinkTo(rounded_new_capacity)) {
1413      // Only shrink from-space if we managed to shrink to-space.
1414      from_space_.Reset();
1415      if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1416        // If we managed to shrink to-space but couldn't shrink from-space,
1417        // attempt to grow to-space again.
1418        if (!to_space_.GrowTo(from_space_.current_capacity())) {
1419          // We are in an inconsistent state because we could not
1420          // commit/uncommit memory from new space.
1421          CHECK(false);
1422        }
1423      }
1424    }
1425    DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1426  }
1427  
1428  bool NewSpace::Rebalance() {
1429    CHECK(heap()->promotion_queue()->is_empty());
1430    // Order here is important to make use of the page pool.
1431    return to_space_.EnsureCurrentCapacity() &&
1432           from_space_.EnsureCurrentCapacity();
1433  }
1434  
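// Adjusts the number of pages in a committed semispace to match
// current_capacity_, unlinking surplus pages and adding pooled pages as
// needed.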
1435  bool SemiSpace::EnsureCurrentCapacity() {
1436    if (is_committed()) {
1437      const int expected_pages = current_capacity_ / Page::kPageSize;
1438      int actual_pages = 0;
1439      Page* current_page = anchor()->next_page();
1440      while (current_page != anchor()) {
1441        actual_pages++;
1442        current_page = current_page->next_page();
1443        if (actual_pages > expected_pages) {
1444          Page* to_remove = current_page->prev_page();
1445          // Make sure we don't overtake the actual top pointer.
1446          CHECK_NE(to_remove, current_page_);
1447          to_remove->Unlink();
1448          heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
1449              to_remove);
1450        }
1451      }
1452      while (actual_pages < expected_pages) {
1453        actual_pages++;
1454        current_page =
1455            heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1456                Page::kAllocatableMemory, this, executable());
1457        if (current_page == nullptr) return false;
1458        DCHECK_NOT_NULL(current_page);
1459        current_page->InsertAfter(anchor());
1460        Bitmap::Clear(current_page);
1461        current_page->SetFlags(anchor()->prev_page()->GetFlags(),
1462                               Page::kCopyAllFlags);
1463        heap()->CreateFillerObjectAt(current_page->area_start(),
1464                                     current_page->area_size(),
1465                                     ClearRecordedSlots::kNo);
1466      }
1467    }
1468    return true;
1469  }
1470  
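// Closing a LAB fills its unused portion [top, limit) with a filler object so
// the containing page stays iterable; the constructor below does the same for
// the area handed to the buffer.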
1471  void LocalAllocationBuffer::Close() {
1472    if (IsValid()) {
1473      heap_->CreateFillerObjectAt(
1474          allocation_info_.top(),
1475          static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
1476          ClearRecordedSlots::kNo);
1477    }
1478  }
1479  
1480  
1481  LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
1482                                               AllocationInfo allocation_info)
1483      : heap_(heap), allocation_info_(allocation_info) {
1484    if (IsValid()) {
1485      heap_->CreateFillerObjectAt(
1486          allocation_info_.top(),
1487          static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
1488          ClearRecordedSlots::kNo);
1489    }
1490  }
1491  
1492  
1493  LocalAllocationBuffer::LocalAllocationBuffer(
1494      const LocalAllocationBuffer& other) {
1495    *this = other;
1496  }
1497  
1498  
1499  LocalAllocationBuffer& LocalAllocationBuffer::operator=(
1500      const LocalAllocationBuffer& other) {
1501    Close();
1502    heap_ = other.heap_;
1503    allocation_info_ = other.allocation_info_;
1504  
1505    // This is needed since we (a) cannot yet use move semantics, (b) want to
1506    // make the class easy to use by passing it around by value, and (c)
1507    // implicitly call {Close} upon copy.
1508    const_cast<LocalAllocationBuffer&>(other)
1509        .allocation_info_.Reset(nullptr, nullptr);
1510    return *this;
1511  }
1512  
1513  
1514  void NewSpace::UpdateAllocationInfo() {
1515    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1516    allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
1517    UpdateInlineAllocationLimit(0);
1518    DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1519  }
1520  
1521  
1522  void NewSpace::ResetAllocationInfo() {
1523    Address old_top = allocation_info_.top();
1524    to_space_.Reset();
1525    UpdateAllocationInfo();
1526    // Clear all mark-bits in the to-space.
1527    for (Page* p : to_space_) {
1528      Bitmap::Clear(p);
1529    }
1530    InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
1531  }
1532  
1533  
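// Recomputes the limit of the linear allocation area. A limit below the page
// end pushes allocation onto the slow path sooner, which gives allocation
// observers (e.g. incremental marking or the idle scavenge job) a chance to
// perform a step.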
1534  void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1535    if (heap()->inline_allocation_disabled()) {
1536      // Lowest limit when linear allocation was disabled.
1537      Address high = to_space_.page_high();
1538      Address new_top = allocation_info_.top() + size_in_bytes;
1539      allocation_info_.set_limit(Min(new_top, high));
1540    } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
1541      // Normal limit is the end of the current page.
1542      allocation_info_.set_limit(to_space_.page_high());
1543    } else {
1544      // Lower limit during incremental marking.
1545      Address high = to_space_.page_high();
1546      Address new_top = allocation_info_.top() + size_in_bytes;
1547      Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
1548      allocation_info_.set_limit(Min(new_limit, high));
1549    }
1550    DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1551  }
1552  
1553  
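// Advances to-space to its next page. The unused remainder of the current
// page is converted into a filler object so the page remains iterable.
// Returns false if to-space has no further page to advance to.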
1554  bool NewSpace::AddFreshPage() {
1555    Address top = allocation_info_.top();
1556    DCHECK(!Page::IsAtObjectStart(top));
1557    if (!to_space_.AdvancePage()) {
1558      // No more pages left to advance.
1559      return false;
1560    }
1561  
1562    // Clear remainder of current page.
1563    Address limit = Page::FromAllocationAreaAddress(top)->area_end();
1564    if (heap()->gc_state() == Heap::SCAVENGE) {
1565      heap()->promotion_queue()->SetNewLimit(limit);
1566    }
1567  
1568    int remaining_in_page = static_cast<int>(limit - top);
1569    heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
1570    UpdateAllocationInfo();
1571  
1572    return true;
1573  }
1574  
1575  
1576  bool NewSpace::AddFreshPageSynchronized() {
1577    base::LockGuard<base::Mutex> guard(&mutex_);
1578    return AddFreshPage();
1579  }
1580  
1581  
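// Makes sure the current allocation area has room for size_in_bytes plus any
// alignment fill. If the current page is too small, a fresh page is added and
// an allocation step is performed for the bytes allocated so far.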
1582  bool NewSpace::EnsureAllocation(int size_in_bytes,
1583                                  AllocationAlignment alignment) {
1584    Address old_top = allocation_info_.top();
1585    Address high = to_space_.page_high();
1586    int filler_size = Heap::GetFillToAlign(old_top, alignment);
1587    int aligned_size_in_bytes = size_in_bytes + filler_size;
1588  
1589    if (old_top + aligned_size_in_bytes > high) {
1590      // Not enough room in the page, try to allocate a new one.
1591      if (!AddFreshPage()) {
1592        return false;
1593      }
1594  
1595      InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
1596  
1597      old_top = allocation_info_.top();
1598      high = to_space_.page_high();
1599      filler_size = Heap::GetFillToAlign(old_top, alignment);
1600    }
1601  
1602    DCHECK(old_top + aligned_size_in_bytes <= high);
1603  
1604    if (allocation_info_.limit() < high) {
1605      // Either the limit has been lowered because linear allocation was disabled
1606      // or because incremental marking wants to get a chance to do a step,
1607      // or because the idle scavenge job wants to get a chance to post a task.
1608      // Set the new limit accordingly.
1609      Address new_top = old_top + aligned_size_in_bytes;
1610      Address soon_object = old_top + filler_size;
1611      InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
1612      UpdateInlineAllocationLimit(aligned_size_in_bytes);
1613    }
1614    return true;
1615  }
1616  
1617  
1618  void NewSpace::StartNextInlineAllocationStep() {
1619    if (!allocation_observers_paused_) {
1620      top_on_previous_step_ =
1621          allocation_observers_->length() ? allocation_info_.top() : 0;
1622      UpdateInlineAllocationLimit(0);
1623    }
1624  }
1625  
1626  
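// Returns the smallest bytes_to_next_step() over all registered allocation
// observers, i.e. the distance to the next observer notification; zero if no
// observers are registered.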
1627  intptr_t NewSpace::GetNextInlineAllocationStepSize() {
1628    intptr_t next_step = 0;
1629    for (int i = 0; i < allocation_observers_->length(); ++i) {
1630      AllocationObserver* o = (*allocation_observers_)[i];
1631      next_step = next_step ? Min(next_step, o->bytes_to_next_step())
1632                            : o->bytes_to_next_step();
1633    }
1634    DCHECK(allocation_observers_->length() == 0 || next_step != 0);
1635    return next_step;
1636  }
1637  
1638  void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
1639    Space::AddAllocationObserver(observer);
1640    StartNextInlineAllocationStep();
1641  }
1642  
1643  void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
1644    Space::RemoveAllocationObserver(observer);
1645    StartNextInlineAllocationStep();
1646  }
1647  
1648  void NewSpace::PauseAllocationObservers() {
1649    // Do a step to account for memory allocated so far.
1650    InlineAllocationStep(top(), top(), nullptr, 0);
1651    Space::PauseAllocationObservers();
1652    top_on_previous_step_ = 0;
1653    UpdateInlineAllocationLimit(0);
1654  }
1655  
1656  void NewSpace::ResumeAllocationObservers() {
1657    DCHECK(top_on_previous_step_ == 0);
1658    Space::ResumeAllocationObservers();
1659    StartNextInlineAllocationStep();
1660  }
1661  
1662  
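// Notifies all allocation observers about the bytes allocated since the last
// step and remembers new_top as the starting point for the next step. This is
// a no-op while top_on_previous_step_ is 0, i.e. while observers are paused
// or none are registered.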
1663  void NewSpace::InlineAllocationStep(Address top, Address new_top,
1664                                      Address soon_object, size_t size) {
1665    if (top_on_previous_step_) {
1666      int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
1667      for (int i = 0; i < allocation_observers_->length(); ++i) {
1668        (*allocation_observers_)[i]->AllocationStep(bytes_allocated, soon_object,
1669                                                    size);
1670      }
1671      top_on_previous_step_ = new_top;
1672    }
1673  }
1674  
1675  #ifdef VERIFY_HEAP
1676  // We do not use the SemiSpaceIterator because verification doesn't assume
1677  // that it works (it depends on the invariants we are checking).
1678  void NewSpace::Verify() {
1679    // The allocation pointer should be in the space or at the very end.
1680    DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1681  
1682    // There should be objects packed in from the low address up to the
1683    // allocation pointer.
1684    Address current = to_space_.first_page()->area_start();
1685    CHECK_EQ(current, to_space_.space_start());
1686  
1687    while (current != top()) {
1688      if (!Page::IsAlignedToPageSize(current)) {
1689        // The allocation pointer should not be in the middle of an object.
1690        CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
1691              current < top());
1692  
1693        HeapObject* object = HeapObject::FromAddress(current);
1694  
1695        // The first word should be a map, and we expect all map pointers to
1696        // be in map space.
1697        Map* map = object->map();
1698        CHECK(map->IsMap());
1699        CHECK(heap()->map_space()->Contains(map));
1700  
1701        // The object should not be code or a map.
1702        CHECK(!object->IsMap());
1703        CHECK(!object->IsAbstractCode());
1704  
1705        // The object itself should look OK.
1706        object->ObjectVerify();
1707  
1708        // All the interior pointers should be contained in the heap.
1709        VerifyPointersVisitor visitor;
1710        int size = object->Size();
1711        object->IterateBody(map->instance_type(), size, &visitor);
1712  
1713        current += size;
1714      } else {
1715        // At end of page, switch to next page.
1716        Page* page = Page::FromAllocationAreaAddress(current)->next_page();
1717        // Next page should be valid.
1718        CHECK(!page->is_anchor());
1719        current = page->area_start();
1720      }
1721    }
1722  
1723    // Check semi-spaces.
1724    CHECK_EQ(from_space_.id(), kFromSpace);
1725    CHECK_EQ(to_space_.id(), kToSpace);
1726    from_space_.Verify();
1727    to_space_.Verify();
1728  }
1729  #endif
1730  
1731  // -----------------------------------------------------------------------------
1732  // SemiSpace implementation
1733  
1734  void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) {
1735    DCHECK_GE(maximum_capacity, Page::kPageSize);
1736    minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1737    current_capacity_ = minimum_capacity_;
1738    maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1739    committed_ = false;
1740  }
1741  
1742  
1743  void SemiSpace::TearDown() {
1744    // Properly uncommit memory to keep the allocator counters in sync.
1745    if (is_committed()) {
1746      for (Page* p : *this) {
1747        ArrayBufferTracker::FreeAll(p);
1748      }
1749      Uncommit();
1750    }
1751    current_capacity_ = maximum_capacity_ = 0;
1752  }
1753  
1754  
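// Commits the semispace by allocating current_capacity_ / Page::kPageSize
// pooled pages and linking them behind the anchor. On failure the pages added
// so far are unlinked again via RewindPages() and false is returned.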
1755  bool SemiSpace::Commit() {
1756    DCHECK(!is_committed());
1757    Page* current = anchor();
1758    const int num_pages = current_capacity_ / Page::kPageSize;
1759    for (int pages_added = 0; pages_added < num_pages; pages_added++) {
1760      Page* new_page =
1761          heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1762              Page::kAllocatableMemory, this, executable());
1763      if (new_page == nullptr) {
1764        RewindPages(current, pages_added);
1765        return false;
1766      }
1767      new_page->InsertAfter(current);
1768      current = new_page;
1769    }
1770    Reset();
1771    AccountCommitted(current_capacity_);
1772    if (age_mark_ == nullptr) {
1773      age_mark_ = first_page()->area_start();
1774    }
1775    committed_ = true;
1776    return true;
1777  }
1778  
1779  
1780  bool SemiSpace::Uncommit() {
1781    DCHECK(is_committed());
1782    for (auto it = begin(); it != end();) {
1783      Page* p = *(it++);
1784      heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(p);
1785    }
1786    anchor()->set_next_page(anchor());
1787    anchor()->set_prev_page(anchor());
1788    AccountUncommitted(current_capacity_);
1789    committed_ = false;
1790    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
1791    return true;
1792  }
1793  
1794  
1795  size_t SemiSpace::CommittedPhysicalMemory() {
1796    if (!is_committed()) return 0;
1797    size_t size = 0;
1798    for (Page* p : *this) {
1799      size += p->CommittedPhysicalMemory();
1800    }
1801    return size;
1802  }
1803  
1804  
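// Grows a semispace to new_capacity by appending pooled pages after the
// current last page; new pages inherit the flip-relevant flags of the old
// last page. Commits the semispace first if necessary.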
1805  bool SemiSpace::GrowTo(int new_capacity) {
1806    if (!is_committed()) {
1807      if (!Commit()) return false;
1808    }
1809    DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
1810    DCHECK_LE(new_capacity, maximum_capacity_);
1811    DCHECK_GT(new_capacity, current_capacity_);
1812    const int delta = new_capacity - current_capacity_;
1813    DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1814    const int delta_pages = delta / Page::kPageSize;
1815    Page* last_page = anchor()->prev_page();
1816    DCHECK_NE(last_page, anchor());
1817    for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
1818      Page* new_page =
1819          heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1820              Page::kAllocatableMemory, this, executable());
1821      if (new_page == nullptr) {
1822        RewindPages(last_page, pages_added);
1823        return false;
1824      }
1825      new_page->InsertAfter(last_page);
1826      Bitmap::Clear(new_page);
1827      // Duplicate the flags that were set on the old page.
1828      new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
1829      last_page = new_page;
1830    }
1831    AccountCommitted(static_cast<intptr_t>(delta));
1832    current_capacity_ = new_capacity;
1833    return true;
1834  }
1835  
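// Unlinks num_pages pages from the semispace, walking backwards starting at
// start. Used to undo a partially completed Commit() or GrowTo().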
1836  void SemiSpace::RewindPages(Page* start, int num_pages) {
1837    Page* new_last_page = nullptr;
1838    Page* last_page = start;
1839    while (num_pages > 0) {
1840      DCHECK_NE(last_page, anchor());
1841      new_last_page = last_page->prev_page();
1842      last_page->prev_page()->set_next_page(last_page->next_page());
1843      last_page->next_page()->set_prev_page(last_page->prev_page());
1844      last_page = new_last_page;
1845      num_pages--;
1846    }
1847  }
1848  
1849  bool SemiSpace::ShrinkTo(int new_capacity) {
1850    DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
1851    DCHECK_GE(new_capacity, minimum_capacity_);
1852    DCHECK_LT(new_capacity, current_capacity_);
1853    if (is_committed()) {
1854      const int delta = current_capacity_ - new_capacity;
1855      DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1856      int delta_pages = delta / Page::kPageSize;
1857      Page* new_last_page;
1858      Page* last_page;
1859      while (delta_pages > 0) {
1860        last_page = anchor()->prev_page();
1861        new_last_page = last_page->prev_page();
1862        new_last_page->set_next_page(anchor());
1863        anchor()->set_prev_page(new_last_page);
1864        heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
1865            last_page);
1866        delta_pages--;
1867      }
1868      AccountUncommitted(static_cast<intptr_t>(delta));
1869      heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
1870    }
1871    current_capacity_ = new_capacity;
1872    return true;
1873  }
1874  
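// Re-establishes per-page state after Swap(): every page is re-owned by this
// semispace, gets the given flags applied under the given mask, and has its
// IN_TO_SPACE/IN_FROM_SPACE flags set according to the semispace's id.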
1875  void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
1876    anchor_.set_owner(this);
1877    anchor_.prev_page()->set_next_page(&anchor_);
1878    anchor_.next_page()->set_prev_page(&anchor_);
1879  
1880    for (Page* page : *this) {
1881      page->set_owner(this);
1882      page->SetFlags(flags, mask);
1883      if (id_ == kToSpace) {
1884        page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1885        page->SetFlag(MemoryChunk::IN_TO_SPACE);
1886        page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1887        page->ResetLiveBytes();
1888      } else {
1889        page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1890        page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1891      }
1892      DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1893             page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1894    }
1895  }
1896  
1897  
1898  void SemiSpace::Reset() {
1899    DCHECK_NE(anchor_.next_page(), &anchor_);
1900    current_page_ = anchor_.next_page();
1901    pages_used_ = 0;
1902  }
1903  
1904  void SemiSpace::RemovePage(Page* page) {
1905    if (current_page_ == page) {
1906      current_page_ = page->prev_page();
1907    }
1908    page->Unlink();
1909  }
1910  
1911  void SemiSpace::PrependPage(Page* page) {
1912    page->SetFlags(current_page()->GetFlags(), Page::kCopyAllFlags);
1913    page->set_owner(this);
1914    page->InsertAfter(anchor());
1915    pages_used_++;
1916  }
1917  
1918  void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1919    // We won't be swapping semispaces without data in them.
1920    DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
1921    DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
1922  
1923    intptr_t saved_to_space_flags = to->current_page()->GetFlags();
1924  
1925    // We swap all properties but id_.
1926    std::swap(from->current_capacity_, to->current_capacity_);
1927    std::swap(from->maximum_capacity_, to->maximum_capacity_);
1928    std::swap(from->minimum_capacity_, to->minimum_capacity_);
1929    std::swap(from->age_mark_, to->age_mark_);
1930    std::swap(from->committed_, to->committed_);
1931    std::swap(from->anchor_, to->anchor_);
1932    std::swap(from->current_page_, to->current_page_);
1933  
1934    to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
1935    from->FixPagesFlags(0, 0);
1936  }
1937  
1938  
1939  void SemiSpace::set_age_mark(Address mark) {
1940    DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
1941    age_mark_ = mark;
1942    // Mark all pages up to the one containing mark.
1943    for (Page* p : NewSpacePageRange(space_start(), mark)) {
1944      p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1945    }
1946  }
1947  
1948  
1949  #ifdef DEBUG
1950  void SemiSpace::Print() {}
1951  #endif
1952  
1953  #ifdef VERIFY_HEAP
1954  void SemiSpace::Verify() {
1955    bool is_from_space = (id_ == kFromSpace);
1956    Page* page = anchor_.next_page();
1957    CHECK(anchor_.owner() == this);
1958    while (page != &anchor_) {
1959      CHECK_EQ(page->owner(), this);
1960      CHECK(page->InNewSpace());
1961      CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1962                                          : MemoryChunk::IN_TO_SPACE));
1963      CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1964                                           : MemoryChunk::IN_FROM_SPACE));
1965      CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1966      if (!is_from_space) {
1967        // The pointers-from-here-are-interesting flag isn't updated dynamically
1968        // on from-space pages, so it might be out of sync with the marking state.
1969        if (page->heap()->incremental_marking()->IsMarking()) {
1970          CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1971        } else {
1972          CHECK(
1973              !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1974        }
1975        // TODO(gc): Check that the live_bytes_count_ field matches the
1976        // black marking on the page (if we make it match in new-space).
1977      }
1978      CHECK_EQ(page->prev_page()->next_page(), page);
1979      page = page->next_page();
1980    }
1981  }
1982  #endif
1983  
1984  #ifdef DEBUG
1985  void SemiSpace::AssertValidRange(Address start, Address end) {
1986    // Addresses belong to the same semi-space.
1987    Page* page = Page::FromAllocationAreaAddress(start);
1988    Page* end_page = Page::FromAllocationAreaAddress(end);
1989    SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
1990    CHECK_EQ(space, end_page->owner());
1991    // The start address is before the end address, either on the same page,
1992    // or the end address is on a later page in the linked list of
1993    // semi-space pages.
1994    if (page == end_page) {
1995      CHECK_LE(start, end);
1996    } else {
1997      while (page != end_page) {
1998        page = page->next_page();
1999        CHECK_NE(page, space->anchor());
2000      }
2001    }
2002  }
2003  #endif
2004  
2005  
2006  // -----------------------------------------------------------------------------
2007  // SemiSpaceIterator implementation.
2008  
2009  SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
2010    Initialize(space->bottom(), space->top());
2011  }
2012  
2013  
2014  void SemiSpaceIterator::Initialize(Address start, Address end) {
2015    SemiSpace::AssertValidRange(start, end);
2016    current_ = start;
2017    limit_ = end;
2018  }
2019  
2020  #ifdef DEBUG
2021  // heap_histograms is shared, always clear it before using it.
2022  static void ClearHistograms(Isolate* isolate) {
2023  // We reset the name each time, though it hasn't changed.
2024  #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
2025    INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
2026  #undef DEF_TYPE_NAME
2027  
2028  #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
2029    INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
2030  #undef CLEAR_HISTOGRAM
2031  
2032    isolate->js_spill_information()->Clear();
2033  }
2034  
2035  
2036  static void ClearCodeKindStatistics(int* code_kind_statistics) {
2037    for (int i = 0; i < AbstractCode::NUMBER_OF_KINDS; i++) {
2038      code_kind_statistics[i] = 0;
2039    }
2040  }
2041  static void ReportCodeKindStatistics(int* code_kind_statistics) {
2042    PrintF("\n   Code kind histograms: \n");
2043    for (int i = 0; i < AbstractCode::NUMBER_OF_KINDS; i++) {
2044      if (code_kind_statistics[i] > 0) {
2045        PrintF("     %-20s: %10d bytes\n",
2046               AbstractCode::Kind2String(static_cast<AbstractCode::Kind>(i)),
2047               code_kind_statistics[i]);
2048      }
2049    }
2050    PrintF("\n");
2051  }
2052  
2053  static int CollectHistogramInfo(HeapObject* obj) {
2054    Isolate* isolate = obj->GetIsolate();
2055    InstanceType type = obj->map()->instance_type();
2056    DCHECK(0 <= type && type <= LAST_TYPE);
2057    DCHECK(isolate->heap_histograms()[type].name() != NULL);
2058    isolate->heap_histograms()[type].increment_number(1);
2059    isolate->heap_histograms()[type].increment_bytes(obj->Size());
2060  
2061    if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
2062      JSObject::cast(obj)
2063          ->IncrementSpillStatistics(isolate->js_spill_information());
2064    }
2065  
2066    return obj->Size();
2067  }
2068  
2069  
2070  static void ReportHistogram(Isolate* isolate, bool print_spill) {
2071    PrintF("\n  Object Histogram:\n");
2072    for (int i = 0; i <= LAST_TYPE; i++) {
2073      if (isolate->heap_histograms()[i].number() > 0) {
2074        PrintF("    %-34s%10d (%10d bytes)\n",
2075               isolate->heap_histograms()[i].name(),
2076               isolate->heap_histograms()[i].number(),
2077               isolate->heap_histograms()[i].bytes());
2078      }
2079    }
2080    PrintF("\n");
2081  
2082    // Summarize string types.
2083    int string_number = 0;
2084    int string_bytes = 0;
2085  #define INCREMENT(type, size, name, camel_name)               \
2086    string_number += isolate->heap_histograms()[type].number(); \
2087    string_bytes += isolate->heap_histograms()[type].bytes();
2088    STRING_TYPE_LIST(INCREMENT)
2089  #undef INCREMENT
2090    if (string_number > 0) {
2091      PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
2092             string_bytes);
2093    }
2094  
2095    if (FLAG_collect_heap_spill_statistics && print_spill) {
2096      isolate->js_spill_information()->Print();
2097    }
2098  }
2099  #endif  // DEBUG
2100  
2101  
2102  // Support for statistics gathering for --heap-stats and --log-gc.
2103  void NewSpace::ClearHistograms() {
2104    for (int i = 0; i <= LAST_TYPE; i++) {
2105      allocated_histogram_[i].clear();
2106      promoted_histogram_[i].clear();
2107    }
2108  }
2109  
2110  
2111  // Because the copying collector does not touch garbage objects, we iterate
2112  // the new space before a collection to get a histogram of allocated objects.
2113  // This only happens when the --log-gc flag is set.
2114  void NewSpace::CollectStatistics() {
2115    ClearHistograms();
2116    SemiSpaceIterator it(this);
2117    for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
2118      RecordAllocation(obj);
2119  }
2120  
2121  
2122  static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
2123                                 const char* description) {
2124    LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
2125    // Lump all the string types together.
2126    int string_number = 0;
2127    int string_bytes = 0;
2128  #define INCREMENT(type, size, name, camel_name) \
2129    string_number += info[type].number();         \
2130    string_bytes += info[type].bytes();
2131    STRING_TYPE_LIST(INCREMENT)
2132  #undef INCREMENT
2133    if (string_number > 0) {
2134      LOG(isolate,
2135          HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
2136    }
2137  
2138    // Then do the other types.
2139    for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
2140      if (info[i].number() > 0) {
2141        LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
2142                                         info[i].bytes()));
2143      }
2144    }
2145    LOG(isolate, HeapSampleEndEvent("NewSpace", description));
2146  }
2147  
2148  
2149  void NewSpace::ReportStatistics() {
2150  #ifdef DEBUG
2151    if (FLAG_heap_stats) {
2152      float pct = static_cast<float>(Available()) / TotalCapacity();
2153      PrintF("  capacity: %" V8PRIdPTR ", available: %" V8PRIdPTR ", %%%d\n",
2154             TotalCapacity(), Available(), static_cast<int>(pct * 100));
2155      PrintF("\n  Object Histogram:\n");
2156      for (int i = 0; i <= LAST_TYPE; i++) {
2157        if (allocated_histogram_[i].number() > 0) {
2158          PrintF("    %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2159                 allocated_histogram_[i].number(),
2160                 allocated_histogram_[i].bytes());
2161        }
2162      }
2163      PrintF("\n");
2164    }
2165  #endif  // DEBUG
2166  
2167    if (FLAG_log_gc) {
2168      Isolate* isolate = heap()->isolate();
2169      DoReportStatistics(isolate, allocated_histogram_, "allocated");
2170      DoReportStatistics(isolate, promoted_histogram_, "promoted");
2171    }
2172  }
2173  
2174  
2175  void NewSpace::RecordAllocation(HeapObject* obj) {
2176    InstanceType type = obj->map()->instance_type();
2177    DCHECK(0 <= type && type <= LAST_TYPE);
2178    allocated_histogram_[type].increment_number(1);
2179    allocated_histogram_[type].increment_bytes(obj->Size());
2180  }
2181  
2182  
2183  void NewSpace::RecordPromotion(HeapObject* obj) {
2184    InstanceType type = obj->map()->instance_type();
2185    DCHECK(0 <= type && type <= LAST_TYPE);
2186    promoted_histogram_[type].increment_number(1);
2187    promoted_histogram_[type].increment_bytes(obj->Size());
2188  }
2189  
2190  
2191  size_t NewSpace::CommittedPhysicalMemory() {
2192    if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2193    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2194    size_t size = to_space_.CommittedPhysicalMemory();
2195    if (from_space_.is_committed()) {
2196      size += from_space_.CommittedPhysicalMemory();
2197    }
2198    return size;
2199  }
2200  
2201  
2202  // -----------------------------------------------------------------------------
2203  // Free lists for old object spaces implementation
2204  
2205  
2206  void FreeListCategory::Reset() {
2207    set_top(nullptr);
2208    set_prev(nullptr);
2209    set_next(nullptr);
2210    available_ = 0;
2211  }
2212  
2213  FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
2214    DCHECK(page()->CanAllocate());
2215  
2216    FreeSpace* node = top();
2217    if (node == nullptr) return nullptr;
2218    set_top(node->next());
2219    *node_size = node->Size();
2220    available_ -= *node_size;
2221    return node;
2222  }
2223  
2224  FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
2225                                                   int* node_size) {
2226    DCHECK(page()->CanAllocate());
2227  
2228    FreeSpace* node = PickNodeFromList(node_size);
2229    if ((node != nullptr) && (*node_size < minimum_size)) {
2230      Free(node, *node_size, kLinkCategory);
2231      *node_size = 0;
2232      return nullptr;
2233    }
2234    return node;
2235  }
2236  
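// Linearly scans the category's free list for the first node of at least
// minimum_size, unlinks it and returns it; returns nullptr if no such node
// exists.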
2237  FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
2238                                                   int* node_size) {
2239    DCHECK(page()->CanAllocate());
2240  
2241    FreeSpace* prev_non_evac_node = nullptr;
2242    for (FreeSpace* cur_node = top(); cur_node != nullptr;
2243         cur_node = cur_node->next()) {
2244      int size = cur_node->size();
2245      if (size >= minimum_size) {
2246        available_ -= size;
2247        if (cur_node == top()) {
2248          set_top(cur_node->next());
2249        }
2250        if (prev_non_evac_node != nullptr) {
2251          prev_non_evac_node->set_next(cur_node->next());
2252        }
2253        *node_size = size;
2254        return cur_node;
2255      }
2256  
2257      prev_non_evac_node = cur_node;
2258    }
2259    return nullptr;
2260  }
2261  
2262  bool FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes,
2263                              FreeMode mode) {
2264    if (!page()->CanAllocate()) return false;
2265  
2266    free_space->set_next(top());
2267    set_top(free_space);
2268    available_ += size_in_bytes;
2269    if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
2270      owner()->AddCategory(this);
2271    }
2272    return true;
2273  }
2274  
2275  
2276  void FreeListCategory::RepairFreeList(Heap* heap) {
2277    FreeSpace* n = top();
2278    while (n != NULL) {
2279      Map** map_location = reinterpret_cast<Map**>(n->address());
2280      if (*map_location == NULL) {
2281        *map_location = heap->free_space_map();
2282      } else {
2283        DCHECK(*map_location == heap->free_space_map());
2284      }
2285      n = n->next();
2286    }
2287  }
2288  
2289  void FreeListCategory::Relink() {
2290    DCHECK(!is_linked());
2291    owner()->AddCategory(this);
2292  }
2293  
2294  void FreeListCategory::Invalidate() {
2295    page()->add_available_in_free_list(-available());
2296    Reset();
2297    type_ = kInvalidCategory;
2298  }
2299  
2300  FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
2301    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2302      categories_[i] = nullptr;
2303    }
2304    Reset();
2305  }
2306  
2307  
2308  void FreeList::Reset() {
2309    ForAllFreeListCategories(
2310        [](FreeListCategory* category) { category->Reset(); });
2311    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2312      categories_[i] = nullptr;
2313    }
2314    ResetStats();
2315  }
2316  
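// Returns the memory range [start, start + size_in_bytes) to the free list.
// The region is first turned into a filler object; blocks smaller than
// kMinBlockSize are only accounted as wasted memory and their size is
// returned, while larger blocks are linked into the owning page's category
// for the matching size class and 0 is returned.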
2317  int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
2318    if (size_in_bytes == 0) return 0;
2319  
2320    owner()->heap()->CreateFillerObjectAt(start, size_in_bytes,
2321                                          ClearRecordedSlots::kNo);
2322  
2323    Page* page = Page::FromAddress(start);
2324  
2325    // Blocks have to be a minimum size to hold free list items.
2326    if (size_in_bytes < kMinBlockSize) {
2327      page->add_wasted_memory(size_in_bytes);
2328      wasted_bytes_.Increment(size_in_bytes);
2329      return size_in_bytes;
2330    }
2331  
2332    FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
2333    // Insert other blocks at the head of a free list of the appropriate
2334    // magnitude.
2335    FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
2336    if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
2337      page->add_available_in_free_list(size_in_bytes);
2338    }
2339    return 0;
2340  }
2341  
2342  FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
2343    FreeListCategoryIterator it(this, type);
2344    FreeSpace* node = nullptr;
2345    while (it.HasNext()) {
2346      FreeListCategory* current = it.Next();
2347      node = current->PickNodeFromList(node_size);
2348      if (node != nullptr) {
2349        Page::FromAddress(node->address())
2350            ->add_available_in_free_list(-(*node_size));
2351        DCHECK(IsVeryLong() || Available() == SumFreeLists());
2352        return node;
2353      }
2354      RemoveCategory(current);
2355    }
2356    return node;
2357  }
2358  
2359  FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, int* node_size,
2360                                     int minimum_size) {
2361    if (categories_[type] == nullptr) return nullptr;
2362    FreeSpace* node =
2363        categories_[type]->TryPickNodeFromList(minimum_size, node_size);
2364    if (node != nullptr) {
2365      Page::FromAddress(node->address())
2366          ->add_available_in_free_list(-(*node_size));
2367      DCHECK(IsVeryLong() || Available() == SumFreeLists());
2368    }
2369    return node;
2370  }
2371  
2372  FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
2373                                           int* node_size, int minimum_size) {
2374    FreeListCategoryIterator it(this, type);
2375    FreeSpace* node = nullptr;
2376    while (it.HasNext()) {
2377      FreeListCategory* current = it.Next();
2378      node = current->SearchForNodeInList(minimum_size, node_size);
2379      if (node != nullptr) {
2380        Page::FromAddress(node->address())
2381            ->add_available_in_free_list(-(*node_size));
2382        DCHECK(IsVeryLong() || Available() == SumFreeLists());
2383        return node;
2384      }
2385    }
2386    return node;
2387  }
2388  
2389  FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2390    FreeSpace* node = nullptr;
2391  
2392    // First try the allocation fast path: try to allocate the minimum element
2393    // size of a free list category. This operation is constant time.
2394    FreeListCategoryType type =
2395        SelectFastAllocationFreeListCategoryType(size_in_bytes);
2396    for (int i = type; i < kHuge; i++) {
2397      node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
2398      if (node != nullptr) return node;
2399    }
2400  
2401    // Next search the huge list for free list nodes. This takes linear time in
2402    // the number of huge elements.
2403    node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
2404    if (node != nullptr) {
2405      DCHECK(IsVeryLong() || Available() == SumFreeLists());
2406      return node;
2407    }
2408  
2409    // We need a huge block of memory, but we didn't find anything in the huge
2410    // list.
2411    if (type == kHuge) return nullptr;
2412  
2413    // Now search the best fitting free list for a node that has at least the
2414    // requested size.
2415    type = SelectFreeListCategoryType(size_in_bytes);
2416    node = TryFindNodeIn(type, node_size, size_in_bytes);
2417  
2418    DCHECK(IsVeryLong() || Available() == SumFreeLists());
2419    return node;
2420  }
2421  
2422  // Allocation on the old space free list.  If it succeeds then a new linear
2423  // allocation space has been set up with the top and limit of the space.  If
2424  // the allocation fails then NULL is returned, and the caller can perform a GC
2425  // or allocate a new page before retrying.
2426  HeapObject* FreeList::Allocate(int size_in_bytes) {
2427    DCHECK(0 < size_in_bytes);
2428    DCHECK(size_in_bytes <= kMaxBlockSize);
2429    DCHECK(IsAligned(size_in_bytes, kPointerSize));
2430    // Don't free list allocate if there is linear space available.
2431    DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2432  
2433    int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2434    // Mark the old linear allocation area with a free space map so it can be
2435    // skipped when scanning the heap.  This also puts it back in the free list
2436    // if it is big enough.
2437    owner_->Free(owner_->top(), old_linear_size);
2438    owner_->SetTopAndLimit(nullptr, nullptr);
2439  
2440    owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
2441                                                        old_linear_size);
2442  
2443    int new_node_size = 0;
2444    FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2445    if (new_node == nullptr) return nullptr;
2446  
2447    int bytes_left = new_node_size - size_in_bytes;
2448    DCHECK(bytes_left >= 0);
2449  
2450  #ifdef DEBUG
2451    for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2452      reinterpret_cast<Object**>(new_node->address())[i] =
2453          Smi::FromInt(kCodeZapValue);
2454    }
2455  #endif
2456  
2457    // The old-space-step might have finished sweeping and restarted marking.
2458    // Verify that it did not turn the page of the new node into an evacuation
2459    // candidate.
2460    DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2461  
2462    const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2463  
2464    // Memory in the linear allocation area is counted as allocated.  We may free
2465    // a little of this again immediately - see below.
2466    owner_->Allocate(new_node_size);
2467  
2468    if (owner_->heap()->inline_allocation_disabled()) {
2469      // Keep the linear allocation area empty if requested to do so, just
2470      // return area back to the free list instead.
2471      owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2472      owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2473                             new_node->address() + size_in_bytes);
2474    } else if (bytes_left > kThreshold &&
2475               owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2476               FLAG_incremental_marking) {
2477      int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2478      // We don't want to give too large linear areas to the allocator while
2479      // incremental marking is going on, because we won't check again whether
2480      // we want to do another increment until the linear area is used up.
2481      owner_->Free(new_node->address() + size_in_bytes + linear_size,
2482                   new_node_size - size_in_bytes - linear_size);
2483      owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2484                             new_node->address() + size_in_bytes + linear_size);
2485    } else {
2486      DCHECK(bytes_left >= 0);
2487      // Normally we give the rest of the node to the allocator as its new
2488      // linear allocation area.
2489      owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2490                             new_node->address() + new_node_size);
2491    }
2492  
2493    owner_->AllocationStep(new_node->address(), size_in_bytes);
2494  
2495    return new_node;
2496  }
2497  
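// Detaches and invalidates all free-list categories belonging to the given
// page and returns the number of free bytes that were removed this way.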
2498  intptr_t FreeList::EvictFreeListItems(Page* page) {
2499    intptr_t sum = 0;
2500    page->ForAllFreeListCategories(
2501        [this, &sum, page](FreeListCategory* category) {
2502          DCHECK_EQ(this, category->owner());
2503          sum += category->available();
2504          RemoveCategory(category);
2505          category->Invalidate();
2506        });
2507    return sum;
2508  }
2509  
2510  bool FreeList::ContainsPageFreeListItems(Page* page) {
2511    bool contained = false;
2512    page->ForAllFreeListCategories(
2513        [this, &contained](FreeListCategory* category) {
2514          if (category->owner() == this && category->is_linked()) {
2515            contained = true;
2516          }
2517        });
2518    return contained;
2519  }
2520  
2521  void FreeList::RepairLists(Heap* heap) {
2522    ForAllFreeListCategories(
2523        [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
2524  }
2525  
2526  bool FreeList::AddCategory(FreeListCategory* category) {
2527    FreeListCategoryType type = category->type_;
2528    FreeListCategory* top = categories_[type];
2529  
2530    if (category->is_empty()) return false;
2531    if (top == category) return false;
2532  
2533    // Common doubly-linked list insertion.
2534    if (top != nullptr) {
2535      top->set_prev(category);
2536    }
2537    category->set_next(top);
2538    categories_[type] = category;
2539    return true;
2540  }
2541  
2542  void FreeList::RemoveCategory(FreeListCategory* category) {
2543    FreeListCategoryType type = category->type_;
2544    FreeListCategory* top = categories_[type];
2545  
2546    // Common doubly-linked list removal.
2547    if (top == category) {
2548      categories_[type] = category->next();
2549    }
2550    if (category->prev() != nullptr) {
2551      category->prev()->set_next(category->next());
2552    }
2553    if (category->next() != nullptr) {
2554      category->next()->set_prev(category->prev());
2555    }
2556    category->set_next(nullptr);
2557    category->set_prev(nullptr);
2558  }
2559  
2560  void FreeList::PrintCategories(FreeListCategoryType type) {
2561    FreeListCategoryIterator it(this, type);
2562    PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
2563           static_cast<void*>(categories_[type]), type);
2564    while (it.HasNext()) {
2565      FreeListCategory* current = it.Next();
2566      PrintF("%p -> ", static_cast<void*>(current));
2567    }
2568    PrintF("null\n");
2569  }
2570  
2571  
2572  #ifdef DEBUG
2573  intptr_t FreeListCategory::SumFreeList() {
2574    intptr_t sum = 0;
2575    FreeSpace* cur = top();
2576    while (cur != NULL) {
2577      DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
2578      sum += cur->nobarrier_size();
2579      cur = cur->next();
2580    }
2581    return sum;
2582  }
2583  
2584  int FreeListCategory::FreeListLength() {
2585    int length = 0;
2586    FreeSpace* cur = top();
2587    while (cur != NULL) {
2588      length++;
2589      cur = cur->next();
2590      if (length == kVeryLongFreeList) return length;
2591    }
2592    return length;
2593  }
2594  
2595  bool FreeList::IsVeryLong() {
2596    int len = 0;
2597    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2598      FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
2599      while (it.HasNext()) {
2600        len += it.Next()->FreeListLength();
2601        if (len >= FreeListCategory::kVeryLongFreeList) return true;
2602      }
2603    }
2604    return false;
2605  }
2606  
2607  
2608  // This can take a very long time because it is linear in the number of entries
2609  // on the free list, so it should not be called if FreeListLength returns
2610  // kVeryLongFreeList.
2611  intptr_t FreeList::SumFreeLists() {
2612    intptr_t sum = 0;
2613    ForAllFreeListCategories(
2614        [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
2615    return sum;
2616  }
2617  #endif
2618  
2619  
2620  // -----------------------------------------------------------------------------
2621  // OldSpace implementation
2622  
2623  void PagedSpace::PrepareForMarkCompact() {
2624    // We don't have a linear allocation area while sweeping.  It will be restored
2625    // on the first allocation after the sweep.
2626    EmptyAllocationInfo();
2627  
2628    // Clear the free list before a full GC---it will be rebuilt afterward.
2629    free_list_.Reset();
2630  }
2631  
2632  
2633  intptr_t PagedSpace::SizeOfObjects() {
2634    const intptr_t size = Size() - (limit() - top());
2635    CHECK_GE(limit(), top());
2636    CHECK_GE(size, 0);
2637    USE(size);
2638    return size;
2639  }
2640  
2641  
2642  // After we have booted, we have created a map which represents free space
2643  // on the heap.  If there was already a free list then the elements on it
2644  // were created with the wrong FreeSpaceMap (normally NULL), so we need to
2645  // fix them.
2646  void PagedSpace::RepairFreeListsAfterDeserialization() {
2647    free_list_.RepairLists(heap());
2648    // Each page may have a small free space that is not tracked by a free list.
2649    // Update the maps for those free space objects.
2650    for (Page* page : *this) {
2651      int size = static_cast<int>(page->wasted_memory());
2652      if (size == 0) continue;
2653      Address address = page->OffsetToAddress(Page::kPageSize - size);
2654      heap()->CreateFillerObjectAt(address, size, ClearRecordedSlots::kNo);
2655    }
2656  }
2657  
2658  
2659  void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
2660    if (allocation_info_.top() >= allocation_info_.limit()) return;
2661  
2662    if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) {
2663      // Create filler object to keep page iterable if it was iterable.
2664      int remaining =
2665          static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2666      heap()->CreateFillerObjectAt(allocation_info_.top(), remaining,
2667                                   ClearRecordedSlots::kNo);
2668      allocation_info_.Reset(nullptr, nullptr);
2669    }
2670  }
2671  
2672  
2673  HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
2674    MarkCompactCollector* collector = heap()->mark_compact_collector();
2675    if (collector->sweeping_in_progress()) {
2676      // Wait for the sweeper threads here and complete the sweeping phase.
2677      collector->EnsureSweepingCompleted();
2678  
2679      // After waiting for the sweeper threads, there may be new free-list
2680      // entries.
2681      return free_list_.Allocate(size_in_bytes);
2682    }
2683    return nullptr;
2684  }
2685  
2686  
2687  HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
2688    MarkCompactCollector* collector = heap()->mark_compact_collector();
2689    if (collector->sweeping_in_progress()) {
2690      collector->SweepAndRefill(this);
2691      return free_list_.Allocate(size_in_bytes);
2692    }
2693    return nullptr;
2694  }
2695  
2696  
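// Slow allocation path, taken when both the linear allocation area and the
// free list are exhausted: refill from concurrent sweeping, sweep a page on
// the main thread, expand the space, or as a last resort finish sweeping and
// retry.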
2697  HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2698    const int kMaxPagesToSweep = 1;
2699  
2700    // Allocation in this space has failed.
2701  
2702    MarkCompactCollector* collector = heap()->mark_compact_collector();
2703    // Sweeping is still in progress.
2704    if (collector->sweeping_in_progress()) {
2705      // First try to refill the free-list; concurrent sweeper threads
2706      // may have freed some objects in the meantime.
2707      RefillFreeList();
2708  
2709      // Retry the free list allocation.
2710      HeapObject* object = free_list_.Allocate(size_in_bytes);
2711      if (object != NULL) return object;
2712  
2713      // If sweeping is still in progress, try to sweep pages on the main thread.
2714      int max_freed = collector->sweeper().ParallelSweepSpace(
2715          identity(), size_in_bytes, kMaxPagesToSweep);
2716      RefillFreeList();
2717      if (max_freed >= size_in_bytes) {
2718        object = free_list_.Allocate(size_in_bytes);
2719        if (object != nullptr) return object;
2720      }
2721    }
2722  
2723    // Free list allocation failed and there is no next page.  Fail if we have
2724    // hit the old generation size limit that should cause a garbage
2725    // collection.
2726    if (!heap()->always_allocate() &&
2727        heap()->OldGenerationAllocationLimitReached()) {
2728      // If sweeper threads are active, wait for them at that point and steal
2729      // elements from their free-lists.
2730      HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
2731      return object;
2732    }
2733  
2734    // Try to expand the space and allocate in the new next page.
2735    if (Expand()) {
2736      DCHECK((CountTotalPages() > 1) ||
2737             (size_in_bytes <= free_list_.Available()));
2738      return free_list_.Allocate(size_in_bytes);
2739    }
2740  
2741    // If sweeper threads are active, wait for them at that point and steal
2742    // elements from their free-lists. Allocation may still fail, which would
2743    // indicate that there is not enough memory for the given allocation.
2744    return SweepAndRetryAllocation(size_in_bytes);
2745  }
2746  
2747  #ifdef DEBUG
2748  void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2749    CommentStatistic* comments_statistics =
2750        isolate->paged_space_comments_statistics();
2751    ReportCodeKindStatistics(isolate->code_kind_statistics());
2752    PrintF("Code size including metadata    : %10d bytes\n",
2753           isolate->code_and_metadata_size());
2754    PrintF("Bytecode size including metadata: %10d bytes\n",
2755           isolate->bytecode_and_metadata_size());
2756    PrintF(
2757        "Code comment statistics (\"   [ comment-txt   :    size/   "
2758        "count  (average)\"):\n");
2759    for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2760      const CommentStatistic& cs = comments_statistics[i];
2761      if (cs.size > 0) {
2762        PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
2763               cs.size / cs.count);
2764      }
2765    }
2766    PrintF("\n");
2767  }
2768  
2769  void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2770    CommentStatistic* comments_statistics =
2771        isolate->paged_space_comments_statistics();
2772    ClearCodeKindStatistics(isolate->code_kind_statistics());
2773    for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2774      comments_statistics[i].Clear();
2775    }
2776    comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2777    comments_statistics[CommentStatistic::kMaxComments].size = 0;
2778    comments_statistics[CommentStatistic::kMaxComments].count = 0;
2779  }
2780  
2781  
2782  // Adds a comment to the 'comment_statistics' table. Performance is OK as
2783  // long as 'kMaxComments' is small.
2784  static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2785    CommentStatistic* comments_statistics =
2786        isolate->paged_space_comments_statistics();
2787    // Do not count empty comments
2788    if (delta <= 0) return;
2789    CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2790    // Search for a free or matching entry in 'comments_statistics': 'cs'
2791    // points to result.
2792    for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2793      if (comments_statistics[i].comment == NULL) {
2794        cs = &comments_statistics[i];
2795        cs->comment = comment;
2796        break;
2797      } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2798        cs = &comments_statistics[i];
2799        break;
2800      }
2801    }
2802    // Update entry for 'comment'
2803    cs->size += delta;
2804    cs->count += 1;
2805  }
2806  
2807  
2808  // Called for each nested comment start (start marked with '[ xxx', end
2809  // marked with ']').  RelocIterator 'it' must point to a comment reloc info.
2810  static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2811    DCHECK(!it->done());
2812    DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
2813    const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2814    if (tmp[0] != '[') {
2815      // Not a nested comment; skip
2816      return;
2817    }
2818  
2819    // Search for end of nested comment or a new nested comment
2820    const char* const comment_txt =
2821        reinterpret_cast<const char*>(it->rinfo()->data());
2822    const byte* prev_pc = it->rinfo()->pc();
2823    int flat_delta = 0;
2824    it->next();
2825    while (true) {
2826      // All nested comments must be terminated properly, and therefore we
2827      // will exit the loop.
2828      DCHECK(!it->done());
2829      if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2830        const char* const txt =
2831            reinterpret_cast<const char*>(it->rinfo()->data());
2832        flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2833        if (txt[0] == ']') break;  // End of nested comment
2834        // A new comment
2835        CollectCommentStatistics(isolate, it);
2836        // Skip code that was covered with previous comment
2837        prev_pc = it->rinfo()->pc();
2838      }
2839      it->next();
2840    }
2841    EnterComment(isolate, comment_txt, flat_delta);
2842  }
2843  
2844  // Collects code comment statistics
2845  static void CollectCodeCommentStatistics(HeapObject* obj, Isolate* isolate) {
2846    if (!obj->IsCode()) {
2847      return;
2848    }
2849    Code* code = Code::cast(obj);
2850    RelocIterator it(code);
2851    int delta = 0;
2852    const byte* prev_pc = code->instruction_start();
2853    while (!it.done()) {
2854      if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2855        delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2856        CollectCommentStatistics(isolate, &it);
2857        prev_pc = it.rinfo()->pc();
2858      }
2859      it.next();
2860    }
2861  
2862    DCHECK(code->instruction_start() <= prev_pc &&
2863           prev_pc <= code->instruction_end());
2864    delta += static_cast<int>(code->instruction_end() - prev_pc);
2865    EnterComment(isolate, "NoComment", delta);
2866  }
2867  
2868  
2869  void PagedSpace::ReportStatistics() {
2870    int pct = static_cast<int>(Available() * 100 / Capacity());
2871    PrintF("  capacity: %" V8PRIdPTR ", waste: %" V8PRIdPTR
2872           ", available: %" V8PRIdPTR ", %%%d\n",
2873           Capacity(), Waste(), Available(), pct);
2874  
2875    if (heap()->mark_compact_collector()->sweeping_in_progress()) {
2876      heap()->mark_compact_collector()->EnsureSweepingCompleted();
2877    }
2878    ClearHistograms(heap()->isolate());
2879    HeapObjectIterator obj_it(this);
2880    for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2881      CollectHistogramInfo(obj);
2882    ReportHistogram(heap()->isolate(), true);
2883  }
2884  #endif
2885  
2886  static void RecordCodeSizeIncludingMetadata(AbstractCode* abstract_code,
2887                                              Isolate* isolate) {
2888    int size = abstract_code->SizeIncludingMetadata();
2889    if (abstract_code->IsCode()) {
2890      size += isolate->code_and_metadata_size();
2891      isolate->set_code_and_metadata_size(size);
2892    } else {
2893      size += isolate->bytecode_and_metadata_size();
2894      isolate->set_bytecode_and_metadata_size(size);
2895    }
2896  }
2897  
2898  // Collects code size statistics:
2899  // - code and metadata size
2900  // - by code kind (only in debug mode)
2901  // - by code comment (only in debug mode)
2902  void PagedSpace::CollectCodeStatistics() {
2903    Isolate* isolate = heap()->isolate();
2904    HeapObjectIterator obj_it(this);
2905    for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2906      if (obj->IsAbstractCode()) {
2907        AbstractCode* code = AbstractCode::cast(obj);
2908        RecordCodeSizeIncludingMetadata(code, isolate);
2909  #ifdef DEBUG
2910        isolate->code_kind_statistics()[code->kind()] += code->Size();
2911        CollectCodeCommentStatistics(obj, isolate);
2912  #endif
2913      }
2914    }
2915  }
2916  
2917  void PagedSpace::ResetCodeAndMetadataStatistics(Isolate* isolate) {
2918    isolate->set_code_and_metadata_size(0);
2919    isolate->set_bytecode_and_metadata_size(0);
2920  #ifdef DEBUG
2921    ResetCodeStatistics(isolate);
2922  #endif
2923  }
2924  
2925  // -----------------------------------------------------------------------------
2926  // MapSpace implementation
2927  
2928  #ifdef VERIFY_HEAP
2929  void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
2930  #endif
2931  
2932  
2933  // -----------------------------------------------------------------------------
2934  // LargeObjectIterator
2935  
2936  LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2937    current_ = space->first_page_;
2938  }
2939  
2940  
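      // Returns the object on the current page and advances to the next page,
      // or NULL once all pages have been visited.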
2941  HeapObject* LargeObjectIterator::Next() {
2942    if (current_ == NULL) return NULL;
2943  
2944    HeapObject* object = current_->GetObject();
2945    current_ = current_->next_page();
2946    return object;
2947  }
2948  
2949  
2950  // -----------------------------------------------------------------------------
2951  // LargeObjectSpace
2952  
2953  LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
2954      : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
2955        first_page_(NULL),
2956        size_(0),
2957        page_count_(0),
2958        objects_size_(0),
2959        chunk_map_(base::HashMap::PointersMatch, 1024) {}
2960  
2961  LargeObjectSpace::~LargeObjectSpace() {}
2962  
2963  
2964  bool LargeObjectSpace::SetUp() {
2965    first_page_ = NULL;
2966    size_ = 0;
2967    page_count_ = 0;
2968    objects_size_ = 0;
2969    chunk_map_.Clear();
2970    return true;
2971  }
2972  
2973  
2974  void LargeObjectSpace::TearDown() {
2975    while (first_page_ != NULL) {
2976      LargePage* page = first_page_;
2977      first_page_ = first_page_->next_page();
2978      LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2979      heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
2980    }
2981    SetUp();
2982  }
2983  
2984  
2985  AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2986                                                 Executability executable) {
2987    // Check if we want to force a GC before growing the old space further.
2988    // If so, fail the allocation.
2989    if (!heap()->CanExpandOldGeneration(object_size)) {
2990      return AllocationResult::Retry(identity());
2991    }
2992  
2993    LargePage* page = heap()->memory_allocator()->AllocateLargePage(
2994        object_size, this, executable);
2995    if (page == NULL) return AllocationResult::Retry(identity());
2996    DCHECK(page->area_size() >= object_size);
2997  
2998    size_ += static_cast<int>(page->size());
2999    AccountCommitted(static_cast<intptr_t>(page->size()));
3000    objects_size_ += object_size;
3001    page_count_++;
3002    page->set_next_page(first_page_);
3003    first_page_ = page;
3004  
3005    // Register all MemoryChunk::kAlignment-aligned chunks covered by
3006    // this large page in the chunk map.
3007    uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
3008    uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
3009    for (uintptr_t key = base; key <= limit; key++) {
3010      base::HashMap::Entry* entry = chunk_map_.LookupOrInsert(
3011          reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
3012      DCHECK(entry != NULL);
3013      entry->value = page;
3014    }
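        // FindPage() recomputes the same key from an interior address
        // (address / MemoryChunk::kAlignment), so any address within this
        // large page maps back to the entry registered above.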
3015  
3016    HeapObject* object = page->GetObject();
3017    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
3018  
3019    if (Heap::ShouldZapGarbage()) {
3020      // Make the object consistent so the heap can be verified in OldSpaceStep.
3021      // We only need to do this in debug builds or if verify_heap is on.
3022      reinterpret_cast<Object**>(object->address())[0] =
3023          heap()->fixed_array_map();
3024      reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
3025    }
3026  
3027    heap()->incremental_marking()->OldSpaceStep(object_size);
3028    AllocationStep(object->address(), object_size);
3029    return object;
3030  }
3031  
3032  
3033  size_t LargeObjectSpace::CommittedPhysicalMemory() {
3034    // On a platform that provides lazy committing of memory, this may
3035    // over-account the memory that is actually committed. There is currently
3036    // no easy way to precisely account committed memory in large object space.
3037    return CommittedMemory();
3038  }
3039  
3040  
3041  // GC support
3042  Object* LargeObjectSpace::FindObject(Address a) {
3043    LargePage* page = FindPage(a);
3044    if (page != NULL) {
3045      return page->GetObject();
3046    }
3047    return Smi::FromInt(0);  // Signaling not found.
3048  }
3049  
3050  
3051  LargePage* LargeObjectSpace::FindPage(Address a) {
3052    uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
3053    base::HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
3054                                                static_cast<uint32_t>(key));
3055    if (e != NULL) {
3056      DCHECK(e->value != NULL);
3057      LargePage* page = reinterpret_cast<LargePage*>(e->value);
3058      DCHECK(LargePage::IsValid(page));
3059      if (page->Contains(a)) {
3060        return page;
3061      }
3062    }
3063    return NULL;
3064  }
3065  
3066  
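      // Every large object is expected to be black (live) here; clear its mark
      // bit and reset the page's progress bar and live-byte count.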
3067  void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
3068    LargePage* current = first_page_;
3069    while (current != NULL) {
3070      HeapObject* object = current->GetObject();
3071      MarkBit mark_bit = Marking::MarkBitFrom(object);
3072      DCHECK(Marking::IsBlack(mark_bit));
3073      Marking::BlackToWhite(mark_bit);
3074      Page::FromAddress(object->address())->ResetProgressBar();
3075      Page::FromAddress(object->address())->ResetLiveBytes();
3076      current = current->next_page();
3077    }
3078  }
3079  
3080  
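      // Unlinks and frees every large page whose object is not marked black,
      // updating the size, committed-memory, and chunk-map bookkeeping.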
3081  void LargeObjectSpace::FreeUnmarkedObjects() {
3082    LargePage* previous = NULL;
3083    LargePage* current = first_page_;
3084    while (current != NULL) {
3085      HeapObject* object = current->GetObject();
3086      MarkBit mark_bit = Marking::MarkBitFrom(object);
3087      DCHECK(!Marking::IsGrey(mark_bit));
3088      if (Marking::IsBlack(mark_bit)) {
3089        previous = current;
3090        current = current->next_page();
3091      } else {
3092        LargePage* page = current;
3093        // Cut the chunk out from the chunk list.
3094        current = current->next_page();
3095        if (previous == NULL) {
3096          first_page_ = current;
3097        } else {
3098          previous->set_next_page(current);
3099        }
3100  
3101        // Free the chunk.
3102        size_ -= static_cast<int>(page->size());
3103        AccountUncommitted(static_cast<intptr_t>(page->size()));
3104        objects_size_ -= object->Size();
3105        page_count_--;
3106  
3107        // Remove entries belonging to this page.
3108        // The local |alignment| variable keeps the lines below within the
3109        // 80-character limit checked by tools/presubmit.py.
3110        const intptr_t alignment = MemoryChunk::kAlignment;
3111        uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
3112        uintptr_t limit = base + (page->size() - 1) / alignment;
3113        for (uintptr_t key = base; key <= limit; key++) {
3114          chunk_map_.Remove(reinterpret_cast<void*>(key),
3115                            static_cast<uint32_t>(key));
3116        }
3117  
3118        heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
3119      }
3120    }
3121  }
3122  
3123  
3124  bool LargeObjectSpace::Contains(HeapObject* object) {
3125    Address address = object->address();
3126    MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3127  
3128    bool owned = (chunk->owner() == this);
3129  
3130    SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3131  
3132    return owned;
3133  }
3134  
3135  
3136  #ifdef VERIFY_HEAP
3137  // We do not assume that the large object iterator works, because it depends
3138  // on the invariants we are checking during verification.
3139  void LargeObjectSpace::Verify() {
3140    for (LargePage* chunk = first_page_; chunk != NULL;
3141         chunk = chunk->next_page()) {
3142      // Each chunk contains an object that starts at the large object page's
3143      // object area start.
3144      HeapObject* object = chunk->GetObject();
3145      Page* page = Page::FromAddress(object->address());
3146      CHECK(object->address() == page->area_start());
3147  
3148      // The first word should be a map, and we expect all map pointers to be
3149      // in map space.
3150      Map* map = object->map();
3151      CHECK(map->IsMap());
3152      CHECK(heap()->map_space()->Contains(map));
3153  
3154      // We have only code, sequential strings, external strings
3155      // (sequential strings that have been morphed into external
3156      // strings), fixed arrays, fixed double arrays, and byte arrays in the
3157      // large object space.
3158      CHECK(object->IsAbstractCode() || object->IsSeqString() ||
3159            object->IsExternalString() || object->IsFixedArray() ||
3160            object->IsFixedDoubleArray() || object->IsByteArray());
3161  
3162      // The object itself should look OK.
3163      object->ObjectVerify();
3164  
3165      // Byte arrays and strings don't have interior pointers.
3166      if (object->IsAbstractCode()) {
3167        VerifyPointersVisitor code_visitor;
3168        object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3169      } else if (object->IsFixedArray()) {
3170        FixedArray* array = FixedArray::cast(object);
3171        for (int j = 0; j < array->length(); j++) {
3172          Object* element = array->get(j);
3173          if (element->IsHeapObject()) {
3174            HeapObject* element_object = HeapObject::cast(element);
3175            CHECK(heap()->Contains(element_object));
3176            CHECK(element_object->map()->IsMap());
3177          }
3178        }
3179      }
3180    }
3181  }
3182  #endif
3183  
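      // Mirrors PagedSpace::CollectCodeStatistics() for objects in the large
      // object space.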
3184  void LargeObjectSpace::CollectCodeStatistics() {
3185    Isolate* isolate = heap()->isolate();
3186    LargeObjectIterator obj_it(this);
3187    for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
3188      if (obj->IsAbstractCode()) {
3189        AbstractCode* code = AbstractCode::cast(obj);
3190        RecordCodeSizeIncludingMetadata(code, isolate);
3191  #ifdef DEBUG
3192        isolate->code_kind_statistics()[code->kind()] += code->Size();
3193        CollectCodeCommentStatistics(obj, isolate);
3194  #endif
3195      }
3196    }
3197  }
3198  
3199  #ifdef DEBUG
3200  void LargeObjectSpace::Print() {
3201    OFStream os(stdout);
3202    LargeObjectIterator it(this);
3203    for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3204      obj->Print(os);
3205    }
3206  }
3207  
3208  
3209  void LargeObjectSpace::ReportStatistics() {
3210    PrintF("  size: %" V8PRIdPTR "\n", size_);
3211    int num_objects = 0;
3212    ClearHistograms(heap()->isolate());
3213    LargeObjectIterator it(this);
3214    for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3215      num_objects++;
3216      CollectHistogramInfo(obj);
3217    }
3218  
3219    PrintF(
3220        "  number of objects %d, "
3221        "size of objects %" V8PRIdPTR "\n",
3222        num_objects, objects_size_);
3223    if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3224  }
3225  
3226  
3227  void Page::Print() {
3228    // Make a best effort to print the objects in the page.
3229    PrintF("Page@%p in %s\n", static_cast<void*>(this->address()),
3230           AllocationSpaceName(this->owner()->identity()));
3231    printf(" --------------------------------------\n");
3232    HeapObjectIterator objects(this);
3233    unsigned mark_size = 0;
3234    for (HeapObject* object = objects.Next(); object != NULL;
3235         object = objects.Next()) {
3236      bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
3237      PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
3238      if (is_marked) {
3239        mark_size += object->Size();
3240      }
3241      object->ShortPrint();
3242      PrintF("\n");
3243    }
3244    printf(" --------------------------------------\n");
3245    printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3246  }
3247  
3248  #endif  // DEBUG
3249  }  // namespace internal
3250  }  // namespace v8
3251