// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/zone.h"

#include <cstring>

#include "src/v8.h"

#ifdef V8_USE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif  // V8_USE_ADDRESS_SANITIZER

namespace v8 {
namespace internal {

namespace {

#if V8_USE_ADDRESS_SANITIZER

const size_t kASanRedzoneBytes = 24;  // Must be a multiple of 8.

#else

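// Outside ASan builds the poison/unpoison macros below are no-ops; the USE()
// calls only consume their arguments so they do not trigger unused warnings.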
#define ASAN_POISON_MEMORY_REGION(start, size) \
  do {                                         \
    USE(start);                                \
    USE(size);                                 \
  } while (false)

#define ASAN_UNPOISON_MEMORY_REGION(start, size) \
  do {                                           \
    USE(start);                                  \
    USE(size);                                   \
  } while (false)

const size_t kASanRedzoneBytes = 0;

#endif  // V8_USE_ADDRESS_SANITIZER

}  // namespace


// Segments represent chunks of memory: they have a starting address
// (encoded in the this pointer) and a size in bytes. Segments are
// chained together to form a LIFO structure, with the newest segment
// available as segment_head_. Segments are allocated and de-allocated
// through the Zone's AccountingAllocator.
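//
// Layout of a segment of size_ bytes (header and payload are contiguous):
//
//   [ Segment header: next_, size_ ][ payload: capacity() bytes ]
//   ^ this                          ^ start()                    ^ end()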

class Segment {
 public:
  void Initialize(Segment* next, size_t size) {
    next_ = next;
    size_ = size;
  }

  Segment* next() const { return next_; }
  void clear_next() { next_ = nullptr; }

  size_t size() const { return size_; }
  size_t capacity() const { return size_ - sizeof(Segment); }

  Address start() const { return address(sizeof(Segment)); }
  Address end() const { return address(size_); }

 private:
  // Computes the address of the nth byte in this segment.
  Address address(size_t n) const { return Address(this) + n; }

  Segment* next_;
  size_t size_;
};

Zone::Zone(base::AccountingAllocator* allocator)
    : allocation_size_(0),
      segment_bytes_allocated_(0),
      position_(0),
      limit_(0),
      allocator_(allocator),
      segment_head_(nullptr) {}

Zone::~Zone() {
  DeleteAll();
  DeleteKeptSegment();

  DCHECK(segment_bytes_allocated_ == 0);
}


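// Allocates size bytes of zone memory, expanding into a new segment when the
// current one is exhausted. Illustrative use only (assuming a
// base::AccountingAllocator instance is at hand):
//
//   Zone zone(&allocator);
//   void* block = zone.New(64);
//
// Memory returned by New() is never freed individually; it is reclaimed in
// bulk by DeleteAll() / DeleteKeptSegment() when the zone goes away.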
void* Zone::New(size_t size) {
  // Round up the requested size to fit the alignment.
  size = RoundUp(size, kAlignment);

  // If the allocation size is divisible by 8 then we return an 8-byte aligned
  // address.
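  // On 32-bit targets, where kPointerSize == kAlignment == 4, position_ is
  // only guaranteed to be 4-byte aligned. The expression below adds 4 exactly
  // when the rounded size is a multiple of 8 (so ((~size) & 4) == 4) and
  // position_ is currently 4 modulo 8 (so (position_ & 4) == 4), which makes
  // the returned address 8-byte aligned.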
  if (kPointerSize == 4 && kAlignment == 4) {
    position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
  } else {
    DCHECK(kAlignment >= kPointerSize);
  }

  // Check if the requested size is available without expanding.
  Address result = position_;

  const size_t size_with_redzone = size + kASanRedzoneBytes;
  const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_);
  const uintptr_t position = reinterpret_cast<uintptr_t>(position_);
  // position_ > limit_ can be true after the alignment correction above.
  if (limit < position || size_with_redzone > limit - position) {
    result = NewExpand(size_with_redzone);
  } else {
    position_ += size_with_redzone;
  }

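  // The bytes immediately past the returned object form an ASan redzone.
  // Poisoning them makes out-of-bounds accesses beyond size bytes detectable;
  // the macro is a no-op when ASan is not enabled.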
  Address redzone_position = result + size;
  DCHECK(redzone_position + kASanRedzoneBytes == position_);
  ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);

  // Check that the result has the proper alignment and return it.
  DCHECK(IsAddressAligned(result, kAlignment, 0));
  allocation_size_ += size;
  return reinterpret_cast<void*>(result);
}


void Zone::DeleteAll() {
#ifdef DEBUG
  // Constant byte value used for zapping dead memory in debug mode.
  static const unsigned char kZapDeadByte = 0xcd;
#endif

  // Find a segment with a suitable size to keep around.
  Segment* keep = nullptr;
  // Traverse the chained list of segments, zapping (in debug mode)
  // and freeing every segment except the one we wish to keep.
  for (Segment* current = segment_head_; current;) {
    Segment* next = current->next();
    if (!keep && current->size() <= kMaximumKeptSegmentSize) {
      // Unlink the segment we wish to keep from the list.
      keep = current;
      keep->clear_next();
    } else {
      size_t size = current->size();
#ifdef DEBUG
      // Un-poison first so the zapping doesn't trigger ASan complaints.
      ASAN_UNPOISON_MEMORY_REGION(current, size);
      // Zap the entire current segment (including the header).
      memset(current, kZapDeadByte, size);
#endif
      DeleteSegment(current, size);
    }
    current = next;
  }

  // If we have found a segment we want to keep, we must recompute the
  // variables 'position' and 'limit' to prepare for future allocate
  // attempts. Otherwise, we must clear the position and limit to
  // force a new segment to be allocated on demand.
  if (keep) {
    Address start = keep->start();
    position_ = RoundUp(start, kAlignment);
    limit_ = keep->end();
    // Un-poison so we can re-use the segment later.
    ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
#ifdef DEBUG
    // Zap the contents of the kept segment (but not the header).
    memset(start, kZapDeadByte, keep->capacity());
#endif
  } else {
    position_ = limit_ = 0;
  }

  allocation_size_ = 0;
  // Update the head segment to be the kept segment (if any).
  segment_head_ = keep;
}


void Zone::DeleteKeptSegment() {
#ifdef DEBUG
  // Constant byte value used for zapping dead memory in debug mode.
  static const unsigned char kZapDeadByte = 0xcd;
#endif

  DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr);
  if (segment_head_ != nullptr) {
    size_t size = segment_head_->size();
#ifdef DEBUG
    // Un-poison first so the zapping doesn't trigger ASan complaints.
    ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
    // Zap the entire kept segment (including the header).
    memset(segment_head_, kZapDeadByte, size);
#endif
    DeleteSegment(segment_head_, size);
    segment_head_ = nullptr;
  }

  DCHECK(segment_bytes_allocated_ == 0);
}


// Creates a new segment, sets its size, and pushes it to the front
// of the segment chain. Returns the new segment, or nullptr if the
// underlying allocation fails.
Segment* Zone::NewSegment(size_t size) {
  Segment* result = reinterpret_cast<Segment*>(allocator_->Allocate(size));
  segment_bytes_allocated_ += size;
  if (result != nullptr) {
    result->Initialize(segment_head_, size);
    segment_head_ = result;
  }
  return result;
}


// Deletes the given segment. Does not touch the segment chain.
void Zone::DeleteSegment(Segment* segment, size_t size) {
  segment_bytes_allocated_ -= size;
  allocator_->Free(segment, size);
}


Address Zone::NewExpand(size_t size) {
  // Make sure the requested size is already properly aligned and that
  // there isn't enough room in the Zone to satisfy the request.
  DCHECK_EQ(size, RoundDown(size, kAlignment));
  DCHECK(limit_ < position_ ||
         reinterpret_cast<uintptr_t>(limit_) -
                 reinterpret_cast<uintptr_t>(position_) <
             size);

  // Compute the new segment size. We use a 'high water mark' strategy,
  // where we increase the segment size every time we expand, but cap the
  // size of the segment kept around by DeleteAll(). This is to avoid
  // excessive allocation and de-allocation overhead.
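  // For example, with a 32 KB head segment and a 1 KB request, the new
  // segment will be sizeof(Segment) + kAlignment + 1 KB + 64 KB, subject to
  // the minimum/maximum clamping below.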
  Segment* head = segment_head_;
  const size_t old_size = (head == nullptr) ? 0 : head->size();
  static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment;
  const size_t new_size_no_overhead = size + (old_size << 1);
  size_t new_size = kSegmentOverhead + new_size_no_overhead;
  const size_t min_new_size = kSegmentOverhead + size;
  // Guard against integer overflow.
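  // (An unsigned addition that wrapped around yields a sum smaller than one
  // of its operands, which is what the two comparisons below detect.)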
  if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
    V8::FatalProcessOutOfMemory("Zone");
    return nullptr;
  }
  if (new_size < kMinimumSegmentSize) {
    new_size = kMinimumSegmentSize;
  } else if (new_size > kMaximumSegmentSize) {
    // Limit the size of new segments to avoid growing the segment size
    // exponentially, which would put pressure on contiguous virtual address
    // space, while still allocating a segment large enough to hold the
    // requested size.
    new_size = Max(min_new_size, kMaximumSegmentSize);
  }
  if (new_size > INT_MAX) {
    V8::FatalProcessOutOfMemory("Zone");
    return nullptr;
  }
  Segment* segment = NewSegment(new_size);
  if (segment == nullptr) {
    V8::FatalProcessOutOfMemory("Zone");
    return nullptr;
  }

  // Recompute 'position' and 'limit' based on the new segment.
  Address result = RoundUp(segment->start(), kAlignment);
  position_ = result + size;
  // Check for address overflow.
  // (Should not happen since the segment is guaranteed to accommodate
  // size bytes plus the header and alignment padding.)
  DCHECK(reinterpret_cast<uintptr_t>(position_) >=
         reinterpret_cast<uintptr_t>(result));
  limit_ = segment->end();
  DCHECK(position_ <= limit_);
  return result;
}

}  // namespace internal
}  // namespace v8