// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/zone/zone.h"

#include <climits>  // For INT_MAX.
#include <cstring>
#include <memory>

#include "src/base/sanitizer/asan.h"
#include "src/init/v8.h"
#include "src/utils/utils.h"
#include "src/zone/type-stats.h"

namespace v8 {
namespace internal {

namespace {

#ifdef V8_USE_ADDRESS_SANITIZER

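// When ASan is active, every zone allocation is followed by a small poisoned
// "redzone" so that accesses that run past the end of an allocation trigger
// an ASan report instead of silently corrupting the next object.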
constexpr size_t kASanRedzoneBytes = 24;  // Must be a multiple of 8.

#else  // !V8_USE_ADDRESS_SANITIZER

constexpr size_t kASanRedzoneBytes = 0;

#endif  // V8_USE_ADDRESS_SANITIZER

}  // namespace

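// A Zone is a bump-pointer arena: memory is handed out from a chain of
// Segments obtained from the AccountingAllocator, and all of it is returned
// at once when the Zone is destroyed (or via Reset()/DeleteAll()).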
Zone::Zone(AccountingAllocator* allocator, const char* name,
           bool support_compression)
    : allocator_(allocator),
      name_(name),
      supports_compression_(support_compression) {
  allocator_->TraceZoneCreation(this);
}

Zone::~Zone() {
  DeleteAll();
  DCHECK_EQ(segment_bytes_allocated_.load(), 0);
}

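// Allocates |size| bytes from the zone. When ASan is enabled, a poisoned
// redzone of kASanRedzoneBytes is placed directly after the returned block:
//   [ user data: size (rounded up) ][ poisoned redzone ]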
void* Zone::AsanNew(size_t size) {
  CHECK(!sealed_);

  // Round up the requested size to fit the alignment.
  size = RoundUp(size, kAlignmentInBytes);

  // Check if the requested size is available without expanding.
  Address result = position_;

  const size_t size_with_redzone = size + kASanRedzoneBytes;
  DCHECK_LE(position_, limit_);
  if (size_with_redzone > limit_ - position_) {
    result = NewExpand(size_with_redzone);
  } else {
    position_ += size_with_redzone;
  }

  Address redzone_position = result + size;
  DCHECK_EQ(redzone_position + kASanRedzoneBytes, position_);
  ASAN_POISON_MEMORY_REGION(reinterpret_cast<void*>(redzone_position),
                            kASanRedzoneBytes);

  // Check that the result has the proper alignment and return it.
  DCHECK(IsAligned(result, kAlignmentInBytes));
  return reinterpret_cast<void*>(result);
}

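// Frees all memory in the zone except the most recently allocated segment,
// which is kept (un-poisoned and zapped) for reuse by future allocations.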
void Zone::Reset() {
  if (!segment_head_) return;
  Segment* keep = segment_head_;
  segment_head_ = segment_head_->next();
  if (segment_head_ != nullptr) {
    // Reset the position to the end of the new head, and uncommit its
    // allocation size (which will be re-committed in DeleteAll).
    position_ = segment_head_->end();
    allocation_size_ -= segment_head_->end() - segment_head_->start();
  }
  keep->set_next(nullptr);
  DeleteAll();
  allocator_->TraceZoneCreation(this);

  // Un-poison the kept segment content so we can zap and re-use it.
  ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void*>(keep->start()),
                              keep->capacity());
  keep->ZapContents();

  segment_head_ = keep;
  position_ = RoundUp(keep->start(), kAlignmentInBytes);
  limit_ = keep->end();
  DCHECK_LT(allocation_size(), kAlignmentInBytes);
  DCHECK_EQ(segment_bytes_allocated_, keep->total_size());
}

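// Returns every segment to the allocator and resets all bookkeeping, leaving
// the zone empty.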
void Zone::DeleteAll() {
  Segment* current = segment_head_;
  if (current) {
    // Commit the allocation_size_ of segment_head_ and disconnect the segment
    // list from the zone, so that the tracing accounting allocator observes a
    // value that includes memory from the head segment.
    allocation_size_ = allocation_size();
    segment_head_ = nullptr;
  }
  allocator_->TraceZoneDestruction(this);

  // Traverse the chained list of segments and return them all to the
  // allocator.
  while (current) {
    Segment* next = current->next();
    segment_bytes_allocated_ -= current->total_size();
    ReleaseSegment(current);
    current = next;
  }

  position_ = limit_ = 0;
  allocation_size_ = 0;
#ifdef V8_ENABLE_PRECISE_ZONE_STATS
  allocation_size_for_tracing_ = 0;
#endif
}

void Zone::ReleaseSegment(Segment* segment) {
  // Un-poison the segment content so we can re-use or zap it later.
  ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void*>(segment->start()),
                              segment->capacity());
  allocator_->ReturnSegment(segment, supports_compression());
}

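// Expands the zone with a freshly allocated segment that is guaranteed to be
// large enough for |size| bytes, and returns the aligned start of the new
// allocation within it.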
Address Zone::NewExpand(size_t size) {
  // Make sure the requested size is already properly aligned and that
  // there isn't enough room in the Zone to satisfy the request.
  DCHECK_EQ(size, RoundDown(size, kAlignmentInBytes));
  DCHECK_LT(limit_ - position_, size);

  // Compute the new segment size. We use a 'high water mark' strategy, where
  // we increase the segment size with every expansion, capped at a maximum
  // segment size. This is to avoid excessive malloc() and free() overhead.
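  // For example (hypothetical numbers): with an old head segment of 64 KB and
  // a 1 KB request, new_size is kSegmentOverhead + 1 KB + 2 * 64 KB, so the
  // segment chain roughly doubles with each expansion until the cap applies.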
  Segment* head = segment_head_;
  const size_t old_size = head ? head->total_size() : 0;
  static const size_t kSegmentOverhead = sizeof(Segment) + kAlignmentInBytes;
  const size_t new_size_no_overhead = size + (old_size << 1);
  size_t new_size = kSegmentOverhead + new_size_no_overhead;
  const size_t min_new_size = kSegmentOverhead + size;
  // Guard against integer overflow.
  if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
    V8::FatalProcessOutOfMemory(nullptr, "Zone");
  }
  if (new_size < kMinimumSegmentSize) {
    new_size = kMinimumSegmentSize;
  } else if (new_size >= kMaximumSegmentSize) {
    // Limit the size of new segments to avoid growing the segment size
    // exponentially, thus putting pressure on contiguous virtual address
    // space, while still allocating a segment large enough to hold the
    // requested size.
    new_size = std::max({min_new_size, kMaximumSegmentSize});
  }
  if (new_size > INT_MAX) {
    V8::FatalProcessOutOfMemory(nullptr, "Zone");
  }
  Segment* segment =
      allocator_->AllocateSegment(new_size, supports_compression());
  if (segment == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "Zone");
  }

  DCHECK_GE(segment->total_size(), new_size);
  segment_bytes_allocated_ += segment->total_size();
  segment->set_zone(this);
  segment->set_next(segment_head_);
  // Commit the allocation_size_ of the previous head segment, if any, so that
  // the tracing accounting allocator observes a value that includes memory
  // from that segment.
  allocation_size_ = allocation_size();
  segment_head_ = segment;
  allocator_->TraceAllocateSegment(segment);

  // Recompute 'top' and 'limit' based on the new segment.
  Address result = RoundUp(segment->start(), kAlignmentInBytes);
  position_ = result + size;
  // Check for address overflow.
  // (Should not happen since the segment is guaranteed to accommodate
  // size bytes + header and alignment padding.)
  DCHECK_GE(position_, result);
  limit_ = segment->end();
  DCHECK_LE(position_, limit_);
  return result;
}

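// A ZoneScope snapshots the zone's state on construction and rolls the zone
// back to that state on destruction, releasing every segment allocated in
// between. A minimal usage sketch (names hypothetical):
//
//   Zone zone(&accounting_allocator, "example");
//   {
//     ZoneScope scope(&zone);
//     // ... temporary allocations via zone.New<T>(...) ...
//   }  // <- everything allocated inside the scope is freed here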
ZoneScope::ZoneScope(Zone* zone)
    : zone_(zone),
#ifdef V8_ENABLE_PRECISE_ZONE_STATS
      allocation_size_for_tracing_(zone->allocation_size_for_tracing_),
      freed_size_for_tracing_(zone->freed_size_for_tracing_),
#endif
      allocation_size_(zone->allocation_size_),
      segment_bytes_allocated_(zone->segment_bytes_allocated_),
      position_(zone->position_),
      limit_(zone->limit_),
      segment_head_(zone->segment_head_) {}

ZoneScope::~ZoneScope() {
  // Release segments up to the stored segment_head_.
  Segment* current = zone_->segment_head_;
  while (current != segment_head_) {
    Segment* next = current->next();
    zone_->ReleaseSegment(current);
    current = next;
  }

  // Un-poison the trailing segment content so we can re-use or zap it later.
  if (segment_head_ != nullptr) {
    void* const start = reinterpret_cast<void*>(position_);
    DCHECK_GE(start, reinterpret_cast<void*>(current->start()));
    DCHECK_LE(start, reinterpret_cast<void*>(current->end()));
    const size_t length = current->end() - reinterpret_cast<Address>(start);
    ASAN_UNPOISON_MEMORY_REGION(start, length);
  }

  // Reset the Zone to the stored state.
  zone_->allocation_size_ = allocation_size_;
  zone_->segment_bytes_allocated_ = segment_bytes_allocated_;
  zone_->position_ = position_;
  zone_->limit_ = limit_;
  zone_->segment_head_ = segment_head_;
#ifdef V8_ENABLE_PRECISE_ZONE_STATS
  zone_->allocation_size_for_tracing_ = allocation_size_for_tracing_;
  zone_->freed_size_for_tracing_ = freed_size_for_tracing_;
#endif
}

}  // namespace internal
}  // namespace v8