// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/zone/zone.h"

#include <cstring>
#include <memory>

#include "src/init/v8.h"
#include "src/sanitizer/asan.h"
#include "src/utils/utils.h"
#include "src/zone/type-stats.h"

namespace v8 {
namespace internal {

namespace {

#ifdef V8_USE_ADDRESS_SANITIZER

constexpr size_t kASanRedzoneBytes = 24;  // Must be a multiple of 8.
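// (Keeping the redzone a multiple of kAlignmentInBytes means position_ stays
// properly aligned after the redzone is skipped in AsanNew below.)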

#else  // !V8_USE_ADDRESS_SANITIZER

constexpr size_t kASanRedzoneBytes = 0;

#endif  // V8_USE_ADDRESS_SANITIZER

}  // namespace

Zone::Zone(AccountingAllocator* allocator, const char* name,
           bool support_compression)
    : allocator_(allocator),
      name_(name),
      supports_compression_(support_compression) {
  allocator_->TraceZoneCreation(this);
}

Zone::~Zone() {
  DeleteAll();

  DCHECK_EQ(segment_bytes_allocated_, 0);
}
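
// A minimal usage sketch (illustrative only, assuming the Zone::New<T> helper
// declared in zone.h; callers allocate through such helpers rather than
// calling AsanNew() directly):
//
//   AccountingAllocator allocator;
//   Zone zone(&allocator, "example-zone");
//   struct Point { int x, y; };
//   Point* p = zone.New<Point>();
//   // No explicit free: all zone memory is returned when `zone` dies.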

void* Zone::AsanNew(size_t size) {
  CHECK(!sealed_);

  // Round up the requested size to fit the alignment.
  size = RoundUp(size, kAlignmentInBytes);

  // Check if the requested size is available without expanding.
  Address result = position_;

  const size_t size_with_redzone = size + kASanRedzoneBytes;
  DCHECK_LE(position_, limit_);
  if (size_with_redzone > limit_ - position_) {
    result = NewExpand(size_with_redzone);
  } else {
    position_ += size_with_redzone;
  }

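  // Memory layout at this point (bump-pointer allocation):
  //
  //   result ............ result + size    caller's payload (returned below)
  //   result + size ..... position_        kASanRedzoneBytes of redzone
  //
  // The redzone is poisoned so that ASan reports any access that runs off
  // the end of the payload.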
  Address redzone_position = result + size;
  DCHECK_EQ(redzone_position + kASanRedzoneBytes, position_);
  ASAN_POISON_MEMORY_REGION(reinterpret_cast<void*>(redzone_position),
                            kASanRedzoneBytes);

  // Check that the result has the proper alignment and return it.
  DCHECK(IsAligned(result, kAlignmentInBytes));
  return reinterpret_cast<void*>(result);
}

void Zone::ReleaseMemory() {
  DeleteAll();
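  // Re-register the (now empty) zone with the allocator so that tracing
  // observes it as freshly created rather than destroyed.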
  allocator_->TraceZoneCreation(this);
}

void Zone::DeleteAll() {
  Segment* current = segment_head_;
  if (current) {
    // Commit the allocation_size_ of segment_head_ and disconnect the segment
    // list from the zone, so that the tracing accounting allocator observes a
    // value that includes memory from the head segment.
    allocation_size_ = allocation_size();
    segment_head_ = nullptr;
  }
  allocator_->TraceZoneDestruction(this);

  // Traverse the chained list of segments and return them all to the
  // allocator.
  while (current) {
    Segment* next = current->next();
    size_t size = current->total_size();

    // Un-poison the segment content so we can re-use or zap it later.
    ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void*>(current->start()),
                                current->capacity());

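    // Drop this segment from the zone's byte accounting before handing it
    // back to the allocator.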
    segment_bytes_allocated_ -= size;
    allocator_->ReturnSegment(current, supports_compression());
    current = next;
  }

  position_ = limit_ = 0;
  allocation_size_ = 0;
#ifdef V8_ENABLE_PRECISE_ZONE_STATS
  allocation_size_for_tracing_ = 0;
#endif
}

Address Zone::NewExpand(size_t size) {
  // Make sure the requested size is already properly aligned and that
  // there isn't enough room in the Zone to satisfy the request.
  DCHECK_EQ(size, RoundDown(size, kAlignmentInBytes));
  DCHECK_LT(limit_ - position_, size);

  // Compute the new segment size. We use a 'high water mark' strategy,
  // increasing the segment size on every expansion (roughly doubling it,
  // capped at kMaximumSegmentSize below) so that malloc() and free()
  // overhead is amortized across many zone allocations.
  Segment* head = segment_head_;
  const size_t old_size = head ? head->total_size() : 0;
  static const size_t kSegmentOverhead = sizeof(Segment) + kAlignmentInBytes;
  const size_t new_size_no_overhead = size + (old_size << 1);
  size_t new_size = kSegmentOverhead + new_size_no_overhead;
  const size_t min_new_size = kSegmentOverhead + size;
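  // Illustrative numbers: with an 8 KB head segment and a 1 KB request,
  // new_size_no_overhead = 1 KB + 2 * 8 KB = 17 KB, so each expansion yields
  // a segment roughly twice the size of the previous one.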
  // Guard against integer overflow.
  if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
    V8::FatalProcessOutOfMemory(nullptr, "Zone");
    return kNullAddress;
  }
  if (new_size < kMinimumSegmentSize) {
    new_size = kMinimumSegmentSize;
  } else if (new_size >= kMaximumSegmentSize) {
    // Limit the size of new segments to avoid growing the segment size
    // exponentially, thus putting pressure on contiguous virtual address
    // space. All the while making sure to allocate a segment large enough to
    // hold the requested size.
    new_size = Max(min_new_size, kMaximumSegmentSize);
  }
  if (new_size > INT_MAX) {
    V8::FatalProcessOutOfMemory(nullptr, "Zone");
    return kNullAddress;
  }
  Segment* segment =
      allocator_->AllocateSegment(new_size, supports_compression());
  if (segment == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "Zone");
    return kNullAddress;
  }

  DCHECK_GE(segment->total_size(), new_size);
  segment_bytes_allocated_ += segment->total_size();
  segment->set_zone(this);
  segment->set_next(segment_head_);
  // Commit the allocation_size_ of segment_head_, if any, so that the tracing
  // accounting allocator observes a value that includes memory from the
  // previous head segment.
  allocation_size_ = allocation_size();
  segment_head_ = segment;
  allocator_->TraceAllocateSegment(segment);

  // Recompute 'position_' and 'limit_' based on the new segment.
  Address result = RoundUp(segment->start(), kAlignmentInBytes);
  position_ = result + size;
  // Check for address overflow.
  // (Should not happen since the segment is guaranteed to accommodate
  // size bytes + header and alignment padding.)
  DCHECK(position_ >= result);
  limit_ = segment->end();
  DCHECK(position_ <= limit_);
  return result;
}

}  // namespace internal
}  // namespace v8